[
  {
    "path": ".cargo/audit.toml",
    "content": "# https://docs.rs/crate/cargo-audit/0.10.0/source/audit.toml.example\n[advisories]\nignore = [\n    # time: Potential segfault in the time crate\n    # chdig should not be affected by this, waiting for upstream.\n    \"RUSTSEC-2020-0071\",\n    # ansi_term is Unmaintained\n    \"RUSTSEC-2021-0139\",\n    # term_size is Unmaintained\n    \"RUSTSEC-2020-0163\",\n    # stdweb is unmaintained\n    \"RUSTSEC-2020-0056\",\n\n    # Waiting for upstream\n    # owning_ref: Multiple soundness issues in `owning_ref`\n    \"RUSTSEC-2022-0040\",\n    # nix: Out-of-bounds write in nix::unistd::getgrouplist\n    \"RUSTSEC-2021-0119\",\n    # rustc-serialize: Stack overflow in rustc_serialize when parsing deeply nested JSON\n    \"RUSTSEC-2022-0004\",\n    # atty: Potential unaligned read\n    \"RUSTSEC-2021-0145\",\n]\n"
  },
  {
    "path": ".cargo/config.toml",
    "content": "[build]\nrustflags = [\"--cfg\", \"tokio_unstable\"]\n"
  },
  {
    "path": ".exrc",
    "content": "\"\n\" Add this into your .vimrc, to allow vim handle this file.\n\"\n\" set exrc\n\" set secure \" even after this this is kind of dangerous\n\"\n\nset tabstop=4\nset softtabstop=4\nset shiftwidth=4\nset expandtab\n\nlet detectindent_preferred_indent=4\nlet g:detectindent_preferred_expandtab=1\n"
  },
  {
    "path": ".github/workflows/build.yml",
    "content": "---\nname: Build chdig\n\non:\n  workflow_call:\n    inputs: {}\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  lint:\n    name: Run linters\n    runs-on: ubuntu-22.04\n\n    steps:\n    - uses: actions/checkout@v3\n      with:\n        persist-credentials: false\n    - uses: Swatinem/rust-cache@v2\n      with:\n        cache-on-failure: true\n    - name: cargo check\n      run: cargo check\n    - name: cargo clippy\n      run: cargo clippy\n\n  build-linux:\n    name: Build Linux (x86_64)\n    runs-on: ubuntu-22.04\n\n    steps:\n    - uses: actions/checkout@v3\n      with:\n        # To fetch tags, but can this be improved using blobless checkout?\n        # [1]. But anyway right it is not important, and unlikely will be,\n        # since the repository is small.\n        #\n        #   [1]: https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/\n        fetch-depth: 0\n        persist-credentials: false\n\n    # Workaround for https://github.com/actions/checkout/issues/882\n    - name: Fix tags for release\n      # will break on a lightweight tag\n      run: git fetch origin +refs/tags/*:refs/tags/*\n\n    - uses: Swatinem/rust-cache@v2\n      with:\n        cache-on-failure: true\n\n    - name: Install dependencies\n      run: |\n        # nfpm\n        curl -sS -Lo /tmp/nfpm.deb \"https://github.com/goreleaser/nfpm/releases/download/v2.43.4/nfpm_2.43.4_amd64.deb\"\n        sudo dpkg -i /tmp/nfpm.deb\n        # for building cityhash for clickhouse-rs\n        sudo apt-get install -y musl-tools\n        # gcc cannot do cross compile, and there is no musl-g++ in musl-tools\n        sudo ln -srf /usr/bin/clang /usr/bin/musl-g++\n        # musl for static binaries\n        rustup target add x86_64-unknown-linux-musl\n\n    - name: Run tests\n      run: make test\n\n    - name: Build\n      run: |\n        set -x\n        make packages target=x86_64-unknown-linux-musl\n        ls -l\n        declare -A mapping\n        
mapping[chdig*.x86_64.rpm]=chdig-latest.x86_64.rpm\n        mapping[chdig*-x86_64.pkg.tar.zst]=chdig-latest-x86_64.pkg.tar.zst\n        mapping[chdig*-x86_64.tar.gz]=chdig-latest-x86_64.tar.gz\n        mapping[chdig*_amd64.deb]=chdig-latest_amd64.deb\n        mapping[target/chdig]=chdig-amd64\n        for pattern in \"${!mapping[@]}\"; do\n            cp $pattern ${mapping[$pattern]}\n        done\n\n    - name: Check package\n      run: |\n        sudo dpkg -i chdig-latest_amd64.deb\n        chdig --help\n\n    - name: Archive Packages\n      uses: actions/upload-artifact@v4\n      with:\n        name: linux-packages-amd64\n        path: |\n          chdig-amd64\n          *.deb\n          *.rpm\n          *.tar.*\n\n  build-linux-no-features:\n    name: Build Linux (no features)\n    runs-on: ubuntu-22.04\n\n    steps:\n    - uses: actions/checkout@v3\n      with:\n        persist-credentials: false\n    - uses: Swatinem/rust-cache@v2\n      with:\n        cache-on-failure: true\n    - name: Run tests\n      run: make test\n    - name: Build\n      run: |\n        cargo build --no-default-features\n    - name: Check package\n      run: |\n        cargo run --no-default-features -- --help\n\n  build-macos-x86_64:\n    name: Build MacOS (x86_64)\n    runs-on: macos-15-intel\n\n    steps:\n    - uses: actions/checkout@v3\n      with:\n        # To fetch tags, but can this be improved using blobless checkout?\n        # [1]. 
But anyway right it is not important, and unlikely will be,\n        # since the repository is small.\n        #\n        #   [1]: https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/\n        fetch-depth: 0\n        persist-credentials: false\n\n    # Workaround for https://github.com/actions/checkout/issues/882\n    - name: Fix tags for release\n      # will break on a lightweight tag\n      run: git fetch origin +refs/tags/*:refs/tags/*\n\n    - uses: Swatinem/rust-cache@v2\n      with:\n        cache-on-failure: true\n\n    - name: Worker info\n      run: |\n        # SDKs versions\n        ls -al /Library/Developer/CommandLineTools/SDKs/\n\n    - name: Build\n      run: |\n        set -x\n        make deploy-binary\n        cp target/chdig chdig-macos-x86_64\n\n    - name: Check package\n      run: |\n        ./chdig-macos-x86_64 --help\n\n    - name: Archive Packages\n      uses: actions/upload-artifact@v4\n      with:\n        name: macos-packages-x86_64\n        path: |\n          chdig-macos-x86_64\n\n  build-macos-arm64:\n    name: Build MacOS (arm64)\n    runs-on: macos-26\n\n    steps:\n    - uses: actions/checkout@v3\n      with:\n        # To fetch tags, but can this be improved using blobless checkout?\n        # [1]. 
But anyway right it is not important, and unlikely will be,\n        # since the repository is small.\n        #\n        #   [1]: https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/\n        fetch-depth: 0\n        persist-credentials: false\n\n    # Workaround for https://github.com/actions/checkout/issues/882\n    - name: Fix tags for release\n      # will break on a lightweight tag\n      run: git fetch origin +refs/tags/*:refs/tags/*\n\n    - uses: Swatinem/rust-cache@v2\n      with:\n        cache-on-failure: true\n\n    - name: Worker info\n      run: |\n        # SDKs versions\n        ls -al /Library/Developer/CommandLineTools/SDKs/\n\n    - name: Build\n      run: |\n        set -x\n        make deploy-binary\n        cp target/chdig chdig-macos-arm64\n\n    - name: Check package\n      run: |\n        ./chdig-macos-arm64 --help\n\n    - name: Archive Packages\n      uses: actions/upload-artifact@v4\n      with:\n        name: macos-packages-arm64\n        path: |\n          chdig-macos-arm64\n\n  build-windows:\n    name: Build Windows\n    runs-on: windows-latest\n\n    steps:\n    - uses: actions/checkout@v3\n      with:\n        # To fetch tags, but can this be improved using blobless checkout?\n        # [1]. 
But anyway right it is not important, and unlikely will be,\n        # since the repository is small.\n        #\n        #   [1]: https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/\n        fetch-depth: 0\n        persist-credentials: false\n\n    # Workaround for https://github.com/actions/checkout/issues/882\n    - name: Fix tags for release\n      # will break on a lightweight tag\n      run: git fetch origin +refs/tags/*:refs/tags/*\n\n    - uses: Swatinem/rust-cache@v2\n      with:\n        cache-on-failure: true\n\n    - name: Build\n      run: |\n        make deploy-binary\n        cp target/chdig.exe chdig-windows-x86_64.exe\n\n    - name: Archive Packages\n      uses: actions/upload-artifact@v4\n      with:\n        name: windows-packages-x86_64\n        path: |\n          chdig-windows-x86_64.exe\n\n  build-linux-aarch64:\n    name: Build Linux (aarch64)\n    runs-on: ubuntu-22.04-arm\n\n    steps:\n    - uses: actions/checkout@v3\n      with:\n        # To fetch tags, but can this be improved using blobless checkout?\n        # [1]. 
But anyway right it is not important, and unlikely will be,\n        # since the repository is small.\n        #\n        #   [1]: https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/\n        fetch-depth: 0\n        persist-credentials: false\n\n    # Workaround for https://github.com/actions/checkout/issues/882\n    - name: Fix tags for release\n      # will break on a lightweight tag\n      run: git fetch origin +refs/tags/*:refs/tags/*\n\n    - uses: Swatinem/rust-cache@v2\n      with:\n        cache-on-failure: true\n\n    - name: Install dependencies\n      run: |\n        # nfpm\n        curl -sS -Lo /tmp/nfpm.deb \"https://github.com/goreleaser/nfpm/releases/download/v2.43.4/nfpm_2.43.4_arm64.deb\"\n        sudo dpkg -i /tmp/nfpm.deb\n        # for building cityhash for clickhouse-rs\n        sudo apt-get install -y musl-tools\n        # gcc cannot do cross compile, and there is no musl-g++ in musl-tools\n        sudo ln -srf /usr/bin/clang /usr/bin/musl-g++\n        # \"Compiler family detection failed due to error: ToolNotFound: failed to find tool \"aarch64-linux-musl-g++\": No such file or directory\"\n        sudo ln -srf /usr/bin/clang /usr/bin/aarch64-linux-musl-g++\n        # musl for static binaries\n        rustup target add aarch64-unknown-linux-musl\n\n    - name: Run tests\n      run: make test\n\n    - name: Build\n      run: |\n        set -x\n        make packages target=aarch64-unknown-linux-musl\n        ls -l\n        declare -A mapping\n        mapping[chdig*.aarch64.rpm]=chdig-latest.aarch64.rpm\n        mapping[chdig*-aarch64.pkg.tar.zst]=chdig-latest-aarch64.pkg.tar.zst\n        mapping[chdig*-aarch64.tar.gz]=chdig-latest-aarch64.tar.gz\n        mapping[chdig*_arm64.deb]=chdig-latest_arm64.deb\n        mapping[target/chdig]=chdig-aarch64\n        for pattern in \"${!mapping[@]}\"; do\n            cp $pattern ${mapping[$pattern]}\n        done\n\n    - name: Check package\n      run: |\n        sudo dpkg 
-i chdig-latest_arm64.deb\n        chdig --help\n\n    - name: Archive Packages\n      uses: actions/upload-artifact@v4\n      with:\n        name: linux-packages-aarch64\n        path: |\n          chdig-aarch64\n          *.deb\n          *.rpm\n          *.tar.*\n"
  },
  {
    "path": ".github/workflows/pre_release.yml",
    "content": "---\nname: pre-release\n\non:\n  push:\n    branches:\n    - main\n\njobs:\n  build:\n    uses: ./.github/workflows/build.yml\n\n  publish-pre-release:\n    name: Publish Pre Release\n    runs-on: ubuntu-22.04\n\n    permissions:\n      contents: write\n\n    needs:\n    - build\n\n    steps:\n    - name: Download artifacts\n      uses: actions/download-artifact@v4\n    - uses: \"marvinpinto/action-automatic-releases@latest\"\n      with:\n        repo_token: \"${{ secrets.GITHUB_TOKEN }}\"\n        prerelease: true\n        automatic_release_tag: \"latest\"\n        title: \"Development Build\"\n        files: |\n          macos-packages-x86_64/*\n          macos-packages-arm64/*\n          windows-packages-x86_64/*\n          linux-packages-amd64/*\n          linux-packages-aarch64/*\n"
  },
  {
    "path": ".github/workflows/pull_request.yml",
    "content": "---\nname: pull_request\n\non:\n  pull_request:\n    types:\n    - synchronize\n    - reopened\n    - opened\n    branches:\n    - main\n    paths-ignore:\n    - '**.md'\n    - 'Documentation/**'\n\njobs:\n  spellcheck:\n    name: Spell Check with Typos\n    runs-on: ubuntu-latest\n    steps:\n    - name: Checkout Actions Repository\n      uses: actions/checkout@v4\n\n    - name: Spell Check Repo\n      uses: crate-ci/typos@v1.31.1\n      with:\n        config: typos.toml\n\n  build:\n    needs: spellcheck\n    uses: ./.github/workflows/build.yml\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "---\nname: release\n\non:\n  push:\n    tags:\n    - \"v*\"\n\njobs:\n  build:\n    uses: ./.github/workflows/build.yml\n\n  publish-release:\n    name: Publish Release\n    runs-on: ubuntu-22.04\n\n    permissions:\n      contents: write\n\n    needs:\n    - build\n\n    steps:\n    - name: Download artifacts\n      uses: actions/download-artifact@v4\n    - uses: \"marvinpinto/action-automatic-releases@latest\"\n      with:\n        repo_token: \"${{ secrets.GITHUB_TOKEN }}\"\n        prerelease: false\n        files: |\n          macos-packages-x86_64/*\n          macos-packages-arm64/*\n          windows-packages-x86_64/*\n          linux-packages-amd64/*\n          linux-packages-aarch64/*\n\n    - name: Generate PKGBUILD\n      run: |\n        set -x\n\n        VERSION=\"${GITHUB_REF##*/}\"\n        VERSION=\"${VERSION#v}\"\n        SHA256_x86_64=$(sha256sum linux-packages-amd64/chdig-$VERSION-1-x86_64.pkg.tar.zst | cut -d' ' -f1)\n        SHA256_aarch64=$(sha256sum linux-packages-aarch64/chdig-$VERSION-1-aarch64.pkg.tar.zst | cut -d' ' -f1)\n\n        cat > PKGBUILD <<EOL\n        # shellcheck disable=SC2034,SC2154\n        # - SC2034 - appears unused.\n        # - SC2154 - pkgdir is referenced but not assigned.\n\n        # Maintainer: Azat Khuzhin <a3at.mail@gmail.com>\n        pkgname=chdig-bin\n        pkgver=$VERSION\n        pkgrel=1\n        pkgdesc=\"Dig into ClickHouse with TUI interface (binaries for latest stable version)\"\n        arch=('x86_64' 'aarch64')\n        conflicts=(\"chdig\")\n        provides=(\"chdig\")\n        url=\"https://github.com/azat/chdig\"\n        license=('MIT')\n        source_x86_64=(\"https://github.com/azat/chdig/releases/download/v\\$pkgver/chdig-\\$pkgver-1-x86_64.pkg.tar.zst\")\n        source_aarch64=(\"https://github.com/azat/chdig/releases/download/v\\$pkgver/chdig-\\$pkgver-1-aarch64.pkg.tar.zst\")\n        sha256sums_x86_64=('$SHA256_x86_64')\n        sha256sums_aarch64=('$SHA256_aarch64')\n\n  
      package() {\n            tar -C \"\\$pkgdir\" -xvf chdig-\\$pkgver-1-\\$(uname -m).pkg.tar.zst\n            rm -f \"\\$pkgdir/.PKGINFO\"\n            rm -f \"\\$pkgdir/.MTREE\"\n        }\n        # vim set: ts=4 sw=4 et\n        EOL\n        cat PKGBUILD\n    - name: Publish to the AUR\n      uses: KSXGitHub/github-actions-deploy-aur@v4.1.3\n      if: ${{ github.event.repository.fork == false }}\n      with:\n        pkgname: chdig-bin\n        pkgbuild: PKGBUILD\n        commit_username: Azat Khuzhin\n        commit_email: a3at.mail@gmail.com\n        ssh_private_key: ${{ secrets.AUR_SSH_PRIVATE_KEY }}\n        commit_message: Release ${{ github.ref_name }}\n        # force_push: 'true'\n"
  },
  {
    "path": ".gitignore",
    "content": "# cargo\ntarget\n/vendor\n# distribution\ndist\n# packages\n*.deb\n*.tar.*\n*.tar\n*.rpm\n# intellij\n.idea/\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "---\nrepos:\n- repo: https://github.com/pre-commit/pre-commit-hooks\n  rev: v4.5.0\n  hooks:\n  - id: check-byte-order-marker\n  - id: check-yaml\n  - id: end-of-file-fixer\n  - id: mixed-line-ending\n  - id: trailing-whitespace\n- repo: https://github.com/pre-commit/pre-commit\n  rev: v3.6.0\n  hooks:\n  - id: validate_manifest\n- repo: https://github.com/doublify/pre-commit-rust\n  rev: v1.0\n  hooks:\n  - id: fmt\n    pass_filenames: false\n  - id: cargo-check\n  - id: clippy\n- repo: https://github.com/adrienverge/yamllint.git\n  rev: v1.35.1\n  hooks:\n  - id: yamllint\n"
  },
  {
    "path": ".yamllint",
    "content": "# vi: ft=yaml\n---\nextends: default\n\nrules:\n  indentation:\n    spaces: 2\n    level: error\n    indent-sequences: false\n  line-length:\n    max: 250\n  braces:\n    max-spaces-inside: 1\n  truthy:\n    allowed-values: ['true', 'false', 'yes', 'no']\n    check-keys: true\n  comments:\n    # this is useful to distinguish commented code from comments\n    require-starting-space: false\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[package]\nname = \"chdig\"\nauthors = [\"Azat Khuzhin <a3at.mail@gmail.com>\"]\nhomepage = \"https://github.com/azat/chdig\"\nrepository = \"https://github.com/azat/chdig\"\nreadme = \"README.md\"\ndescription = \"Dig into ClickHouse with TUI interface\"\nlicense = \"MIT\"\nversion = \"26.4.3\"\nedition = \"2024\"\n\n[lib]\nname = \"chdig\"\ncrate-type = [\"staticlib\", \"lib\"]\npath = \"src/lib.rs\"\n\n[[bin]]\nname = \"chdig\"\npath = \"src/main.rs\"\n\n[features]\ndefault = [\"tls\"]\ntls = [\"clickhouse-rs/tls-rustls\"]\ntokio-console = [\"dep:console-subscriber\", \"tokio/tracing\"]\n\n[patch.crates-io]\ncursive = { git = \"https://github.com/azat-rust/cursive\", branch = \"chdig-next\" }\ncursive_core = { git = \"https://github.com/azat-rust/cursive\", branch = \"chdig-next\" }\n\n[dependencies]\n# Basic\nanyhow = { version = \"*\", default-features = false, features = [\"std\"] }\nlibc = { version = \"*\", default-features = false }\nsize = { version = \"*\", default-features = false, features = [\"std\"] }\ntempfile = { version = \"*\", default-features = false }\nurl = { version = \"*\", default-features = false }\nhumantime = { version = \"*\", default-features = false }\nbacktrace = { version = \"*\", default-features = false, features = [\"std\"] }\nfutures = { version = \"*\", default-features = false, features = [\"std\"] }\nstrfmt = { version = \"*\", default-features = false }\nfuzzy-matcher = { version = \"*\", default-features = false }\n# chrono/chrono-tz should match clickhouse-rs\nchrono = { version = \"0.4\", default-features = false, features = [\"std\", \"clock\"] }\nchrono-tz = { version = \"0.8\", default-features = false }\nflexi_logger = { version = \"0.27\", default-features = false }\nlog = { version = \"0.4\", default-features = false }\nfutures-util = { version = \"*\", default-features = false }\nsemver = { version = \"*\", default-features = false }\nserde = { version = \"*\", features = [\"derive\"] }\nserde_json 
= { version = \"*\", default-features = false, features = [\"std\"] }\nserde_yaml = { version = \"*\", default-features = false }\nquick-xml = { version = \"*\", features = [\"serialize\"] }\npercent-encoding = { version = \"*\", default-features = false }\nregex = { version = \"*\", default-features = false, features = [\"std\"] }\n# CLI\nclap = { version = \"*\", default-features = false, features = [\"derive\", \"env\", \"help\", \"usage\", \"std\", \"color\", \"error-context\", \"suggestions\"] }\nclap_complete = { version = \"*\", default-features = false }\n# UI\ncursive = { version = \"*\", default-features = false, features = [\"crossterm-backend\"] }\ncursive-syntect = { version = \"*\", default-features = true }\nunicode-width = \"0.1\"\ncursive-flexi-logger-view = { git = \"https://github.com/azat-rust/cursive-flexi-logger-view\", branch = \"next\", default-features = false }\nsyntect = { version = \"*\", default-features = false, features = [\"default-syntaxes\", \"default-themes\"] }\narboard = { version = \"*\", default-features = false }\nclickhouse-rs = { git = \"https://github.com/azat-rust/clickhouse-rs\", branch = \"next\", default-features = false, features = [\"tokio_io\"] }\ntokio = { version = \"*\", default-features = false, features = [\"macros\"] }\nconsole-subscriber = { version = \"*\", default-features = false, optional = true }\n# Flamegraphs\nflamelens = { git = \"https://github.com/azat-rust/flamelens\", branch = \"diff-mode\", default-features = false }\nratatui = { version = \"0.29.0\", features = [\"unstable-rendered-line-info\"] }\n# Should **only** with the flamelens, since cursive re-export it, while flamelens does not\ncrossterm = { version = \"0.28.1\", features = [\"use-dev-tty\"] }\n# Perfetto\nperfetto_protos = { version = \"*\", default-features = false }\nprotobuf = { version = \"3\", default-features = false }\ntiny_http = { version = \"*\", default-features = false }\n# Sharing\naes-gcm = { version = \"0.10\", 
default-features = false, features = [\"aes\", \"alloc\"] }\nrand = { version = \"0.8\", default-features = false, features = [\"std\", \"std_rng\"] }\nbase64 = { version = \"0.22\", default-features = false, features = [\"std\"] }\n\n[dev-dependencies]\npretty_assertions = { version= \"*\", default-features = false, features = [\"alloc\"] }\n\n[profile.release]\n# Too slow and does not worth it\nlto = false\n\n[lints.clippy]\nneedless_return = \"allow\"\ntype_complexity = \"allow\"\nuninlined_format_args = \"allow\"\n\n[lints.rust]\nelided_lifetimes_in_paths = \"deny\"\n"
  },
  {
    "path": "Documentation/Actions.md",
    "content": "### Actions\n\n`chdig` supports lots of actions, some has shortcut, others available only in\n`Ctlr-P` (fuzzy search by all actions) (also there is `F8` for query actions\nand `F2` for global actions, if you prefer old school).\n\n### Shortcuts\n\nHere is a list of available shortcuts\n\n| Category        | Shortcut      | Description                                   |\n|-----------------|---------------|-----------------------------------------------|\n| Global Shortcuts| **F1**        | Show help                                     |\n|                 | **F2**        | Views                                         |\n|                 | **F8**        | Show actions                                  |\n|                 | **Ctrl-p**    | Fuzzy actions                                 |\n|                 | **F**         | CPU Server Flamegraph                         |\n|                 |               | Real Server Flamegraph                        |\n|                 |               | Memory Server Flamegraph                      |\n|                 |               | Memory Sample Server Flamegraph               |\n|                 |               | Jemalloc Sample Server Flamegraph             |\n|                 |               | Events Server Flamegraph                      |\n|                 |               | Live Server Flamegraph                        |\n|                 |               | CPU Server Flamegraph in speedscope           |\n|                 |               | Real Server Flamegraph in speedscope          |\n|                 |               | Memory Server Flamegraph in speedscope        |\n|                 |               | Memory Sample Server Flamegraph in speedscope |\n|                 |               | Jemalloc Sample Server Flamegraph in speedscope|\n|                 |               | Events Server Flamegraph in speedscope        |\n|                 |               | Live Server Flamegraph in speedscope       
   |\n| Actions         | **<Space>**   | Select                                        |\n|                 | **-**         | Show all queries                              |\n|                 | **+**         | Show queries on shards                        |\n|                 | **/**         | Filter                                        |\n|                 |               | Query details                                 |\n|                 |               | Query profile events                          |\n|                 | **P**         | Query processors                              |\n|                 | **v**         | Query views                                   |\n|                 | **C**         | Show CPU flamegraph                           |\n|                 | **R**         | Show Real flamegraph                          |\n|                 | **M**         | Show memory flamegraph                        |\n|                 |               | Show memory sample flamegraph                 |\n|                 |               | Show jemalloc sample flamegraph               |\n|                 |               | Show events flamegraph                        |\n|                 | **L**         | Show live flamegraph                          |\n|                 |               | Show CPU flamegraph in speedscope             |\n|                 |               | Show Real flamegraph in speedscope            |\n|                 |               | Show memory flamegraph in speedscope          |\n|                 |               | Show memory sample flamegraph in speedscope   |\n|                 |               | Show jemalloc sample flamegraph in speedscope |\n|                 |               | Show events flamegraph in speedscope          |\n|                 |               | Show live flamegraph in speedscope            |\n|                 | **Alt+E**     | Edit query and execute                        |\n|                 | **S**         | 
Show query                                    |\n|                 | **y**         | Copy query to clipboard                       |\n|                 | **s**         | `EXPLAIN SYNTAX`                              |\n|                 | **e**         | `EXPLAIN PLAN`                                |\n|                 | **E**         | `EXPLAIN PIPELINE`                            |\n|                 | **G**         | `EXPLAIN PIPELINE graph=1` (open in browser)  |\n|                 | **I**         | `EXPLAIN INDEXES`                             |\n|                 | **K**         | `KILL` query                                  |\n|                 | **l**         | Show query logs                               |\n|                 | **(**         | Increase number of queries to render to 20    |\n|                 | **)**         | Decrease number of queries to render to 20    |\n| Logs            | **-**         | Turn ON/OFF options:                          |\n|                 |               | - `S` - toggle wrap mode                      |\n|                 | **/**         | Forward search                                |\n|                 | **?**         | Reverse search                                |\n|                 | **s**         | Save logs to file                             |\n|                 | **n**/**N**   | Move to next/previous match                   |\n| Basic navigation| **j**/**k**   | Down/Up                                       |\n|                 | **G**/**g**   | Move to the end/Move to the beginning         |\n|                 | **PageDown**/**PageUp**| Move to the end/Move to the beginning|\n|                 | **Home**      | Reset selection/follow item in table          |\n| chdig controls  | **Esc**       | Back/Quit                                     |\n|                 | **q**         | Back/Quit                                     |\n|                 | **Q**         | Quit forcefully                              
 |\n|                 | **Backspace** | Back                                          |\n|                 | **p**         | Toggle pause                                  |\n|                 | **r**         | Refresh                                       |\n|                 | **T**         | Seek 10 mins backward                         |\n|                 | **t**         | Seek 10 mins forward                          |\n|                 | **Alt+t**     | Set time interval                             |\n|                 | **~**         | chdig debug console                           |\n"
  },
  {
    "path": "Documentation/Bugs.md",
    "content": "### `--history` is broken in some versions\n\nThe reason is that in some ClickHouse versions merge() function ignore aliases.\n"
  },
  {
    "path": "Documentation/Developers.md",
    "content": "## Developer Documentation\n\n### Debugging async code with tokio-console\n\nchdig supports [tokio-console](https://github.com/tokio-rs/console) for debugging async tasks and runtime behavior.\n\nTo enable tokio console support:\n\n1. Build with the `tokio-console` feature:\n   ```bash\n   cargo build --features tokio-console\n   ```\n\n2. Run chdig:\n   ```bash\n   cargo run --features tokio-console\n   ```\n\n3. In a separate terminal, start tokio-console:\n   ```bash\n   # Install if needed\n   cargo install tokio-console\n\n   # Connect to the running application\n   tokio-console\n   ```\n"
  },
  {
    "path": "Documentation/FAQ.md",
    "content": "### What is format of the URL accepted by `chdig`?\n\nThe simplest form is just - **`localhost`**\n\nFor a secure connections with user and password _(note: passing the password on\nthe command line is not safe)_, use:\n\n```sh\nchdig -u 'user:password@clickhouse-host.com/?secure=true'\n```\n\nA full list of supported connection options is available [here](https://github.com/azat-rust/clickhouse-rs/?tab=readme-ov-file#dns).\n\n_Note: This link currently points to my fork, as some changes have not yet been accepted upstream._\n\n### Environment variables\n\nA safer way to pass the password is via environment variables:\n\n\n```sh\nexport CLICKHOUSE_USER='user'\nexport CLICKHOUSE_PASSWORD='password'\nchdig -u 'clickhouse-host.com/?secure=true'\n# or specify the port explicitly\nchdig -u 'clickhouse-host.com:9440/?secure=true'\n```\n\n### What is --config (`CLICKHOUSE_CONFIG`)?\n\nThis is standard config for [ClickHouse client](https://clickhouse.com/docs/interfaces/cli#configuration_files), i.e.\n\n```yaml\nuser: foo\npassword: bar\nhost: play\nsecure: true\n```\n\n_See also some examples and possible advanced use cases [here](/tests/configs)_\n\n### What is --connection?\n\n`--connection` allows you to use predefined connections, that is supported by\n`clickhouse-client` ([1], [2]).\n\nHere is an example in `XML` format:\n\n```xml\n<clickhouse>\n    <connections_credentials>\n        <connection>\n            <name>prod</name>\n            <hostname>prod</hostname>\n            <user>default</user>\n            <password>secret</password>\n            <!-- <secure>false</secure> -->\n            <!-- <skip_verify>false</skip_verify> -->\n            <!-- <ca_certificate></ca_certificate> -->\n            <!-- <client_certificate></client_certificate> -->\n            <!-- <client_private_key></client_private_key> -->\n        </connection>\n    </connections_credentials>\n</clickhouse>\n```\n\nOr in `YAML`:\n\n```yaml\n---\nconnections_credentials:\n 
 prod:\n    name: prod\n    hostname: prod\n    user: default\n    password: secret\n    # secure: false\n    # skip_verify: false\n    # ca_certificate:\n    # client_certificate:\n    # client_private_key:\n```\n\nAnd later, instead of specifying `--url` (with password in plain-text, which is\nhighly not recommended), you can use `chdig --connection prod`.\n\n  [1]: https://github.com/ClickHouse/ClickHouse/pull/45715\n  [2]: https://github.com/ClickHouse/ClickHouse/pull/46480\n\n### What is Perfetto export?\n\nPressing `X` in the queries view exports a timeline visualization to\n[Perfetto UI](https://ui.perfetto.dev) — an open-source trace viewer that\nprovides a zoomable timeline, flamegraph visualization, and SQL-queryable trace\ndata. It runs entirely in the browser.\n\nAn embedded HTTP server starts on port 9001 (lazily, on first export) and serves\nthe binary protobuf trace. The browser opens automatically.\n\nThe export includes data from multiple ClickHouse system tables (when available):\n\n| Source table | What it shows |\n|---|---|\n| In-memory queries | Query duration slices grouped by host/user |\n| `system.opentelemetry_span_log` | Processor pipeline spans |\n| `system.trace_log` (ProfileEvent) | Per-thread counter increments |\n| `system.trace_log` (CPU/Real/Memory) | Stack trace samples (flamegraph in Perfetto) |\n| `system.text_log` | Query log messages grouped by level |\n| `system.query_metric_log` | Per-query metric snapshots |\n| `system.part_log` | Part lifecycle events (NewPart, MergeParts, etc.) 
|\n| `system.query_thread_log` | Per-thread execution with ProfileEvents |\n\nTables that don't exist are silently skipped — the export works with whatever\ndata is available.\n\nWhen queries are selected with `Space`, only those queries are exported.\n\nTo get the richest traces, enable these ClickHouse settings for the queries you\nwant to analyze:\n\n```sql\nSET\n    opentelemetry_start_trace_probability = 1,\n    opentelemetry_trace_processors = 1,\n    opentelemetry_trace_cpu_scheduling = 1,\n    log_query_threads = 1,\n    trace_profile_events = 1,\n    query_metric_log_interval = 0\n```\n\n- `opentelemetry_start_trace_probability` / `opentelemetry_trace_processors` /\n  `opentelemetry_trace_cpu_scheduling` — enable OpenTelemetry spans for the\n  query execution pipeline (populates `system.opentelemetry_span_log`)\n- `log_query_threads` — log per-thread execution info\n  (populates `system.query_thread_log`)\n- `trace_profile_events` — record ProfileEvent counter increments with\n  timestamps into `system.trace_log`, giving precise per-event timelines\n- `query_metric_log_interval` — controls periodic metric snapshots in\n  `system.query_metric_log` (sampled every N milliseconds). Set to `0` to\n  disable if you prefer the more accurate `trace_profile_events`. 
Set to e.g.\n  `1000` (1 second) if you want periodic snapshots — note that these are\n  sampled and less precise than `trace_profile_events`, but lighter on overhead\n\n### What is flamegraph?\n\nIt is best to start with [Brendan Gregg's site](https://www.brendangregg.com/flamegraphs.html) for a solid introduction to flamegraphs.\n\nBelow is a description of the various types of flamegraphs available in `chdig`:\n\n- `Real` - Traces are captured at regular intervals (defined by [`query_profiler_real_time_period_ns`](https://clickhouse.com/docs/operations/settings/settings#query_profiler_real_time_period_ns)/[`global_profiler_real_time_period_ns`](https://clickhouse.com/docs/operations/server-configuration-parameters/settings#global_profiler_real_time_period_ns)) for each thread, regardless of whether the thread is actively running on the CPU\n- `CPU` - Traces are captured only when a thread is actively executing on the CPU, based on the interval specified in [`query_profiler_cpu_time_period_ns`](https://clickhouse.com/docs/operations/settings/settings#query_profiler_cpu_time_period_ns)/[`global_profiler_cpu_time_period_ns`](https://clickhouse.com/docs/operations/server-configuration-parameters/settings#global_profiler_cpu_time_period_ns)\n- `Memory` - Traces are captured after each [`memory_profiler_step`](https://clickhouse.com/docs/operations/settings/settings#memory_profiler_step)/[`total_memory_profiler_step`](https://clickhouse.com/docs/operations/server-configuration-parameters/settings#total_memory_profiler_step) bytes are allocated by the query or server\n- `Live` - Real-time visualization of what the server is doing now from [`system.stack_trace`](https://clickhouse.com/docs/operations/system-tables/stack_trace)\n\nSee also:\n- [Sampling Query Profiler](https://clickhouse.com/docs/operations/optimizing-performance/sampling-query-profiler)\n\n_Note: for `Memory` `chdig` uses `memory_profiler_step` over `memory_profiler_sample_probability`, since the latter is disabled by default_\n\n### Why do I see IO wait reported as zero?\n\n- You should ensure that ClickHouse uses one of the taskstat gathering methods:\n  - procfs\n  - netlink\n\n- Also, for Linux 5.14 you should enable the `kernel.task_delayacct` sysctl as well.\n\n### How to copy text from `chdig`\n\nBy default `chdig` is started with mouse mode enabled in the terminal, so you cannot\ncopy text with this mode enabled. But terminals provide a way to disable it\ntemporarily by pressing some key (usually a combination of `Alt`,\n`Shift` and/or `Ctrl`), so find the one for your terminal, press it, and copy.\n\n---\n\nSee also [bugs list](Bugs.md)\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright 2023 Azat Khuzhin\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the “Software”), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "Makefile",
    "content": "debug ?=\ntarget ?= $(shell rustc -vV | sed -n 's|host: ||p')\n# Parse the target (i.e. aarch64-unknown-linux-musl)\ntarget_os := $(shell echo $(target) | cut -d'-' -f3)\ntarget_libc := $(shell echo $(target) | cut -d'-' -f4)\ntarget_arch := $(shell echo $(target) | cut -d'-' -f1)\nhost_arch := $(shell uname -m)\n\n# Version normalization for deb/rpm:\n# - trim \"v\" prefix\n# - first \"-\" replace with \"+\"\n# - second \"-\" replace with \"~\"\n#\n# Refs: https://www.debian.org/doc/debian-policy/ch-controlfields.html\nCHDIG_VERSION=$(shell git describe | sed -e 's/^v//' -e 's/-/+/' -e 's/-/~/')\n# Refs: https://wiki.archlinux.org/title/Arch_package_guidelines#Package_versioning\nCHDIG_VERSION_ARCH=$(shell git describe | sed -e 's/^v//' -e 's/-/./g')\n\n$(info DESTDIR = $(DESTDIR))\n$(info CHDIG_VERSION = $(CHDIG_VERSION))\n$(info CHDIG_VERSION_ARCH = $(CHDIG_VERSION_ARCH))\n$(info debug = $(debug))\n$(info target = $(target))\n$(info host_arch = $(host_arch))\n\nifdef debug\n  cargo_build_opts :=\n  target_type := debug\nelse\n  cargo_build_opts := --release\n  target_type = release\nendif\n\nifneq ($(target),)\n  cargo_build_opts += --target $(target)\nendif\n\n# Normalize architecture names\nnorm_target_arch := $(shell echo $(target_arch) | sed -e 's/^aarch64$$/arm64/' -e 's/^x86_64$$/amd64/')\nnorm_host_arch := $(shell echo $(host_arch) | sed -e 's/^aarch64$$/arm64/' -e 's/^x86_64$$/amd64/')\n\n$(info Normalized target arch: $(norm_target_arch))\n$(info Normalized host arch: $(norm_host_arch))\n\n# Cross compilation requires some tricks:\n# - use lld linker\n# - explicitly specify path for libstdc++\n# (Also some packages, that you can found in github actions manifests)\n#\n# TODO: allow to use clang/gcc from PATH\nifneq ($(norm_host_arch),$(norm_target_arch))\n  $(info Cross compilation for $(target_arch))\n\n  # Detect the latest lld\n  LLD := $(shell ls /usr/bin/ld.lld /usr/bin/ld.lld-* 2>/dev/null | sort -V | tail -n1)\n  $(info LLD = 
$(LLD))\n  # Detect the latest clang\n  CLANG := $(shell ls /usr/bin/clang /usr/bin/clang-* 2>/dev/null | grep -e '/clang$$' -e '/clang-[0-9]\\+$$' | sort -V | tail -n1)\n  $(info CLANG = $(CLANG))\n  CLANG_CXX := $(shell ls /usr/bin/clang++ /usr/bin/clang++-* 2>/dev/null | grep -e '/clang++$$' -e '/clang++-[0-9]\\+$$' | sort -V | tail -n1)\n  $(info CLANG_CXX = $(CLANG_CXX))\n\n  export CC := $(CLANG)\n  export CXX := $(CLANG_CXX)\n  export RUSTFLAGS := -C linker=$(LLD)\n\n  # /usr/aarch64-linux-gnu/lib64/ (archlinux aarch64-linux-gnu-gcc)\n  prefix := /usr/$(target_arch)-$(target_os)-gnu/lib\n  ifneq ($(wildcard $(prefix)),)\n    export RUSTFLAGS := $(RUSTFLAGS) -C link-args=-L$(prefix)\n  endif\n  prefix := /usr/$(target_arch)-$(target_os)-gnu/lib64\n  ifneq ($(wildcard $(prefix)),)\n    export RUSTFLAGS := $(RUSTFLAGS) -C link-args=-L$(prefix)\n  endif\n\n  # /usr/lib/gcc-cross/aarch64-linux-gnu/$gcc (ubuntu)\n  latest_gcc_cross_version := $(shell ls -d /usr/lib/gcc-cross/$(target_arch)-$(target_os)-gnu/* 2>/dev/null | sort -V | tail -n1 | xargs -I{} basename {})\n  prefix := /usr/lib/gcc-cross/$(target_arch)-$(target_os)-gnu/$(latest_gcc_cross_version)\n  ifneq ($(wildcard $(prefix)),)\n    export RUSTFLAGS := $(RUSTFLAGS) -C link-args=-L$(prefix)\n  endif\n\n  # NOTE: there is also https://musl.cc/aarch64-linux-musl-cross.tgz\n\n  $(info RUSTFLAGS = $(RUSTFLAGS))\nendif\n\n.PHONY: build build_completion deploy-binary chdig install run \\\n\tdeb rpm archlinux tar packages\n\n# This should be the first target (since \".DEFAULT_GOAL\" is supported only since 3.80+)\ndefault: build\n.DEFAULT_GOAL: default\n\nchdig:\n\tcargo build $(cargo_build_opts)\n\nrun: chdig\n\tcargo run $(cargo_build_opts)\n\nbuild: chdig deploy-binary\n\ntest:\n\t@if command -v cargo-nextest >/dev/null 2>&1; then \\\n\t\tcargo nextest run $(cargo_build_opts); \\\n\telse \\\n\t\tcargo test $(cargo_build_opts); \\\n\tfi\n\nbuild_completion: chdig\n\tcargo run $(cargo_build_opts) -- 
--completion bash > target/chdig.bash-completion\n\ninstall: chdig build_completion\n\tinstall -m755 -D -t $(DESTDIR)/bin target/$(target)/$(target_type)/chdig\n\tinstall -m644 -D -t $(DESTDIR)/share/bash-completion/completions target/chdig.bash-completion\n\ndeploy-binary: chdig\n\tcp target/$(target)/$(target_type)/chdig target/chdig\n\npackages: build build_completion deb rpm archlinux tar\n\ndeb: build\n\tCHDIG_VERSION=${CHDIG_VERSION} CHDIG_ARCH=${norm_target_arch} nfpm package --config chdig-nfpm.yaml --packager deb\nrpm: build\n\tCHDIG_VERSION=${CHDIG_VERSION} CHDIG_ARCH=${target_arch} nfpm package --config chdig-nfpm.yaml --packager rpm\narchlinux: build\n\tCHDIG_VERSION=${CHDIG_VERSION_ARCH} CHDIG_ARCH=${target_arch} nfpm package --config chdig-nfpm.yaml --packager archlinux\n.ONESHELL:\ntar: archlinux\n\tCHDIG_VERSION=${CHDIG_VERSION_ARCH} CHDIG_ARCH=${target_arch} nfpm package --config chdig-nfpm.yaml --packager archlinux\n\ttmp_dir=$(shell mktemp -d /tmp/chdig-${CHDIG_VERSION}.XXXXXX)\n\techo \"Temporary directory for tar package: $$tmp_dir\"\n\ttar -C $$tmp_dir -vxf chdig-${CHDIG_VERSION_ARCH}-1-${target_arch}.pkg.tar.zst usr\n\t# Strip /tmp/chdig-${CHDIG_VERSION}.XXXXXX and replace it with chdig-${CHDIG_VERSION}\n\t# (and we need to remove leading slash)\n\ttar --show-transformed-names --transform \"s#^$${tmp_dir#/}#chdig-${CHDIG_VERSION}-${target_arch}#\" -vczf chdig-${CHDIG_VERSION}-${target_arch}.tar.gz $$tmp_dir\n\techo rm -fr $$tmp_dir\n\nhelp:\n\t@echo \"Usage: make [debug=1] [target=<TRIPLE>]\"\n"
  },
  {
    "path": "README.md",
    "content": "### chdig\n\nDig into [ClickHouse](https://github.com/ClickHouse/ClickHouse/) with TUI interface.\n\n### Installation\n\n`chdig` is also available as part of `clickhouse` - `clickhouse chdig`, but\nthat version may be slightly outdated.\n\nPre-built packages (`.deb`, `.rpm`, `archlinux`, `.tar.gz`) and standalone\nbinaries for `Linux` and `macOS` are available for both `x86_64` and `aarch64`\narchitectures.\n\nThe latest [unstable release can be found on GitHub](https://github.com/azat/chdig/releases/tag/latest).\n\n*See also the complete list of [releases](https://github.com/azat/chdig/releases).*\n\n<details>\n\n<summary>Package repositories (AUR, Scoop, Homebrew)</summary>\n\n#### archlinux user repository (aur)\n\nAnd also for archlinux there is an aur package:\n- [**chdig-latest-bin**](https://aur.archlinux.org/packages/chdig-latest-bin) - binary artifact of the upstream\n- [chdig-git](https://aur.archlinux.org/packages/chdig-git) - build from sources\n- [chdig-bin](https://aur.archlinux.org/packages/chdig-bin) - binary of the latest stable version\n\n*Note: `chdig-latest-bin` is recommended because it is latest available version and you don't need toolchain to compile*\n\n#### scoop (windows)\n\n```\nscoop bucket add extras\nscoop install extras/chdig\n```\n\n#### brew (macos)\n\n```\nbrew install chdig\n```\n\n</details>\n\n### Demo\n\n[![asciicast](https://github.com/azat/chdig/releases/download/v26.1.1/chdig-v26.1.1.gif)](https://asciinema.org/a/OvQIBpQCAtFU8AyF)\n\n### Motivation\n\nThe idea is came from everyday digging into various ClickHouse issues.\n\nClickHouse has a approximately universe of introspection tools, and it is easy\nto forget some of them. 
At first I came with some\n[slides](https://azat.sh/presentations/2022-know-your-clickhouse/) and a\npicture (to attract your attention) by analogy to what [Brendan\nGregg](https://www.brendangregg.com/linuxperf.html) did for Linux:\n\n[![Know Your ClickHouse](https://azat.sh/presentations/2022-know-your-clickhouse/Know-Your-ClickHouse.png)](https://azat.sh/presentations/2022-know-your-clickhouse/Know-Your-ClickHouse.png)\n\n*Note, the picture and the presentation had been made in the beginning of 2022,\nso it may not include some new introspection tools*.\n\nBut this requires you to dig into lots of places, and even though during this\nprocess you will learn a lot, it does not solves the problem of forgetfulness.\nSo I came up with this simple TUI interface that tries to make this process\nsimpler.\n\n`chdig` can be used not only to debug some problems, but also just as a regular\nintrospection, like `top` for Linux.\n\n### Features\n\n- `top` like interface (or [`csysdig`](https://github.com/draios/sysdig) to be more precise)\n- [Flamegraphs](Documentation/FAQ.md#what-is-flamegraph) (CPU/Real/Memory/Live) in TUI (thanks to [flamelens](https://github.com/ys-l/flamelens))\n- [Perfetto support](Documentation/FAQ.md#what-is-perfetto-export)\n- Share flamegraphs (using [pastila.nl](https://pastila.nl/) and [speedscope](https://www.speedscope.app/))\n- Share logs via [pastila.nl](https://pastila.nl/)\n- Share query pipelines (using [viz.js](https://github.com/mdaines/viz-js) and [pastila.nl](https://pastila.nl/))\n- Cluster support (`--cluster`) - aggregate data from all hosts in the cluster\n- Historical support (`--history`) - includes rotated `system.*_log_*` tables\n- `clickhouse-client` compatibility (including `--connection`) for options and configuration files\n\nAnd there is a huge bunch of [ideas](https://github.com/azat/chdig/issues).\n\n**Note, this it is in a pre-alpha stage, so everything can be changed (keyboard\nshortcuts, views, color schema and of 
course features)**\n\n### Requirements\n\nIf something does not work, like you have too old version of `ClickHouse`, consider upgrading.\n\n*Note: the oldest version that had been tested was 21.2 (at some point in time)*\n\n### Build from sources\n\n```\ncargo build\n```\n\n> [!NOTE]\n> If you see an error like `failed to authenticate when downloading repository: git@github.com:azat-rust/cursive`,\n> it is likely because your local Git config is rewriting `https://github.com/` to `git@github.com:`:\n>\n> ```\n> [url \"git@github.com:\"]\n>     insteadOf = https://github.com/\n> ```\n>\n> Cargo's built-in Git library does not handle this case gracefully.\n> You can either remove that config entry or tell Cargo to use the system Git client instead:\n>\n> ```toml\n> # ~/.cargo/config.toml\n> [net]\n> git-fetch-with-cli = true\n> ```\n\nFor development and debugging information, see [Documentation/Developers.md](Documentation/Developers.md).\n\n## References\n\n- [FAQ](Documentation/FAQ.md)\n- [Bugs list](Documentation/Bugs.md)\n- [Shortcuts](Documentation/Actions.md#shortcuts)\n- [Developers](Documentation/Developers.md)\n"
  },
  {
    "path": "chdig-nfpm.yaml",
    "content": "---\nname: \"chdig\"\narch: \"${CHDIG_ARCH}\"\nplatform: \"linux\"\nversion: \"${CHDIG_VERSION}\"\nhomepage: \"https://github.com/azat/chdig\"\nlicense: \"Apache\"\npriority: \"optional\"\nmaintainer: \"Azat Khuzhin <a3at.mail@gmail.com>\"\ndescription: |\n  Dig into ClickHouse queries with TUI interface.\n\ncontents:\n- src: target/chdig\n  dst: /usr/bin/chdig\n  file_info:\n    mode: 0755\n- src: target/chdig.bash-completion\n  dst: /usr/share/bash-completion/completions/chdig\n  file_info:\n    mode: 0644\n- src: README.md\n  dst: /usr/share/doc/chdig/README.md\n  file_info:\n    mode: 0644\n"
  },
  {
    "path": "rustfmt.toml",
    "content": "edition = \"2018\"\n"
  },
  {
    "path": "src/actions.rs",
    "content": "use cursive::{event::Event, theme::Effect, utils::markup::StyledString};\n\n#[derive(Clone)]\npub struct ActionDescription {\n    pub text: &'static str,\n    pub event: Event,\n}\n\nimpl ActionDescription {\n    pub fn event_string(&self) -> String {\n        match self.event {\n            Event::Char(c) => {\n                // - It is hard to understand that nothing is a space\n                // - And it overlaps with no shortcut actions\n                if c == ' ' {\n                    return \"<Space>\".to_string();\n                } else {\n                    return c.to_string();\n                }\n            }\n            Event::CtrlChar(c) => {\n                return format!(\"Ctrl+{}\", c);\n            }\n            Event::AltChar(c) => {\n                return format!(\"Alt+{}\", c);\n            }\n            Event::Key(k) => {\n                return format!(\"{:?}\", k);\n            }\n            Event::Unknown(_) => {\n                return \"\".to_string();\n            }\n            _ => panic!(\"{:?} is not supported\", self.event),\n        }\n    }\n    pub fn preview_styled(&self) -> StyledString {\n        let mut text = StyledString::default();\n        text.append_styled(format!(\"{:>10}\", self.event_string()), Effect::Bold);\n        text.append_plain(format!(\" - {}\\n\", self.text));\n        return text;\n    }\n}\n"
  },
  {
    "path": "src/bin.rs",
    "content": "use anyhow::{Result, anyhow};\nuse backtrace::Backtrace;\nuse flexi_logger::{FileSpec, LogSpecification, Logger};\nuse std::ffi::OsString;\nuse std::panic::{self, PanicHookInfo};\nuse std::sync::Arc;\n\nuse cursive::view::Resizable;\n\nuse crate::{\n    interpreter::{ClickHouse, Context, ContextArc, options},\n    view::Navigation,\n};\n\n// NOTE: hyper also has trace_span() which will not be overwritten\n//\n// FIXME: should be initialize before options, but options prints completion that should be\n// done before terminal switched to raw mode.\nconst DEFAULT_RUST_LOG: &str = \"trace,cursive=info,clickhouse_rs=info,hyper=info,rustls=info\";\n\nfn panic_hook(info: &PanicHookInfo<'_>) {\n    let location = info.location().unwrap();\n\n    let msg = if let Some(s) = info.payload().downcast_ref::<&'static str>() {\n        *s\n    } else if let Some(s) = info.payload().downcast_ref::<String>() {\n        &s[..]\n    } else {\n        \"Box<Any>\"\n    };\n\n    // NOTE: we need to add \\r since the terminal is in raw mode.\n    // (another option is to restore the terminal state with termios)\n    let stacktrace: String = format!(\"{:?}\", Backtrace::new()).replace('\\n', \"\\n\\r\");\n\n    print!(\n        \"\\n\\rthread '<unnamed>' panicked at '{}', {}\\n\\r{}\",\n        msg, location, stacktrace\n    );\n}\n\npub async fn chdig_main_async<I, T>(itr: I) -> Result<()>\nwhere\n    I: IntoIterator<Item = T>,\n    T: Into<OsString> + Clone,\n{\n    let options = options::parse_from(itr)?;\n\n    let mut logger_handle = None;\n    // We start logging to file earlier for better introspection.\n    if let Some(log) = &options.service.log {\n        logger_handle = Some(\n            Logger::try_with_env_or_str(DEFAULT_RUST_LOG)?\n                .log_to_file(FileSpec::try_from(log)?)\n                .format(flexi_logger::with_thread)\n                .start()?,\n        );\n    }\n\n    // Initialize it before any backends (otherwise backend will prepare 
terminal for TUI app, and\n    // panic hook will clear the screen).\n    let clickhouse = Arc::new(ClickHouse::new(options.clickhouse.clone()).await?);\n\n    let server_warnings = match clickhouse.get_warnings().await {\n        Ok(w) => w,\n        Err(e) => {\n            log::warn!(\"Failed to fetch system.warnings: {}\", e);\n            Vec::new()\n        }\n    };\n\n    panic::set_hook(Box::new(|info| {\n        panic_hook(info);\n    }));\n\n    let backend = cursive::backends::try_default().map_err(|e| anyhow!(e.to_string()))?;\n    let mut siv = cursive::CursiveRunner::new(cursive::Cursive::new(), backend);\n\n    if options.service.log.is_none() {\n        logger_handle = Some(\n            Logger::try_with_env_or_str(DEFAULT_RUST_LOG)?\n                .log_to_writer(cursive_flexi_logger_view::cursive_flexi_logger(&siv))\n                .format(flexi_logger::colored_with_thread)\n                .start()?,\n        );\n    }\n\n    // FIXME: should be initialized before cursive, otherwise on error it clears the terminal.\n    let context: ContextArc = Context::new(options, clickhouse, siv.cb_sink().clone()).await?;\n\n    siv.chdig(context.clone());\n\n    if !server_warnings.is_empty() {\n        let text = server_warnings.join(\"\\n\");\n        siv.add_layer(\n            cursive::views::Dialog::around(cursive::views::ScrollView::new(\n                cursive::views::TextView::new(text),\n            ))\n            .title(\"Server warnings\")\n            .button(\"OK\", |s| {\n                s.pop_layer();\n            })\n            .max_width(80),\n        );\n    }\n\n    log::info!(\"chdig started\");\n    siv.run();\n\n    if let Some(logger_handle) = logger_handle {\n        // Suppress error from the cursive_flexi_logger_view - \"cursive callback sink is closed!\"\n        // Note, cursive_flexi_logger_view does not implements shutdown() so it will not help.\n        logger_handle.set_new_spec(LogSpecification::parse(\"none\")?);\n    
}\n\n    return Ok(());\n}\n\nfn collect_args(argc: c_int, argv: *const *const c_char) -> Vec<OsString> {\n    use std::ffi::CStr;\n    unsafe {\n        std::slice::from_raw_parts(argv, argc as usize)\n            .iter()\n            .map(|&ptr| {\n                let c_str = CStr::from_ptr(ptr);\n                let string = c_str.to_string_lossy().into_owned();\n                OsString::from(string)\n            })\n            .collect()\n    }\n}\n\nuse std::os::raw::{c_char, c_int};\n#[unsafe(no_mangle)]\npub extern \"C\" fn chdig_main(argc: c_int, argv: *const *const c_char) -> c_int {\n    #[cfg(feature = \"tokio-console\")]\n    console_subscriber::init();\n\n    tokio::runtime::Builder::new_current_thread()\n        .enable_all()\n        .build()\n        .unwrap()\n        .block_on(chdig_main_async(collect_args(argc, argv)))\n        .unwrap_or_else(|e| {\n            eprintln!(\"{}\", e);\n            std::process::exit(1);\n        });\n    return 0;\n}\n"
  },
  {
    "path": "src/common/mod.rs",
    "content": "mod relative_date_time;\npub mod sparkline;\nmod stopwatch;\n\npub use relative_date_time::RelativeDateTime;\npub use relative_date_time::parse_datetime_or_date;\npub use stopwatch::Stopwatch;\n"
  },
  {
    "path": "src/common/relative_date_time.rs",
    "content": "use chrono::{DateTime, Local, NaiveDate, NaiveDateTime, TimeDelta};\nuse std::{\n    fmt::Display,\n    ops::{AddAssign, SubAssign},\n    str::FromStr,\n};\n\npub fn parse_datetime_or_date(value: &str) -> Result<DateTime<Local>, String> {\n    let mut errors = Vec::new();\n    // Parse without timezone\n    match value.parse::<NaiveDateTime>() {\n        Ok(datetime) => return Ok(datetime.and_local_timezone(Local).unwrap()),\n        Err(err) => errors.push(err),\n    }\n    // Parse *with* timezone\n    match value.parse::<DateTime<Local>>() {\n        Ok(datetime) => return Ok(datetime),\n        Err(err) => errors.push(err),\n    }\n    // Parse as date\n    match value.parse::<NaiveDate>() {\n        Ok(date) => {\n            return Ok(date\n                .and_hms_opt(0, 0, 0)\n                .unwrap()\n                .and_local_timezone(Local)\n                .unwrap());\n        }\n        Err(err) => errors.push(err),\n    }\n    return Err(format!(\n        \"Valid RFC3339-formatted (YYYY-MM-DDTHH:MM:SS[.ssssss][±hh:mm|Z]) datetime or date while parsing '{}':\\n{}\",\n        value,\n        errors\n            .iter()\n            .map(|e| e.to_string())\n            .collect::<Vec<String>>()\n            .join(\"\\n\")\n    ));\n}\n\n#[derive(Clone, Debug)]\npub struct RelativeDateTime {\n    date_time: Option<DateTime<Local>>,\n    // Always subtracted\n    offset: Option<TimeDelta>,\n}\n\nimpl RelativeDateTime {\n    pub fn new(offset: Option<TimeDelta>) -> Self {\n        Self {\n            date_time: None,\n            offset,\n        }\n    }\n\n    pub fn get_date_time(&self) -> Option<DateTime<Local>> {\n        self.date_time\n    }\n\n    pub fn to_editable_string(&self) -> String {\n        match (&self.date_time, &self.offset) {\n            (None, Some(offset)) => {\n                humantime::format_duration(offset.to_std().unwrap_or_default()).to_string()\n            }\n            (Some(dt), _) => 
dt.format(\"%Y-%m-%dT%H:%M:%S\").to_string(),\n            (None, None) => String::new(),\n        }\n    }\n\n    pub fn to_sql_datetime_64(&self) -> Option<String> {\n        match (self.date_time, self.offset) {\n            (Some(date_time), Some(offset)) => Some(format!(\n                \"fromUnixTimestamp64Nano({}) - INTERVAL {} NANOSECOND\",\n                date_time.timestamp_nanos_opt()?,\n                offset.num_nanoseconds()?\n            )),\n            (None, Some(offset)) => Some(format!(\n                \"now() - INTERVAL {} NANOSECOND\",\n                offset.num_nanoseconds()?\n            )),\n            (Some(date_time), None) => Some(format!(\n                \"fromUnixTimestamp64Nano({})\",\n                date_time.timestamp_nanos_opt()?\n            )),\n            (None, None) => Some(\"now()\".to_string()),\n        }\n    }\n}\n\nimpl From<DateTime<Local>> for RelativeDateTime {\n    fn from(value: DateTime<Local>) -> Self {\n        RelativeDateTime {\n            date_time: Some(value),\n            offset: None,\n        }\n    }\n}\n\nimpl From<Option<DateTime<Local>>> for RelativeDateTime {\n    fn from(value: Option<DateTime<Local>>) -> Self {\n        RelativeDateTime {\n            date_time: value,\n            offset: None,\n        }\n    }\n}\n\nimpl FromStr for RelativeDateTime {\n    type Err = anyhow::Error;\n\n    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {\n        // Empty string is a special case for relative \"now\"\n        // (i.e. 
it will be always calculated from current time)\n        if s.is_empty() {\n            Ok(RelativeDateTime {\n                date_time: None,\n                offset: None,\n            })\n        } else if let Ok(datetime) = parse_datetime_or_date(s) {\n            Ok(RelativeDateTime {\n                date_time: Some(datetime),\n                offset: None,\n            })\n        } else {\n            Ok(RelativeDateTime {\n                date_time: None,\n                offset: Some(TimeDelta::from_std(\n                    s.parse::<humantime::Duration>()?.into(),\n                )?),\n            })\n        }\n    }\n}\n\nimpl From<RelativeDateTime> for DateTime<Local> {\n    fn from(value: RelativeDateTime) -> Self {\n        let mut date_time = value.date_time.unwrap_or(Local::now());\n        if let Some(offset) = value.offset {\n            date_time -= offset;\n        }\n        return date_time;\n    }\n}\n\nimpl Display for RelativeDateTime {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        f.write_fmt(format_args!(\n            \"{:?} (offset={:?})\",\n            self.date_time, self.offset\n        ))\n    }\n}\n\nimpl AddAssign<TimeDelta> for RelativeDateTime {\n    fn add_assign(&mut self, rhs: TimeDelta) {\n        self.offset = Some(rhs);\n    }\n}\n\nimpl SubAssign<TimeDelta> for RelativeDateTime {\n    fn sub_assign(&mut self, rhs: TimeDelta) {\n        self.offset = Some(rhs);\n    }\n}\n"
  },
  {
    "path": "src/common/sparkline.rs",
    "content": "use std::collections::VecDeque;\n\nconst BLOCKS: &[char] = &['▁', '▂', '▃', '▄', '▅', '▆', '▇', '█'];\n\npub struct SparklineBuffer {\n    data: VecDeque<f64>,\n    capacity: usize,\n}\n\nimpl SparklineBuffer {\n    pub fn new(capacity: usize) -> Self {\n        Self {\n            data: VecDeque::with_capacity(capacity),\n            capacity,\n        }\n    }\n\n    pub fn push(&mut self, value: f64) {\n        if self.data.len() == self.capacity {\n            self.data.pop_front();\n        }\n        self.data.push_back(value);\n    }\n\n    pub fn render(&self, width: usize) -> String {\n        if self.data.is_empty() {\n            return String::new();\n        }\n\n        let samples: Vec<f64> = self\n            .data\n            .iter()\n            .rev()\n            .take(width)\n            .copied()\n            .collect::<Vec<_>>()\n            .into_iter()\n            .rev()\n            .collect();\n\n        let min = samples.iter().copied().fold(f64::INFINITY, f64::min);\n        let max = samples.iter().copied().fold(f64::NEG_INFINITY, f64::max);\n        let range = max - min;\n\n        samples\n            .iter()\n            .map(|&v| {\n                if range == 0.0 {\n                    BLOCKS[BLOCKS.len() / 2]\n                } else {\n                    let idx = ((v - min) / range * (BLOCKS.len() - 1) as f64).round() as usize;\n                    BLOCKS[idx.min(BLOCKS.len() - 1)]\n                }\n            })\n            .collect()\n    }\n}\n"
  },
  {
    "path": "src/common/stopwatch.rs",
    "content": "/// Stupid and simple implementation of stopwatch.\nuse std::time::{Duration, Instant};\n\npub struct Stopwatch {\n    start_time: Instant,\n}\n\nimpl Stopwatch {\n    pub fn start_new() -> Stopwatch {\n        Stopwatch {\n            start_time: Instant::now(),\n        }\n    }\n\n    pub fn elapsed_ms(&self) -> u64 {\n        return self.elapsed().as_millis() as u64;\n    }\n\n    pub fn elapsed(&self) -> Duration {\n        return self.start_time.elapsed();\n    }\n}\n"
  },
  {
    "path": "src/interpreter/background_runner.rs",
    "content": "use std::sync::{Arc, Condvar, Mutex, atomic};\nuse std::thread;\nuse std::time::Duration;\n\n/// Runs periodic tasks in background thread.\n///\n/// It is OK to suppress unused warning for this code, since it join the thread in drop()\n/// correctly, example:\n///\n/// ``rust\n/// pub struct SomeView {\n///     #[allow(unused)]\n///     bg_runner: BackgroundRunner,\n/// }\n/// ``\n///\npub struct BackgroundRunner {\n    interval: Duration,\n    thread: Option<thread::JoinHandle<()>>,\n    force: Arc<atomic::AtomicBool>,\n    exit: Arc<Mutex<bool>>,\n    cv: Arc<(Mutex<()>, Condvar)>,\n}\n\nimpl Drop for BackgroundRunner {\n    fn drop(&mut self) {\n        log::debug!(\"Stopping updates\");\n        *self.exit.lock().unwrap() = true;\n        self.cv.1.notify_all();\n        self.thread.take().unwrap().join().unwrap();\n        log::debug!(\"Updates stopped\");\n    }\n}\n\nimpl BackgroundRunner {\n    pub fn new(\n        interval: Duration,\n        cv: Arc<(Mutex<()>, Condvar)>,\n        force: Arc<atomic::AtomicBool>,\n    ) -> Self {\n        return Self {\n            interval,\n            thread: None,\n            force,\n            exit: Arc::new(Mutex::new(false)),\n            cv,\n        };\n    }\n\n    pub fn start<C: Fn(bool) + std::marker::Send + 'static>(&mut self, callback: C) {\n        let interval = self.interval;\n        let cv = self.cv.clone();\n        let exit = self.exit.clone();\n        let force = self.force.clone();\n        self.thread = Some(std::thread::spawn(move || {\n            loop {\n                let was_force = force.swap(false, atomic::Ordering::SeqCst);\n                callback(was_force);\n\n                if *exit.lock().unwrap() {\n                    break;\n                }\n\n                let _ = cv.1.wait_timeout(cv.0.lock().unwrap(), interval).unwrap();\n                if *exit.lock().unwrap() {\n                    break;\n                }\n            }\n        }));\n        // 
Explicitly trigger at least one update with force\n        self.schedule();\n    }\n\n    pub fn schedule(&mut self) {\n        self.force.store(true, atomic::Ordering::SeqCst);\n        self.cv.1.notify_all();\n    }\n}\n"
  },
  {
    "path": "src/interpreter/clickhouse.rs",
    "content": "use crate::{\n    common::RelativeDateTime,\n    interpreter::{\n        ClickHouseAvailableQuirks, ClickHouseQuirks,\n        options::{ClickHouseOptions, LogsOrder},\n    },\n};\nuse anyhow::{Error, Result};\nuse chrono::{DateTime, Local};\nuse chrono_tz::Tz;\nuse clickhouse_rs::{\n    Block, Options, Pool,\n    types::{Complex, FromSql},\n};\nuse futures_util::StreamExt;\nuse std::collections::HashMap;\nuse std::str::FromStr;\n\n// TODO:\n// - implement parsing using serde\n// - replace clickhouse_rs::client_info::write() (with extend crate) to change the client name\n// - escape parameters\n\npub type Columns = Block<Complex>;\n\npub struct ClickHouse {\n    pub quirks: ClickHouseQuirks,\n    // Server has use_shared_merge_tree_log_pipeline enabled (SharedMergeTree-backed system.*_log).\n    // When true, system.*_log reads do not need clusterAllReplicas(): one replica sees all rows.\n    shared_log_pipeline: bool,\n    options: ClickHouseOptions,\n    pool: Pool,\n}\n\n#[derive(Debug, PartialEq, Clone)]\n#[allow(clippy::upper_case_acronyms)]\npub enum TraceType {\n    CPU,\n    Real,\n    Memory,\n    MemorySample,\n    JemallocSample,\n    ProfileEvent,\n    MemoryAllocatedWithoutCheck,\n}\n\n#[derive(Debug, Clone)]\npub struct TextLogArguments {\n    pub query_ids: Option<Vec<String>>,\n    pub logger_names: Option<Vec<String>>,\n    pub hostname: Option<String>,\n    pub message_filter: Option<String>,\n    pub max_level: Option<String>,\n    pub start: DateTime<Local>,\n    pub end: RelativeDateTime,\n}\n\n#[derive(Default)]\npub struct ClickHouseServerCPU {\n    pub count: u64,\n    pub user: u64,\n    pub system: u64,\n}\n/// NOTE: Likely misses threads for IO\n#[derive(Default)]\npub struct ClickHouseServerThreadPools {\n    pub merges_mutations: u64,\n    pub fetches: u64,\n    pub common: u64,\n    pub moves: u64,\n    pub schedule: u64,\n    pub buffer_flush: u64,\n    pub distributed: u64,\n    pub message_broker: u64,\n    pub 
backups: u64,\n    pub io: u64,\n    pub remote_io: u64,\n    pub queries: u64,\n}\n#[derive(Default)]\npub struct ClickHouseServerThreads {\n    pub os_total: u64,\n    pub os_runnable: u64,\n    pub tcp: u64,\n    pub http: u64,\n    pub interserver: u64,\n    pub pools: ClickHouseServerThreadPools,\n}\n#[derive(Default)]\npub struct ClickHouseServerMemory {\n    pub os_total: u64,\n    pub resident: u64,\n\n    pub tracked: u64,\n    pub tables: u64,\n    pub caches: u64,\n    pub queries: u64,\n    pub merges_mutations: u64,\n    pub active_merges: u64,\n    pub async_inserts: u64,\n    pub dictionaries: u64,\n    pub primary_keys: u64,\n    pub fragmentation: u64,\n    pub index_granularity: u64,\n    pub io: u64,\n}\n/// May have duplicated accounting (due to bridges and stuff)\n#[derive(Default)]\npub struct ClickHouseServerNetwork {\n    pub send_bytes: u64,\n    pub receive_bytes: u64,\n}\n#[derive(Default)]\npub struct ClickHouseServerUptime {\n    pub _os: u64,\n    pub server: u64,\n}\n/// May not take into account some block devices (due to filter by sd*/nvme*/vd*)\n#[derive(Default)]\npub struct ClickHouseServerBlockDevices {\n    pub read_bytes: u64,\n    pub write_bytes: u64,\n}\n#[derive(Default)]\npub struct ClickHouseServerStorages {\n    pub buffer_bytes: u64,\n    // Replace with bytes once [1] is merged.\n    //\n    //   [1]: https://github.com/ClickHouse/ClickHouse/pull/50238\n    pub distributed_insert_files: u64,\n    pub total_rows: u64,\n    pub total_bytes: u64,\n}\n#[derive(Default)]\npub struct ClickHouseServerRows {\n    pub selected: u64,\n    pub inserted: u64,\n}\n#[derive(Default)]\npub struct ClickHouseServerSummary {\n    pub queries: u64,\n    pub merges: u64,\n    pub mutations: u64,\n    pub replication_queue: u64,\n    pub replication_queue_tries: u64,\n    pub fetches: u64,\n    pub servers: u64,\n    pub rows: ClickHouseServerRows,\n    pub storages: ClickHouseServerStorages,\n    pub uptime: 
ClickHouseServerUptime,\n    pub memory: ClickHouseServerMemory,\n    pub cpu: ClickHouseServerCPU,\n    pub threads: ClickHouseServerThreads,\n    pub network: ClickHouseServerNetwork,\n    pub blkdev: ClickHouseServerBlockDevices,\n    pub update_interval: u64,\n}\n\npub struct QueryMetricRow {\n    pub host_name: String,\n    pub timestamp_ns: u64,\n    pub memory_usage: i64,\n    pub peak_memory_usage: i64,\n    pub profile_events: HashMap<String, u64>,\n}\n\npub struct MetricLogRow {\n    pub timestamp_ns: u64,\n    pub profile_events: HashMap<String, u64>,\n    pub current_metrics: HashMap<String, i64>,\n}\n\nfn collect_values<'b, T: FromSql<'b>>(block: &'b Columns, column: &str) -> Vec<T> {\n    return (0..block.row_count())\n        .map(|i| block.get(i, column).unwrap())\n        .collect();\n}\n\nconst CHDIG_CLIENT_NAME: [&str; 2] = [\"chdig\", env!(\"CARGO_PKG_VERSION\")];\nfn get_client_name() -> String {\n    return CHDIG_CLIENT_NAME.join(\"-\");\n}\n\nimpl ClickHouse {\n    pub async fn new(options: ClickHouseOptions) -> Result<Self> {\n        let url = format!(\n            \"{}&client_name={}\",\n            options.url.clone().unwrap(),\n            get_client_name()\n        );\n        let connect_options: Options = Options::from_str(&url)?\n            .with_setting(\n                \"storage_system_stack_trace_pipe_read_timeout_ms\",\n                1000,\n                /* is_important= */ false,\n            )\n            // FIXME: ClickHouse's analyzer does not handle ProfileEvents.Names (and similar), it throws:\n            //\n            //   Invalid column type for ColumnUnique::insertRangeFrom. 
Expected String, got LowCardinality(String)\n            //\n            .with_setting(\"allow_experimental_analyzer\", false, true)\n            // TODO: add support of Map type for LowCardinality in the driver\n            .with_setting(\"low_cardinality_allow_in_native_format\", false, true);\n        let pool = Pool::new(connect_options);\n\n        let mut handle = pool.get_handle().await.map_err(|e| {\n            Error::msg(format!(\n                \"Cannot connect to ClickHouse at {} ({})\",\n                options.url_safe, e\n            ))\n        })?;\n\n        let version = if let Some(override_version) = &options.server_version {\n            override_version.clone()\n        } else {\n            let version = handle\n                .query(\"SELECT version()\")\n                .fetch_all()\n                .await?\n                .get::<String, _>(0, 0)?;\n\n            // Get VERSION_DESCRIBE from system.build_options for full version info (only build_options\n            // include version prefix, i.e. -stable/-testing)\n            handle\n                .query(\"SELECT value FROM system.build_options WHERE name = 'VERSION_DESCRIBE'\")\n                .fetch_all()\n                .await?\n                .get::<String, _>(0, 0)\n                .unwrap_or_else(|_| version.clone())\n        };\n\n        let quirks = ClickHouseQuirks::new(version);\n\n        // SMT-backed system.*_log (ClickHouse Cloud) exposes all replicas' rows through any single\n        // replica, so clusterAllReplicas() is pure overhead there. 
The setting is off by default\n        // and on self-hosted clusters, so we silently fall back to the cluster-wrapped path.\n        let shared_log_pipeline = handle\n            .query(\n                \"SELECT value FROM system.server_settings \\\n                 WHERE name = 'use_shared_merge_tree_log_pipeline'\",\n            )\n            .fetch_all()\n            .await\n            .ok()\n            .filter(|block| block.row_count() > 0)\n            .and_then(|block| block.get::<String, _>(0, 0).ok())\n            .map(|v| v == \"1\" || v.eq_ignore_ascii_case(\"true\"))\n            .unwrap_or(false);\n        if shared_log_pipeline {\n            log::info!(\n                \"SharedMergeTree log pipeline detected, skipping clusterAllReplicas() for system.*_log\"\n            );\n        }\n\n        return Ok(ClickHouse {\n            quirks,\n            shared_log_pipeline,\n            options,\n            pool,\n        });\n    }\n\n    pub fn version(&self) -> String {\n        return self.quirks.get_version();\n    }\n\n    pub async fn get_slow_query_log(\n        &self,\n        filter: &String,\n        start: RelativeDateTime,\n        end: RelativeDateTime,\n        limit: u64,\n        selected_host: Option<&String>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"query_log\");\n        let host_filter = self.get_log_host_filter_clause(selected_host);\n        return self\n            .execute(\n                format!(\n                    r#\"\n                    WITH\n                        {start} AS start_,\n                        {end}   AS end_,\n                        slow_queries_ids AS (\n                            SELECT DISTINCT initial_query_id\n                            FROM {db_table}\n                            WHERE\n                                event_date BETWEEN toDate(start_) AND toDate(end_) AND\n                                event_time BETWEEN toDateTime(start_) 
AND toDateTime(end_) AND\n                                is_initial_query AND\n                                /* To make query faster */\n                                query_duration_ms > 1e3\n                                {filter}\n                                {internal}\n                                {host_filter}\n                            ORDER BY query_duration_ms DESC\n                            LIMIT {limit}\n                        )\n                    SELECT\n                        ProfileEvents.Names,\n                        ProfileEvents.Values,\n                        Settings.Names,\n                        Settings.Values,\n                        {peak_threads_usage} AS peak_threads_usage,\n                        // Compatibility with system.processlist\n                        memory_usage::Int64 AS peak_memory_usage,\n                        query_duration_ms/1e3 AS elapsed,\n                        user,\n                        is_initial_query,\n                        (exception_code = 394)::UInt8 AS is_cancelled,\n                        initial_query_id,\n                        query_id,\n                        hostname as host_name,\n                        current_database,\n                        query_start_time_microseconds,\n                        event_time_microseconds AS query_end_time_microseconds,\n                        toValidUTF8(query) AS original_query,\n                        normalizeQuery(query) AS normalized_query\n                    FROM {db_table}\n                    PREWHERE\n                        event_date BETWEEN toDate(start_) AND toDate(end_) AND\n                        event_time BETWEEN toDateTime(start_) AND toDateTime(end_) AND\n                        type != 'QueryStart' AND\n                        initial_query_id GLOBAL IN slow_queries_ids\n                \"#,\n                    start = start.to_sql_datetime_64().ok_or(Error::msg(\"Invalid start\"))?,\n                    
end = end.to_sql_datetime_64().ok_or(Error::msg(\"Invalid end\"))?,\n                    db_table = dbtable,\n                    peak_threads_usage = if self.quirks.has(ClickHouseAvailableQuirks::QueryLogPeakThreadsUsage) {\n                        \"peak_threads_usage\"\n                    } else {\n                        \"length(thread_ids)\"\n                    },\n                    internal = if self.options.internal_queries {\n                        \"\".to_string()\n                    } else {\n                        format!(\"AND client_name != '{}'\", get_client_name())\n                    },\n                    filter = if !filter.is_empty() {\n                        format!(\"AND (client_hostname LIKE '{0}' OR log_comment LIKE '{0}' OR os_user LIKE '{0}' OR user LIKE '{0}' OR initial_user LIKE '{0}' OR client_name LIKE '{0}' OR query_id LIKE '{0}' OR query LIKE '{0}')\", &filter)\n                    } else {\n                        \"\".to_string()\n                    },\n                    host_filter = host_filter,\n                )\n                .as_str(),\n            )\n            .await;\n    }\n\n    pub async fn get_last_query_log(\n        &self,\n        filter: &String,\n        start: RelativeDateTime,\n        end: RelativeDateTime,\n        limit: u64,\n        selected_host: Option<&String>,\n    ) -> Result<Columns> {\n        // TODO:\n        // - propagate sort order from the table\n        // - distributed_group_by_no_merge=2 is broken for this query with WINDOW function\n        let dbtable = self.get_log_table_name(\"system\", \"query_log\");\n        let host_filter = self.get_log_host_filter_clause(selected_host);\n        return self\n            .execute(\n                format!(\n                    r#\"\n                    WITH\n                        {start} AS start_,\n                        {end}   AS end_,\n                        last_queries_ids AS (\n                            SELECT DISTINCT 
initial_query_id\n                            FROM {db_table}\n                            WHERE\n                                event_date BETWEEN toDate(start_) AND toDate(end_) AND\n                                event_time BETWEEN toDateTime(start_) AND toDateTime(end_) AND\n                                is_initial_query\n                                {filter}\n                                {internal}\n                                {host_filter}\n                            ORDER BY event_date DESC, event_time DESC\n                            LIMIT {limit}\n                        )\n                    SELECT\n                        ProfileEvents.Names,\n                        ProfileEvents.Values,\n                        Settings.Names,\n                        Settings.Values,\n                        {peak_threads_usage} AS peak_threads_usage,\n                        // Compatibility with system.processlist\n                        memory_usage::Int64 AS peak_memory_usage,\n                        query_duration_ms/1e3 AS elapsed,\n                        user,\n                        is_initial_query,\n                        (exception_code = 394)::UInt8 AS is_cancelled,\n                        initial_query_id,\n                        query_id,\n                        hostname as host_name,\n                        current_database,\n                        query_start_time_microseconds,\n                        event_time_microseconds AS query_end_time_microseconds,\n                        toValidUTF8(query) AS original_query,\n                        normalizeQuery(query) AS normalized_query\n                    FROM {db_table}\n                    PREWHERE\n                        event_date BETWEEN toDate(start_) AND toDate(end_) AND\n                        event_time BETWEEN toDateTime(start_) AND toDateTime(end_) AND\n                        type != 'QueryStart' AND\n                        initial_query_id GLOBAL IN 
last_queries_ids\n                \"#,\n                    start = start.to_sql_datetime_64().ok_or(Error::msg(\"Invalid start\"))?,\n                    end = end.to_sql_datetime_64().ok_or(Error::msg(\"Invalid end\"))?,\n                    db_table = dbtable,\n                    peak_threads_usage = if self.quirks.has(ClickHouseAvailableQuirks::QueryLogPeakThreadsUsage) {\n                        \"peak_threads_usage\"\n                    } else {\n                        \"length(thread_ids)\"\n                    },\n                    internal = if self.options.internal_queries {\n                        \"\".to_string()\n                    } else {\n                        format!(\"AND client_name != '{}'\", get_client_name())\n                    },\n                    filter = if !filter.is_empty() {\n                        format!(\"AND (client_hostname LIKE '{0}' OR log_comment LIKE '{0}' OR os_user LIKE '{0}' OR user LIKE '{0}' OR initial_user LIKE '{0}' OR client_name LIKE '{0}' OR query_id LIKE '{0}' OR query LIKE '{0}')\", &filter)\n                    } else {\n                        \"\".to_string()\n                    },\n                    host_filter = host_filter,\n                )\n                .as_str(),\n            )\n            .await;\n    }\n\n    pub async fn get_processlist(\n        &self,\n        filter: String,\n        limit: u64,\n        selected_host: Option<&String>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_table_name_no_history(\"system\", \"processes\");\n        let host_filter = self.get_host_filter_clause(selected_host);\n        return self\n            .execute(\n                format!(\n                    r#\"\n                    SELECT\n                        ProfileEvents.Names,\n                        ProfileEvents.Values,\n                        Settings.Names,\n                        Settings.Values,\n                        {peak_threads_usage} AS peak_threads_usage,\n    
                    peak_memory_usage,\n                        elapsed / {q} AS elapsed,\n                        user,\n                        is_initial_query,\n                        is_cancelled,\n                        initial_query_id,\n                        query_id,\n                        hostName() AS host_name,\n                        {current_database} AS current_database,\n                        /* NOTE: now64()/elapsed does not have enough precision to handle starting\n                         * time properly, while this column is used for querying system.text_log,\n                         * and it should be the smallest time that we are looking for */\n                        (now64(6) - elapsed - 1) AS query_start_time_microseconds,\n                        now64(6) AS query_end_time_microseconds,\n                        toValidUTF8(query) AS original_query,\n                        normalizeQuery(query) AS normalized_query\n                    FROM {}\n                    WHERE 1\n                    {filter}\n                    {internal}\n                    {host_filter}\n                    LIMIT {limit}\n                \"#,\n                    dbtable,\n                    q = if self.quirks.has(ClickHouseAvailableQuirks::ProcessesElapsed) {\n                        10\n                    } else {\n                        1\n                    },\n                    current_database = if self.quirks.has(ClickHouseAvailableQuirks::ProcessesCurrentDatabase) {\n                        // This is required for EXPLAIN (available since 20.6),\n                        // so EXPLAIN with non-default current_database will be broken from processes view.\n                        \"'default'\"\n                    } else {\n                        \"current_database\"\n                    },\n                    internal = if self.options.internal_queries {\n                        \"\".to_string()\n                    } else {\n          
                  format!(\"AND client_name != '{}'\", get_client_name())\n                        },\n                    filter = if !filter.is_empty() {\n                        format!(\"AND (client_hostname LIKE '{0}' OR Settings['log_comment'] LIKE '{0}' OR os_user LIKE '{0}' OR user LIKE '{0}' OR initial_user LIKE '{0}' OR client_name LIKE '{0}' OR query_id LIKE '{0}' OR query LIKE '{0}')\", &filter)\n                    } else {\n                        \"\".to_string()\n                    },\n                    peak_threads_usage = if self.quirks.has(ClickHouseAvailableQuirks::ProcessesPeakThreadsUsage) {\n                        \"peak_threads_usage\"\n                    } else {\n                        \"length(thread_ids)\"\n                    },\n                    host_filter = host_filter,\n                )\n                .as_str(),\n            )\n            .await;\n    }\n\n    pub async fn get_summary(\n        &self,\n        selected_host: Option<&String>,\n    ) -> Result<ClickHouseServerSummary> {\n        let host_filter = self.get_host_filter_clause(selected_host);\n        let host_where = if host_filter.is_empty() {\n            String::new()\n        } else {\n            format!(\" WHERE {}\", &host_filter[4..]) // Remove leading \"AND \"\n        };\n\n        let memory_index_granularity_trait = if self.quirks.has(ClickHouseAvailableQuirks::AsynchronousMetricsTotalIndexGranularityBytesInMemoryAllocated) {\n            format!(\"(SELECT sum(index_granularity_bytes_in_memory_allocated) FROM {}{}) AS memory_index_granularity_\", self.get_table_name_no_history(\"system\", \"parts\"), host_where)\n        } else {\n            \"0::UInt64 AS memory_index_granularity_\".to_string()\n        };\n\n        // NOTE: metrics (but not all of them) are deltas, so chdig do not need to reimplement this logic by itself.\n        let block = self\n            .execute(\n                &format!(\n                    r#\"\n                   
 WITH\n                        -- memory detalization\n                        (SELECT sum(CAST(value AS UInt64)) FROM {metrics} WHERE metric = 'MemoryTracking' {host_filter_and}) AS memory_tracked_,\n                        (SELECT sum(CAST(value AS UInt64)) FROM {metrics} WHERE metric = 'MergesMutationsMemoryTracking' {host_filter_and}) AS memory_merges_mutations_,\n                        (SELECT sum(total_bytes) FROM {tables} WHERE engine IN ('Join','Memory','Buffer','Set') {host_filter_and}) AS memory_tables_,\n                        (SELECT sum(CAST(value AS UInt64)) FROM {asynchronous_metrics} WHERE metric LIKE '%CacheBytes' AND metric NOT LIKE '%Filesystem%' {host_filter_and}) AS memory_async_metrics_caches_,\n                        (SELECT sum(CAST(value AS UInt64)) FROM {metrics} WHERE\n                            metric NOT LIKE '%Filesystem%' AND\n                            (metric LIKE '%CacheBytes' OR metric IN ('IcebergMetadataFilesCacheSize', 'VectorSimilarityIndexCacheSize'))\n                            {host_filter_and}\n                        ) AS memory_metrics_caches_,\n                        (SELECT sum(CAST(memory_usage AS UInt64)) FROM {processes} {host_filter_where})                              AS memory_queries_,\n                        (SELECT sum(CAST(memory_usage AS UInt64)) FROM {merges} {host_filter_where})                                 AS memory_active_merges_,\n                        (SELECT sum(bytes_allocated) FROM {dictionaries} {host_filter_where})                                        AS memory_dictionaries_,\n                        (SELECT sum(total_bytes) FROM {async_inserts} {host_filter_where})                                           AS memory_async_inserts_,\n                        {memory_index_granularity_trait},\n                        (SELECT count() FROM {one} {host_filter_where})                                                              AS servers_,\n                        (SELECT count() FROM 
{replication_queue} {host_filter_where})                                                AS replication_queue_,\n                        (SELECT sum(num_tries) FROM {replication_queue} {host_filter_where})                                         AS replication_queue_tries_,\n                        (SELECT [sum(total_rows), sum(total_bytes)] FROM (\n                            SELECT\n                                if(engine LIKE 'Shared%', max(total_rows), sum(total_rows)) AS total_rows,\n                                if(engine LIKE 'Shared%', max(total_bytes), sum(total_bytes)) AS total_bytes\n                            FROM {tables}\n                            WHERE has_own_data = 1 {host_filter_and}\n                            GROUP BY database, name, engine\n                        )) AS storage_totals_\n                    SELECT\n                        assumeNotNull(memory_tracked_)                           AS memory_tracked,\n                        assumeNotNull(memory_merges_mutations_)                  AS memory_merges_mutations,\n                        assumeNotNull(memory_tables_)                            AS memory_tables,\n                        assumeNotNull(memory_async_metrics_caches_) + assumeNotNull(memory_metrics_caches_) AS memory_caches,\n                        assumeNotNull(memory_queries_)                           AS memory_queries,\n                        assumeNotNull(memory_active_merges_)                     AS memory_active_merges,\n                        assumeNotNull(memory_dictionaries_)                      AS memory_dictionaries,\n                        assumeNotNull(memory_async_inserts_)                     AS memory_async_inserts,\n                        assumeNotNull(servers_)                                  AS servers,\n                        assumeNotNull(replication_queue_)                        AS replication_queue,\n                        assumeNotNull(replication_queue_tries_)                  AS 
replication_queue_tries,\n                        assumeNotNull(storage_totals_[1])::UInt64               AS storage_total_rows,\n                        assumeNotNull(storage_totals_[2])::UInt64              AS storage_total_bytes,\n\n                        max2(assumeNotNull(memory_index_granularity_), asynchronous_metrics.memory_index_granularity)::UInt64 AS memory_index_granularity,\n\n                        asynchronous_metrics.*,\n                        events.*,\n                        metrics.*\n                    FROM\n                    (\n                        WITH\n                            -- exclude MD/LVM\n                            metric LIKE '%_sd%' OR metric LIKE '%_nvme%' OR metric LIKE '%_vd%' AS is_disk,\n                            metric LIKE '%vlan%' AS is_vlan\n                        -- NOTE: cast should be after aggregation function since the type is Float64\n                        SELECT\n                            CAST(minIf(value, metric == 'OSUptime') AS UInt64)       AS os_uptime,\n                            CAST(min(uptime()) AS UInt64)                            AS uptime,\n                            -- memory\n                            CAST(coalesce(sumIfOrNull(value, metric == 'CGroupMemoryTotal' and value > 0), sumIf(value, metric == 'OSMemoryTotal')) AS UInt64) AS os_memory_total,\n                            CAST(sumIf(value, metric == 'MemoryResident') AS UInt64) AS memory_resident,\n                            -- May differs from primary_key_bytes_in_memory_allocated from\n                            -- system.parts, since it takes into account only active parts\n                            CAST(sumIf(value,\n                                metric == 'TotalPrimaryKeyBytesInMemoryAllocated'\n                                OR metric == 'TotalProjectionPrimaryKeyBytesInMemoryAllocated'\n                            ) AS UInt64) AS memory_primary_keys,\n                            CAST(sumIf(value,\n            
                    metric == 'TotalIndexGranularityBytesInMemoryAllocated'\n                                OR metric == 'TotalProjectionIndexGranularityBytesInMemoryAllocated'\n                            ) AS UInt64) AS memory_index_granularity,\n                            CAST((\n                                sumIf(value, metric == 'jemalloc.resident') -\n                                sumIf(value, metric == 'jemalloc.allocated')\n                            ) AS UInt64) AS memory_fragmentation,\n                            -- cpu\n                            CAST(\n                                max2(\n                                    countIf(metric LIKE 'CPUFrequencyMHz%'),\n                                    sumIf(value, metric = 'CGroupMaxCPU')\n                                )\n                            AS UInt64) AS cpu_count,\n                            CAST(\n                                max2(\n                                    sumIf(value, metric LIKE 'OSUserTimeCPU%'),\n                                    sumIf(value, metric = 'OSUserTime')\n                                )\n                            AS UInt64) AS cpu_user,\n                            CAST(\n                                max2(\n                                    sumIf(value, metric LIKE 'OSSystemTimeCPU%'),\n                                    sumIf(value, metric = 'OSSystemTime')\n                                )\n                            AS UInt64) AS cpu_system,\n                            -- threads detalization\n                            CAST(sumIf(value, metric = 'HTTPThreads') AS UInt64)             AS threads_http,\n                            CAST(sumIf(value, metric = 'TCPThreads') AS UInt64)              AS threads_tcp,\n                            CAST(sumIf(value, metric = 'OSThreadsTotal') AS UInt64)          AS threads_os_total,\n                            CAST(sumIf(value, metric = 'OSThreadsRunnable') AS UInt64)       AS 
threads_os_runnable,\n                            CAST(sumIf(value, metric = 'InterserverThreads') AS UInt64)      AS threads_interserver,\n                            -- network\n                            CAST(sumIf(value, metric LIKE 'NetworkSendBytes%' AND NOT is_vlan) AS UInt64)    AS net_send_bytes,\n                            CAST(sumIf(value, metric LIKE 'NetworkReceiveBytes%' AND NOT is_vlan) AS UInt64) AS net_receive_bytes,\n                            -- block devices\n                            CAST(sumIf(value, metric LIKE 'BlockReadBytes%' AND is_disk) AS UInt64)      AS block_read_bytes,\n                            CAST(sumIf(value, metric LIKE 'BlockWriteBytes%' AND is_disk) AS UInt64)     AS block_write_bytes,\n                            -- update intervals\n                            CAST(anyLastIf(value, metric == 'AsynchronousMetricsUpdateInterval') AS UInt64) AS metrics_update_interval\n                        FROM {asynchronous_metrics}\n                        {host_filter_where}\n                    ) as asynchronous_metrics,\n                    (\n                        SELECT\n                            sumIf(CAST(value AS UInt64), event == 'IOBufferAllocBytes') AS memory_io,\n                            sumIf(CAST(value AS UInt64), event == 'SelectedRows') AS selected_rows,\n                            sumIf(CAST(value AS UInt64), event == 'InsertedRows') AS inserted_rows\n                        FROM {events}\n                        {host_filter_where}\n                    ) as events,\n                    (\n                        SELECT\n                            sumIf(CAST(value AS UInt64), metric == 'Query') AS queries,\n                            sumIf(CAST(value AS UInt64), metric == 'Merge') AS merges,\n                            sumIf(CAST(value AS UInt64), metric == 'PartMutation') AS mutations,\n                            sumIf(CAST(value AS UInt64), metric == 'ReplicatedFetch') AS fetches,\n\n                   
         sumIf(CAST(value AS UInt64), metric == 'StorageBufferBytes') AS storage_buffer_bytes,\n                            sumIf(CAST(value AS UInt64), metric == 'DistributedFilesToInsert') AS storage_distributed_insert_files,\n\n                            sumIf(CAST(value AS UInt64), metric == 'BackgroundMergesAndMutationsPoolTask')    AS threads_merges_mutations,\n                            sumIf(CAST(value AS UInt64), metric == 'BackgroundFetchesPoolTask')               AS threads_fetches,\n                            sumIf(CAST(value AS UInt64), metric == 'BackgroundCommonPoolTask')                AS threads_common,\n                            sumIf(CAST(value AS UInt64), metric == 'BackgroundMovePoolTask')                  AS threads_moves,\n                            sumIf(CAST(value AS UInt64), metric == 'BackgroundSchedulePoolTask')              AS threads_schedule,\n                            sumIf(CAST(value AS UInt64), metric == 'BackgroundBufferFlushSchedulePoolTask')   AS threads_buffer_flush,\n                            sumIf(CAST(value AS UInt64), metric == 'BackgroundDistributedSchedulePoolTask')   AS threads_distributed,\n                            sumIf(CAST(value AS UInt64), metric == 'BackgroundMessageBrokerSchedulePoolTask') AS threads_message_broker,\n                            sumIf(CAST(value AS UInt64), metric IN (\n                                'BackupThreadsActive',\n                                'RestoreThreadsActive',\n                                'BackupsIOThreadsActive'\n                            )) AS threads_backups,\n                            sumIf(CAST(value AS UInt64), metric IN (\n                                'DiskObjectStorageAsyncThreadsActive',\n                                'ThreadPoolRemoteFSReaderThreadsActive',\n                                'StorageS3ThreadsActive'\n                            )) AS threads_remote_io,\n                            sumIf(CAST(value AS UInt64), metric IN (\n       
                         'IOThreadsActive',\n                                'IOWriterThreadsActive',\n                                'IOPrefetchThreadsActive',\n                                'MarksLoaderThreadsActive'\n                            )) AS threads_io,\n                            sumIf(CAST(value AS UInt64), metric IN (\n                                'QueryPipelineExecutorThreadsActive',\n                                'QueryThread',\n                                'AggregatorThreadsActive',\n                                'StorageDistributedThreadsActive',\n                                'DestroyAggregatesThreadsActive'\n                            )) AS threads_queries\n                        FROM {metrics}\n                        {host_filter_where}\n                    ) as metrics\n                    SETTINGS enable_global_with_statement=0\n                \"#,\n                    metrics=self.get_table_name_no_history(\"system\", \"metrics\"),\n                    events=self.get_table_name_no_history(\"system\", \"events\"),\n                    tables=self.get_table_name_no_history(\"system\", \"tables\"),\n                    processes=self.get_table_name_no_history(\"system\", \"processes\"),\n                    merges=self.get_table_name_no_history(\"system\", \"merges\"),\n                    async_inserts=self.get_table_name_no_history(\"system\", \"asynchronous_inserts\"),\n                    replication_queue=self.get_table_name_no_history(\"system\", \"replication_queue\"),\n                    dictionaries=self.get_table_name_no_history(\"system\", \"dictionaries\"),\n                    asynchronous_metrics=self.get_table_name_no_history(\"system\", \"asynchronous_metrics\"),\n                    one=self.get_table_name_no_history(\"system\", \"one\"),\n\n                    memory_index_granularity_trait=memory_index_granularity_trait,\n                    host_filter_where=host_where,\n                    
host_filter_and=host_filter,\n                )\n            )\n            .await?;\n\n        let get = |key: &str| {\n            // By subquery.column\n            if let Ok(value) = block.get::<u64, _>(0, key) {\n                return value;\n            }\n\n            let parts = key.split(\".\").collect::<Vec<&str>>();\n            assert!(parts.len() <= 2);\n            // By column\n            return block.get::<u64, _>(0, parts[parts.len() - 1]).expect(key);\n        };\n\n        return Ok(ClickHouseServerSummary {\n            queries: get(\"metrics.queries\"),\n            merges: get(\"metrics.merges\"),\n            mutations: get(\"metrics.mutations\"),\n            replication_queue: get(\"replication_queue\"),\n            replication_queue_tries: get(\"replication_queue_tries\"),\n            fetches: get(\"metrics.fetches\"),\n            servers: get(\"servers\"),\n\n            uptime: ClickHouseServerUptime {\n                _os: get(\"asynchronous_metrics.os_uptime\"),\n                server: get(\"asynchronous_metrics.uptime\"),\n            },\n\n            rows: ClickHouseServerRows {\n                selected: get(\"events.selected_rows\"),\n                inserted: get(\"events.inserted_rows\"),\n            },\n\n            storages: ClickHouseServerStorages {\n                buffer_bytes: get(\"metrics.storage_buffer_bytes\"),\n                distributed_insert_files: get(\"metrics.storage_distributed_insert_files\"),\n                total_rows: get(\"storage_total_rows\"),\n                total_bytes: get(\"storage_total_bytes\"),\n            },\n\n            memory: ClickHouseServerMemory {\n                os_total: get(\"asynchronous_metrics.os_memory_total\"),\n                resident: get(\"asynchronous_metrics.memory_resident\"),\n\n                tracked: get(\"memory_tracked\"),\n                merges_mutations: get(\"memory_merges_mutations\"),\n                tables: get(\"memory_tables\"),\n              
  caches: get(\"memory_caches\"),\n                queries: get(\"memory_queries\"),\n                active_merges: get(\"memory_active_merges\"),\n                async_inserts: get(\"memory_async_inserts\"),\n                dictionaries: get(\"memory_dictionaries\"),\n                primary_keys: get(\"asynchronous_metrics.memory_primary_keys\"),\n                fragmentation: get(\"asynchronous_metrics.memory_fragmentation\"),\n                index_granularity: get(\"memory_index_granularity\"),\n                io: get(\"events.memory_io\"),\n            },\n\n            cpu: ClickHouseServerCPU {\n                count: get(\"asynchronous_metrics.cpu_count\"),\n                user: get(\"asynchronous_metrics.cpu_user\"),\n                system: get(\"asynchronous_metrics.cpu_system\"),\n            },\n\n            threads: ClickHouseServerThreads {\n                os_total: get(\"asynchronous_metrics.threads_os_total\"),\n                os_runnable: get(\"asynchronous_metrics.threads_os_runnable\"),\n                http: get(\"asynchronous_metrics.threads_http\"),\n                tcp: get(\"asynchronous_metrics.threads_tcp\"),\n                interserver: get(\"asynchronous_metrics.threads_interserver\"),\n                pools: ClickHouseServerThreadPools {\n                    merges_mutations: get(\"metrics.threads_merges_mutations\"),\n                    fetches: get(\"metrics.threads_fetches\"),\n                    common: get(\"metrics.threads_common\"),\n                    moves: get(\"metrics.threads_moves\"),\n                    schedule: get(\"metrics.threads_schedule\"),\n                    buffer_flush: get(\"metrics.threads_buffer_flush\"),\n                    distributed: get(\"metrics.threads_distributed\"),\n                    message_broker: get(\"metrics.threads_message_broker\"),\n                    backups: get(\"metrics.threads_backups\"),\n                    io: get(\"metrics.threads_io\"),\n                    
remote_io: get(\"metrics.threads_remote_io\"),\n                    queries: get(\"metrics.threads_queries\"),\n                },\n            },\n\n            network: ClickHouseServerNetwork {\n                send_bytes: get(\"asynchronous_metrics.net_send_bytes\"),\n                receive_bytes: get(\"asynchronous_metrics.net_receive_bytes\"),\n            },\n\n            blkdev: ClickHouseServerBlockDevices {\n                read_bytes: get(\"asynchronous_metrics.block_read_bytes\"),\n                write_bytes: get(\"asynchronous_metrics.block_write_bytes\"),\n            },\n\n            update_interval: get(\"asynchronous_metrics.metrics_update_interval\"),\n        });\n    }\n\n    pub async fn kill_query(&self, query_id: &str) -> Result<()> {\n        let query = if let Some(cluster) = &self.options.cluster {\n            format!(\n                \"KILL QUERY ON CLUSTER {} WHERE query_id = '{}' SYNC\",\n                cluster, query_id\n            )\n        } else {\n            format!(\"KILL QUERY WHERE query_id = '{}' SYNC\", query_id)\n        };\n        return self.execute_simple(&query).await;\n    }\n\n    pub async fn execute_query(&self, database: &str, query: &str) -> Result<()> {\n        self.execute_simple(&format!(\"USE {}\", database)).await?;\n        return self.execute_simple(query).await;\n    }\n\n    pub async fn explain_syntax(\n        &self,\n        database: &str,\n        query: &str,\n        settings: &HashMap<String, String>,\n    ) -> Result<Vec<String>> {\n        return self\n            .explain(\"SYNTAX\", database, query, Some(settings))\n            .await;\n    }\n\n    pub async fn explain_plan(&self, database: &str, query: &str) -> Result<Vec<String>> {\n        return self.explain(\"PLAN actions=1\", database, query, None).await;\n    }\n\n    pub async fn explain_pipeline(&self, database: &str, query: &str) -> Result<Vec<String>> {\n        return self.explain(\"PIPELINE\", database, query, 
None).await;\n    }\n\n    pub async fn explain_pipeline_graph(&self, database: &str, query: &str) -> Result<Vec<String>> {\n        return self\n            .explain(\"PIPELINE graph=1\", database, query, None)\n            .await;\n    }\n\n    // NOTE: can we benefit from json=1?\n    pub async fn explain_plan_indexes(&self, database: &str, query: &str) -> Result<Vec<String>> {\n        return self.explain(\"PLAN indexes=1\", database, query, None).await;\n    }\n\n    pub async fn show_create_table(&self, database: &str, table: &str) -> Result<String> {\n        let result = self\n            .execute(&format!(\"SHOW CREATE TABLE {}.{}\", database, table))\n            .await?;\n        let statement: String = collect_values(&result, \"statement\")\n            .into_iter()\n            .next()\n            .unwrap_or_default();\n        return Ok(statement);\n    }\n\n    // TODO: copy all settings from the query\n    async fn explain(\n        &self,\n        what: &str,\n        database: &str,\n        query: &str,\n        settings: Option<&HashMap<String, String>>,\n    ) -> Result<Vec<String>> {\n        self.execute_simple(&format!(\"USE {}\", database)).await?;\n\n        if let Some(settings) = settings {\n            // NOTE: it handles queries with SETTINGS incorrectly, i.e.:\n            //\n            //     SELECT 1 SETTINGS max_threads=1\n            //\n            //     EXPLAIN SYNTAX SELECT 1 SETTINGS max_threads=1 SETTINGS max_threads=1, max_insert_threads=1 ->\n            //     SELECT 1 SETTINGS max_threads=1\n            //\n            // This can be fixed two ways:\n            // - in ClickHouse\n            // - by passing settings in the protocol\n            if !settings.is_empty() {\n                return Ok(collect_values(\n                    &self\n                        .execute(&format!(\n                            \"EXPLAIN {} {} SETTINGS {}\",\n                            what,\n                            query,\n     
                       settings\n                                .iter()\n                                .map(|kv| format!(\"{}='{}'\", kv.0, kv.1.replace('\\'', \"\\\\\\'\")))\n                                .collect::<Vec<String>>()\n                                .join(\",\")\n                        ))\n                        .await?,\n                    \"explain\",\n                ));\n            }\n        }\n\n        return Ok(collect_values(\n            &self.execute(&format!(\"EXPLAIN {} {}\", what, query)).await?,\n            \"explain\",\n        ));\n    }\n\n    pub async fn get_query_logs(&self, args: &TextLogArguments) -> Result<Columns> {\n        // TODO:\n        // - optional flush, but right now it gives \"blocks should not be empty.\" error\n        //   self.execute(\"SYSTEM FLUSH LOGS\").await;\n        // - configure time interval\n        //\n        // NOTE:\n        // - we cannot use LIVE VIEW, since\n        //   a) they are pretty complex\n        //   b) it does not work in case we monitor the whole cluster\n\n        let dbtable = self.get_log_table_name(\"system\", \"text_log\");\n        let order = if self.options.logs_order == LogsOrder::Desc {\n            \"DESC\"\n        } else {\n            \"ASC\"\n        };\n        return self\n            .execute(\n                format!(\n                    r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({}) AS start_time_,\n                        {} AS end_time_\n                    SELECT\n                        hostname AS host_name,\n                        event_time,\n                        event_time_microseconds,\n                        thread_id,\n                        level::String AS level,\n                        logger_name::String AS logger_name,\n                        query_id::String AS query_id,\n                        message\n                    FROM {}\n                    WHERE\n                            
event_date >= toDate(start_time_) AND event_time >= toDateTime(start_time_) AND event_time_microseconds > start_time_\n                        AND event_date <= toDate(end_time_)   AND event_time <= toDateTime(end_time_)   AND event_time_microseconds <= end_time_\n                        {}\n                        {}\n                        {}\n                        {}\n                        {}\n                    ORDER BY event_date {order}, event_time {order}, event_time_microseconds {order}\n                    LIMIT {}\n                    \"#,\n                    args.start\n                        .timestamp_nanos_opt()\n                        .ok_or(Error::msg(\"Invalid start time\"))?,\n                    args.end.to_sql_datetime_64().ok_or(Error::msg(\"Invalid end time\"))?,\n                    dbtable,\n                    if let Some(query_ids) = &args.query_ids {\n                        format!(\"AND query_id IN ('{}')\", query_ids.join(\"','\"))\n                    } else {\n                        \"\".into()\n                    },\n                    if let Some(logger_names) = &args.logger_names {\n                        format!(\"AND ({})\", logger_names.iter().map(|l| format!(\"logger_name LIKE '{}'\", l)).collect::<Vec<_>>().join(\" OR \"))\n                    } else {\n                        \"\".into()\n                    },\n                    if let Some(hostname) = &args.hostname {\n                        format!(\"AND (hostName() = '{0}' OR hostname = '{0}')\", hostname.replace('\\'', \"''\"))\n                    } else {\n                        \"\".into()\n                    },\n                    if let Some(message_filter) = &args.message_filter {\n                        format!(\"AND message LIKE '%{}%'\", message_filter)\n                    } else {\n                        \"\".into()\n                    },\n                    if let Some(max_level) = &args.max_level {\n                        
format!(\"AND level <= '{}'\", max_level)\n                    } else {\n                        \"\".into()\n                    },\n                    self.options.limit,\n                )\n                .as_str(),\n            )\n            .await;\n    }\n\n    /// Return query flamegraph in pyspy format for flameshow.\n    /// It is the same format as TSV, but with ' ' delimiter between symbols and weight.\n    pub async fn get_flamegraph(\n        &self,\n        trace_type: TraceType,\n        query_ids: Option<&[String]>,\n        start_microseconds: Option<DateTime<Local>>,\n        end_microseconds: Option<DateTime<Local>>,\n        selected_host: Option<&String>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"trace_log\");\n        let host_filter = self.get_log_host_filter_clause(selected_host);\n        return self\n            .execute(&format!(\n                r#\"\n            WITH\n                {} AS start_time_,\n                {} AS end_time_\n            SELECT\n              {} AS human_trace,\n              {} weight\n            FROM {}\n            WHERE\n                    event_date >= toDate(start_time_) AND event_time >= toDateTime(start_time_) AND event_time_microseconds > start_time_\n                AND event_date <= toDate(end_time_)   AND event_time <= toDateTime(end_time_)   AND event_time_microseconds <= end_time_\n                AND trace_type = '{:?}'\n                {}\n                {}\n            GROUP BY human_trace\n            SETTINGS allow_introspection_functions=1\n            \"#,\n                match start_microseconds {\n                    Some(time) => format!(\n                        \"fromUnixTimestamp64Nano({})\",\n                        time.timestamp_nanos_opt()\n                            .ok_or(Error::msg(\"Invalid start time\"))?\n                    ),\n                    None => \"toDateTime64(now() - INTERVAL 1 HOUR, 6)\".to_string(),\n      
          },\n                match end_microseconds {\n                    Some(time) => format!(\n                        \"fromUnixTimestamp64Nano({})\",\n                        time.timestamp_nanos_opt()\n                            .ok_or(Error::msg(\"Invalid end time\"))?\n                    ),\n                    None => \"toDateTime64(now(), 6)\".to_string(),\n                },\n                if self.quirks.has(ClickHouseAvailableQuirks::TraceLogHasSymbols) {\n                    r#\"\n                        if(empty(symbols),\n                           arrayStringConcat(arrayMap(\n                             addr -> demangle(addressToSymbol(addr)),\n                             arrayReverse(trace)\n                           ), ';'),\n                           arrayStringConcat(arrayReverse(symbols), ';')\n                        )\n                    \"#\n                } else {\n                    r#\"\n                        arrayStringConcat(arrayMap(\n                          addr -> demangle(addressToSymbol(addr)),\n                          arrayReverse(trace)\n                        ), ';')\n                    \"#\n                },\n                match trace_type {\n                    TraceType::Memory => \"abs(sum(size))\",\n                    TraceType::MemorySample => \"abs(sum(size))\",\n                    TraceType::JemallocSample => \"abs(sum(size))\",\n                    TraceType::MemoryAllocatedWithoutCheck => \"abs(sum(size))\",\n                    _ => \"count()\",\n                },\n                dbtable,\n                trace_type,\n                if let Some(ids) = query_ids {\n                    format!(\"AND query_id IN ('{}')\", ids.join(\"','\"))\n                } else {\n                    String::new()\n                },\n                host_filter,\n            ))\n            .await;\n    }\n\n    /// Return jemalloc flamegraph in pyspy format.\n    /// It is the same format as TSV, but 
with ' ' delimiter between symbols and weight.\n    pub async fn get_jemalloc_flamegraph(&self, selected_host: Option<&String>) -> Result<Columns> {\n        let dbtable = self.get_table_name(\"system\", \"jemalloc_profile_text\");\n        let host_filter = if let Some(host) = selected_host {\n            if !host.is_empty() && self.options.cluster.is_some() {\n                format!(\"AND hostName() = '{}'\", host.replace('\\'', \"''\"))\n            } else {\n                String::new()\n            }\n        } else {\n            String::new()\n        };\n        return self\n            .execute(&format!(\n                r#\"\n            WITH splitByChar(' ', line) AS parts\n            SELECT\n                arrayStringConcat(arraySlice(parts, 1, -1), ' ') AS symbols,\n                parts[-1]::UInt64 AS bytes\n            FROM {}\n            WHERE 1 {}\n            SETTINGS jemalloc_profile_text_output_format='collapsed'\n            \"#,\n                dbtable, host_filter,\n            ))\n            .await;\n    }\n\n    pub async fn get_live_query_flamegraph(\n        &self,\n        query_ids: &Option<Vec<String>>,\n        selected_host: Option<&String>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_table_name_no_history(\"system\", \"stack_trace\");\n        let host_filter = self.get_host_filter_clause(selected_host);\n        let where_clause = match (query_ids.as_ref(), host_filter.is_empty()) {\n            (Some(v), true) => format!(\"query_id IN ('{}')\", v.join(\"','\")),\n            (Some(v), false) => format!(\"query_id IN ('{}') {}\", v.join(\"','\"), host_filter),\n            (None, false) => format!(\"1 {}\", host_filter),\n            (None, true) => \"1\".to_string(),\n        };\n        return self\n            .execute(&format!(\n                r#\"\n            SELECT\n              arrayStringConcat(arrayMap(\n                addr -> demangle(addressToSymbol(addr)),\n                
arrayReverse(trace)\n              ), ';') AS human_trace,\n              count() weight\n            FROM {}\n            WHERE {}\n            GROUP BY human_trace\n            SETTINGS allow_introspection_functions=1\n            \"#,\n                dbtable, where_clause\n            ))\n            .await;\n    }\n\n    pub async fn get_background_schedule_pool_query_ids(\n        &self,\n        log_name: Option<String>,\n        database: String,\n        table: String,\n        start: RelativeDateTime,\n        end: RelativeDateTime,\n        selected_host: Option<&String>,\n    ) -> Result<Vec<String>> {\n        let dbtable = self.get_log_table_name(\"system\", \"background_schedule_pool_log\");\n\n        let start_sql = start\n            .to_sql_datetime_64()\n            .ok_or_else(|| Error::msg(\"Invalid start\"))?;\n        let end_sql = end\n            .to_sql_datetime_64()\n            .ok_or_else(|| Error::msg(\"Invalid end\"))?;\n\n        let host_filter = self.get_log_host_filter_clause(selected_host);\n\n        let query = if let Some(ref log_name) = log_name {\n            format!(\n                r#\"\n                WITH {start} AS start_, {end} AS end_\n                SELECT DISTINCT query_id\n                FROM {dbtable}\n                WHERE\n                    event_date BETWEEN toDate(start_) AND toDate(end_) AND\n                    event_time BETWEEN toDateTime(start_) AND toDateTime(end_) AND\n                    log_name = '{log_name}' AND\n                    database = '{database}' AND\n                    table = '{table}'\n                    {host_filter}\n                LIMIT 1000\n                \"#,\n                start = start_sql,\n                end = end_sql,\n                dbtable = dbtable,\n                log_name = log_name.replace('\\'', \"''\"),\n                database = database.replace('\\'', \"''\"),\n                table = table.replace('\\'', \"''\"),\n                host_filter = 
host_filter,\n            )\n        } else {\n            format!(\n                r#\"\n                WITH {start} AS start_, {end} AS end_\n                SELECT DISTINCT query_id\n                FROM {dbtable}\n                WHERE\n                    event_date BETWEEN toDate(start_) AND toDate(end_) AND\n                    event_time BETWEEN toDateTime(start_) AND toDateTime(end_) AND\n                    database = '{database}' AND\n                    table = '{table}'\n                    {host_filter}\n                LIMIT 1000\n                \"#,\n                start = start_sql,\n                end = end_sql,\n                dbtable = dbtable,\n                database = database.replace('\\'', \"''\"),\n                table = table.replace('\\'', \"''\"),\n                host_filter = host_filter,\n            )\n        };\n\n        let columns = self.execute(&query).await?;\n        let mut query_ids = Vec::new();\n        for i in 0..columns.row_count() {\n            if let Ok(query_id) = columns.get::<String, _>(i, \"query_id\") {\n                query_ids.push(query_id);\n            }\n        }\n\n        Ok(query_ids)\n    }\n\n    pub async fn get_otel_spans_for_perfetto(\n        &self,\n        query_ids: Option<&[String]>,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"opentelemetry_span_log\");\n        let start_us = start.timestamp_micros();\n        let end_us = end.timestamp_micros();\n        let query_id_filter = if let Some(ids) = query_ids {\n            format!(\n                \"AND attribute['clickhouse.query_id'] IN ('{}')\",\n                ids.join(\"','\")\n            )\n        } else {\n            String::new()\n        };\n        return self\n            .execute(&format!(\n                r#\"\n                    SELECT\n                        operation_name,\n                        
start_time_us,\n                        finish_time_us,\n                        attribute['clickhouse.query_id'] AS query_id,\n                        {host_expr} AS host_name\n                    FROM {dbtable}\n                    WHERE start_time_us BETWEEN {start_us} AND {end_us}\n                      {query_id_filter}\n                    ORDER BY start_time_us\n                    \"#,\n                dbtable = dbtable,\n                start_us = start_us,\n                end_us = end_us,\n                query_id_filter = query_id_filter,\n                host_expr = self.get_log_hostname_column(),\n            ))\n            .await;\n    }\n\n    pub async fn get_trace_log_counters_for_perfetto(\n        &self,\n        query_ids: Option<&[String]>,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"trace_log\");\n        let query_id_filter = if let Some(ids) = query_ids {\n            format!(\"AND query_id IN ('{}')\", ids.join(\"','\"))\n        } else {\n            String::new()\n        };\n        return self\n            .execute(&format!(\n                r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                        fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        query_id,\n                        event,\n                        increment,\n                        event_time_microseconds,\n                        {host_expr} AS host_name\n                    FROM {dbtable}\n                    WHERE trace_type = 'ProfileEvent' AND increment != 0\n                      {query_id_filter}\n                      AND event_date >= toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    ORDER BY event_time_microseconds\n                 
   \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n                query_id_filter = query_id_filter,\n                host_expr = self.get_log_hostname_column(),\n            ))\n            .await;\n    }\n\n    pub async fn get_query_metrics_for_perfetto(\n        &self,\n        query_ids: Option<&[String]>,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Vec<QueryMetricRow>> {\n        let dbtable = self.get_log_table_name(\"system\", \"query_metric_log\");\n        let query_id_filter = if let Some(ids) = query_ids {\n            format!(\"AND query_id IN ('{}')\", ids.join(\"','\"))\n        } else {\n            String::new()\n        };\n        let block = self\n            .execute(&format!(\n                r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                        fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        query_id,\n                        event_time_microseconds,\n                        memory_usage,\n                        peak_memory_usage,\n                        {host_expr} AS host_name,\n                        COLUMNS('ProfileEvent_')\n                    FROM {dbtable}\n                    WHERE 1\n                      {query_id_filter}\n                      AND event_date >= toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    ORDER BY event_time_microseconds\n                    \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                
end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n                query_id_filter = query_id_filter,\n                host_expr = self.get_log_hostname_column(),\n            ))\n            .await?;\n\n        let pe_columns: Vec<String> = block\n            .columns()\n            .iter()\n            .map(|c| c.name().to_string())\n            .filter(|name| name.starts_with(\"ProfileEvent_\"))\n            .collect();\n\n        let mut rows = Vec::with_capacity(block.row_count());\n        for i in 0..block.row_count() {\n            let mut profile_events = HashMap::new();\n            for col in &pe_columns {\n                let value: u64 = block.get(i, col.as_str()).unwrap_or(0);\n                if value != 0 {\n                    let name = col.strip_prefix(\"ProfileEvent_\").unwrap();\n                    profile_events.insert(name.to_string(), value);\n                }\n            }\n            let ts_ns = match block.get::<DateTime<Tz>, _>(i, \"event_time_microseconds\") {\n                Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                Err(e) => {\n                    log::warn!(\n                        \"Perfetto: query_metric_log row {} event_time_microseconds: {}\",\n                        i,\n                        e\n                    );\n                    continue;\n                }\n            };\n            rows.push(QueryMetricRow {\n                host_name: block.get(i, \"host_name\").unwrap_or_default(),\n                timestamp_ns: ts_ns,\n                memory_usage: block.get(i, \"memory_usage\").unwrap_or(0),\n                peak_memory_usage: block.get(i, \"peak_memory_usage\").unwrap_or(0),\n                profile_events,\n            });\n        }\n        Ok(rows)\n    }\n\n    pub async fn get_part_log_for_perfetto(\n        &self,\n        query_ids: Option<&[String]>,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> 
Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"part_log\");\n        let query_id_filter = if let Some(ids) = query_ids {\n            format!(\"AND query_id IN ('{}')\", ids.join(\"','\"))\n        } else {\n            String::new()\n        };\n        return self\n            .execute(&format!(\n                r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                        fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        event_type,\n                        event_time_microseconds,\n                        duration_ms,\n                        database,\n                        table,\n                        part_name,\n                        query_id,\n                        rows,\n                        size_in_bytes,\n                        {host_expr} AS host_name\n                    FROM {dbtable}\n                    WHERE event_type NOT IN ('MergePartsStart', 'MutatePartStart')\n                      {query_id_filter}\n                      AND event_date >= toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    ORDER BY event_time_microseconds\n                    \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n                query_id_filter = query_id_filter,\n                host_expr = self.get_log_hostname_column(),\n            ))\n            .await;\n    }\n\n    pub async fn get_stack_traces_for_perfetto(\n        &self,\n        query_ids: Option<&[String]>,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = 
self.get_log_table_name(\"system\", \"trace_log\");\n        let symbol_expr = if self\n            .quirks\n            .has(ClickHouseAvailableQuirks::TraceLogHasSymbols)\n        {\n            r#\"arrayReverse(if(empty(symbols),\n                arrayMap(addr -> demangle(addressToSymbol(addr)), trace),\n                symbols))\"#\n        } else {\n            \"arrayReverse(arrayMap(addr -> demangle(addressToSymbol(addr)), trace))\"\n        };\n        let query_id_filter = if let Some(ids) = query_ids {\n            format!(\"AND query_id IN ('{}')\", ids.join(\"','\"))\n        } else {\n            String::new()\n        };\n        return self\n            .execute(&format!(\n                r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                        fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        event_time_microseconds,\n                        thread_id,\n                        trace_type::String AS trace_type,\n                        {symbol_expr} AS stack,\n                        size,\n                        query_id,\n                        {host_expr} AS host_name\n                    FROM {dbtable}\n                    WHERE trace_type IN ('CPU', 'Real', 'Memory')\n                      {query_id_filter}\n                      AND event_date >= toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    ORDER BY event_time_microseconds\n                    SETTINGS allow_introspection_functions=1\n                    \"#,\n                dbtable = dbtable,\n                symbol_expr = symbol_expr,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n            
    query_id_filter = query_id_filter,\n                host_expr = self.get_log_hostname_column(),\n            ))\n            .await;\n    }\n\n    pub async fn get_text_log_for_perfetto(\n        &self,\n        query_ids: Option<&[String]>,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"text_log\");\n        let query_id_filter = if let Some(ids) = query_ids {\n            format!(\"AND query_id IN ('{}')\", ids.join(\"','\"))\n        } else {\n            String::new()\n        };\n        return self\n            .execute(&format!(\n                r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                        fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        event_time_microseconds,\n                        level::String AS level,\n                        logger_name::String AS logger_name,\n                        message,\n                        query_id,\n                        {host_expr} AS host_name\n                    FROM {dbtable}\n                    WHERE 1\n                      {query_id_filter}\n                      AND event_date >= toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    ORDER BY event_time_microseconds\n                    \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n                query_id_filter = query_id_filter,\n                host_expr = self.get_log_hostname_column(),\n            ))\n            .await;\n    }\n\n    pub async fn get_query_thread_log_for_perfetto(\n        &self,\n   
     query_ids: Option<&[String]>,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"query_thread_log\");\n        let query_id_filter = if let Some(ids) = query_ids {\n            format!(\"AND query_id IN ('{}')\", ids.join(\"','\"))\n        } else {\n            String::new()\n        };\n        return self\n            .execute(&format!(\n                r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                        fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        query_id,\n                        thread_name,\n                        event_time_microseconds,\n                        query_duration_ms,\n                        ProfileEvents.Names,\n                        ProfileEvents.Values,\n                        peak_memory_usage,\n                        {host_expr} AS host_name\n                    FROM {dbtable}\n                    WHERE 1\n                      {query_id_filter}\n                      AND event_date >= toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    ORDER BY event_time_microseconds\n                    \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n                query_id_filter = query_id_filter,\n                host_expr = self.get_log_hostname_column(),\n            ))\n            .await;\n    }\n\n    pub async fn get_queries_for_perfetto(\n        &self,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = 
self.get_log_table_name(\"system\", \"query_log\");\n        return self\n            .execute(\n                format!(\n                    r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                        fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        ProfileEvents.Names,\n                        ProfileEvents.Values,\n                        Settings.Names,\n                        Settings.Values,\n                        {peak_threads_usage} AS peak_threads_usage,\n                        memory_usage::Int64 AS peak_memory_usage,\n                        query_duration_ms/1e3 AS elapsed,\n                        user,\n                        is_initial_query,\n                        initial_query_id,\n                        query_id,\n                        hostname as host_name,\n                        current_database,\n                        query_start_time_microseconds,\n                        event_time_microseconds AS query_end_time_microseconds,\n                        toValidUTF8(query) AS original_query,\n                        normalizeQuery(query) AS normalized_query\n                    FROM {dbtable}\n                    WHERE type != 'QueryStart'\n                      AND event_date >= toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    \"#,\n                    start = start\n                        .timestamp_nanos_opt()\n                        .ok_or(Error::msg(\"Invalid start\"))?,\n                    end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n                    dbtable = dbtable,\n                    peak_threads_usage = if self\n                        .quirks\n                        .has(ClickHouseAvailableQuirks::QueryLogPeakThreadsUsage)\n                    {\n                      
  \"peak_threads_usage\"\n                    } else {\n                        \"length(thread_ids)\"\n                    },\n                )\n                .as_str(),\n            )\n            .await;\n    }\n\n    pub async fn get_metric_log_for_perfetto(\n        &self,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Vec<MetricLogRow>> {\n        let dbtable = self.get_log_table_name(\"system\", \"metric_log\");\n        let block = self\n            .execute(&format!(\n                r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                        fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        event_time_microseconds,\n                        COLUMNS('ProfileEvent_'),\n                        COLUMNS('CurrentMetric_')\n                    FROM {dbtable}\n                    WHERE 1\n                      AND event_date >= toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    ORDER BY event_time_microseconds\n                    \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n            ))\n            .await?;\n\n        let pe_columns: Vec<String> = block\n            .columns()\n            .iter()\n            .map(|c| c.name().to_string())\n            .filter(|name| name.starts_with(\"ProfileEvent_\"))\n            .collect();\n        let cm_columns: Vec<String> = block\n            .columns()\n            .iter()\n            .map(|c| c.name().to_string())\n            .filter(|name| name.starts_with(\"CurrentMetric_\"))\n            .collect();\n\n        let mut rows = 
Vec::with_capacity(block.row_count());\n        for i in 0..block.row_count() {\n            let ts_ns = match block.get::<DateTime<Tz>, _>(i, \"event_time_microseconds\") {\n                Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                Err(e) => {\n                    log::warn!(\n                        \"Perfetto: metric_log row {} event_time_microseconds: {}\",\n                        i,\n                        e\n                    );\n                    continue;\n                }\n            };\n            let mut profile_events = HashMap::new();\n            for col in &pe_columns {\n                let value: u64 = block.get(i, col.as_str()).unwrap_or(0);\n                if value != 0 {\n                    let name = col.strip_prefix(\"ProfileEvent_\").unwrap();\n                    profile_events.insert(name.to_string(), value);\n                }\n            }\n            let mut current_metrics = HashMap::new();\n            for col in &cm_columns {\n                let value: i64 = block.get(i, col.as_str()).unwrap_or(0);\n                if value != 0 {\n                    let name = col.strip_prefix(\"CurrentMetric_\").unwrap();\n                    current_metrics.insert(name.to_string(), value);\n                }\n            }\n            rows.push(MetricLogRow {\n                timestamp_ns: ts_ns,\n                profile_events,\n                current_metrics,\n            });\n        }\n        Ok(rows)\n    }\n\n    pub async fn get_asynchronous_metric_log_for_perfetto(\n        &self,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"asynchronous_metric_log\");\n        return self\n            .execute(&format!(\n                r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                        
fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        metric,\n                        value,\n                        event_time_microseconds\n                    FROM {dbtable}\n                    WHERE 1\n                      AND event_date >= toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    ORDER BY event_time_microseconds\n                    \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n            ))\n            .await;\n    }\n\n    pub async fn get_asynchronous_insert_log_for_perfetto(\n        &self,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"asynchronous_insert_log\");\n        return self\n            .execute(&format!(\n                r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                        fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        database,\n                        table,\n                        format,\n                        status,\n                        bytes,\n                        exception,\n                        event_time_microseconds,\n                        flush_time_microseconds,\n                        query_id\n                    FROM {dbtable}\n                    WHERE 1\n                      AND event_date >= toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    ORDER BY event_time_microseconds\n                    \"#,\n  
              dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n            ))\n            .await;\n    }\n\n    pub async fn get_error_log_for_perfetto(\n        &self,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"error_log\");\n        return self\n            .execute(&format!(\n                r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                        fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        error,\n                        code,\n                        value,\n                        remote,\n                        last_error_message,\n                        event_time\n                    FROM {dbtable}\n                    WHERE 1\n                      AND event_date >= toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    ORDER BY event_time\n                    \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n            ))\n            .await;\n    }\n\n    pub async fn get_s3_queue_log_for_perfetto(\n        &self,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"s3queue_log\");\n        return self\n            .execute(&format!(\n                r#\"\n                    SELECT\n                        file_name,\n     
                   rows_processed,\n                        status,\n                        processing_start_time,\n                        processing_end_time,\n                        exception\n                    FROM {dbtable}\n                    WHERE processing_start_time >= toDateTime(fromUnixTimestamp64Nano({start}))\n                      AND processing_start_time <= toDateTime(fromUnixTimestamp64Nano({end}))\n                    ORDER BY processing_start_time\n                    \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n            ))\n            .await;\n    }\n\n    pub async fn get_azure_queue_log_for_perfetto(\n        &self,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"azure_queue_log\");\n        return self\n            .execute(&format!(\n                r#\"\n                    SELECT\n                        database,\n                        table,\n                        file_name,\n                        rows_processed,\n                        status,\n                        processing_start_time,\n                        processing_end_time,\n                        exception\n                    FROM {dbtable}\n                    WHERE processing_start_time >= toDateTime(fromUnixTimestamp64Nano({start}))\n                      AND processing_start_time <= toDateTime(fromUnixTimestamp64Nano({end}))\n                    ORDER BY processing_start_time\n                    \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = 
end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n            ))\n            .await;\n    }\n\n    pub async fn get_blob_storage_log_for_perfetto(\n        &self,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"blob_storage_log\");\n        return self\n            .execute(&format!(\n                r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                        fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        event_type,\n                        query_id,\n                        disk_name,\n                        bucket,\n                        remote_path,\n                        data_size,\n                        error,\n                        event_time_microseconds\n                    FROM {dbtable}\n                    WHERE 1\n                      AND event_date >= toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    ORDER BY event_time_microseconds\n                    \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n            ))\n            .await;\n    }\n\n    pub async fn get_background_schedule_pool_log_for_perfetto(\n        &self,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"background_schedule_pool_log\");\n        return self\n            .execute(&format!(\n                r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                   
     fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        log_name,\n                        database,\n                        table,\n                        query_id,\n                        duration_ms,\n                        error,\n                        exception,\n                        event_time_microseconds\n                    FROM {dbtable}\n                    WHERE 1\n                      AND event_date >= toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    ORDER BY event_time_microseconds\n                    \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n            ))\n            .await;\n    }\n\n    pub async fn get_session_log_for_perfetto(\n        &self,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"session_log\");\n        return self\n            .execute(&format!(\n                r#\"\n                    WITH\n                        fromUnixTimestamp64Nano({start}) AS start_,\n                        fromUnixTimestamp64Nano({end}) AS end_\n                    SELECT\n                        type::String AS type,\n                        user,\n                        auth_type::String AS auth_type,\n                        interface::String AS interface,\n                        toString(client_address) AS client_address,\n                        client_name,\n                        failure_reason,\n                        event_time_microseconds\n                    FROM {dbtable}\n                    WHERE 1\n                      AND event_date >= 
toDate(start_) AND event_time >= toDateTime(start_)\n                      AND event_date <= toDate(end_)   AND event_time <= toDateTime(end_)\n                    ORDER BY event_time_microseconds\n                    \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n            ))\n            .await;\n    }\n\n    pub async fn get_aggregated_zookeeper_log_for_perfetto(\n        &self,\n        start: DateTime<Local>,\n        end: DateTime<Local>,\n    ) -> Result<Columns> {\n        let dbtable = self.get_log_table_name(\"system\", \"aggregated_zookeeper_log\");\n        return self\n            .execute(&format!(\n                r#\"\n                    SELECT\n                        event_time,\n                        session_id,\n                        parent_path,\n                        operation::String AS operation,\n                        count,\n                        mapKeys(errors) AS error_names,\n                        mapValues(errors) AS error_counts,\n                        average_latency,\n                        component\n                    FROM {dbtable}\n                    WHERE event_time >= toDateTime(fromUnixTimestamp64Nano({start}))\n                      AND event_time <= toDateTime(fromUnixTimestamp64Nano({end}))\n                    ORDER BY event_time\n                    \"#,\n                dbtable = dbtable,\n                start = start\n                    .timestamp_nanos_opt()\n                    .ok_or(Error::msg(\"Invalid start\"))?,\n                end = end.timestamp_nanos_opt().ok_or(Error::msg(\"Invalid end\"))?,\n            ))\n            .await;\n    }\n\n    pub async fn get_warnings(&self) -> Result<Vec<String>> {\n        let table_exists: u64 = self\n            .execute(\n              
  \"SELECT count() FROM system.tables WHERE database = 'system' AND name = 'warnings'\",\n            )\n            .await?\n            .get(0, \"count()\")?;\n        if table_exists == 0 {\n            return Ok(Vec::new());\n        }\n\n        let block = self.execute(\"SELECT message FROM system.warnings\").await?;\n        let warnings: Vec<String> = collect_values(&block, \"message\");\n        let filtered: Vec<String> = warnings\n            .into_iter()\n            .filter(|w| !w.contains(\"transparent_hugepage\") && !w.starts_with(\"Obsolete settings\"))\n            .collect();\n        Ok(filtered)\n    }\n\n    pub async fn execute(&self, query: &str) -> Result<Columns> {\n        let columns = self\n            .pool\n            .get_handle()\n            .await?\n            .query(query)\n            .fetch_all()\n            .await?;\n        log::trace!(\"Received {} rows for query: {}\", columns.row_count(), query);\n        Ok(columns)\n    }\n\n    async fn execute_simple(&self, query: &str) -> Result<()> {\n        let mut client = self.pool.get_handle().await?;\n        let mut stream = client.query(query).stream_blocks();\n        let ret = stream.next().await;\n        if let Some(Err(err)) = ret {\n            return Err(Error::new(err));\n        } else {\n            return Ok(());\n        }\n    }\n\n    pub async fn get_cluster_hosts(&self) -> Result<Vec<String>> {\n        let cluster = self.options.cluster.clone().unwrap_or_default();\n        if cluster.is_empty() {\n            return Ok(Vec::new());\n        }\n\n        let query = format!(\n            \"SELECT DISTINCT hostName() AS host FROM clusterAllReplicas('{}', system.one) ORDER BY host\",\n            cluster\n        );\n\n        let columns = self.execute(&query).await?;\n        let mut hosts = Vec::new();\n        for i in 0..columns.row_count() {\n            if let Ok(host) = columns.get::<String, _>(i, \"host\") {\n                hosts.push(host);\n       
     }\n        }\n\n        Ok(hosts)\n    }\n\n    pub fn get_host_filter_clause(&self, selected_host: Option<&String>) -> String {\n        if let Some(host) = selected_host\n            && !host.is_empty()\n            && self.options.cluster.is_some()\n        {\n            return format!(\"AND hostName() = '{}'\", host.replace('\\'', \"''\"));\n        }\n        String::new()\n    }\n\n    // Filter for system.*_log reads. Without clusterAllReplicas(), hostName() collapses to the\n    // executor node, so we match on the persisted `hostname` column instead.\n    pub fn get_log_host_filter_clause(&self, selected_host: Option<&String>) -> String {\n        if let Some(host) = selected_host\n            && !host.is_empty()\n            && self.options.cluster.is_some()\n        {\n            let col = if self.shared_log_pipeline {\n                \"hostname\"\n            } else {\n                \"hostName()\"\n            };\n            return format!(\"AND {} = '{}'\", col, host.replace('\\'', \"''\"));\n        }\n        String::new()\n    }\n\n    // SELECT-side hostname expression for system.*_log reads. 
Pairs with get_log_host_filter_clause.\n    pub fn get_log_hostname_column(&self) -> &'static str {\n        if self.shared_log_pipeline {\n            \"hostname\"\n        } else {\n            \"hostName()\"\n        }\n    }\n\n    pub fn get_table_name(&self, database: &str, table: &str) -> String {\n        let cluster = self.options.cluster.clone().unwrap_or_default();\n        let history = self.options.history;\n\n        return match (history, cluster.is_empty()) {\n            (false, true) => format!(\"{}.{}\", database, table),\n            (true, false) => format!(\n                \"clusterAllReplicas('{}', merge('{}', '^{}'))\",\n                cluster, database, table\n            ),\n            (true, true) => format!(\"merge('{}', '^{}')\", database, table),\n            (false, false) => format!(\n                \"clusterAllReplicas('{}', '{}', '{}')\",\n                cluster, database, table\n            ),\n        };\n    }\n\n    // Variant for system.*_log tables. 
With use_shared_merge_tree_log_pipeline we can skip\n    // clusterAllReplicas() entirely — a single replica observes the whole cluster's rows.\n    pub fn get_log_table_name(&self, database: &str, table: &str) -> String {\n        if self.shared_log_pipeline {\n            let history = self.options.history;\n            return if history {\n                format!(\"merge('{}', '^{}')\", database, table)\n            } else {\n                format!(\"{}.{}\", database, table)\n            };\n        }\n        self.get_table_name(database, table)\n    }\n\n    pub fn get_table_name_no_history(&self, database: &str, table: &str) -> String {\n        let cluster = self.options.cluster.clone().unwrap_or_default();\n        return match cluster.is_empty() {\n            true => format!(\"{}.{}\", database, table),\n            false => format!(\n                \"clusterAllReplicas('{}', '{}', '{}')\",\n                cluster, database, table\n            ),\n        };\n    }\n}\n"
  },
  {
    "path": "src/interpreter/clickhouse_quirks.rs",
    "content": "use semver::{Version, VersionReq};\n\n#[derive(Debug, Clone, Copy)]\npub enum ClickHouseAvailableQuirks {\n    ProcessesElapsed = 1,\n    ProcessesCurrentDatabase = 2,\n    AsynchronousMetricsTotalIndexGranularityBytesInMemoryAllocated = 3,\n    TraceLogHasSymbols = 4,\n    SystemReplicasUUID = 8,\n    QueryLogPeakThreadsUsage = 16,\n    ProcessesPeakThreadsUsage = 32,\n    SystemBackgroundSchedulePool = 64,\n}\n\n// List of quirks (that requires workaround) or new features.\nconst QUIRKS: [(&str, ClickHouseAvailableQuirks); 8] = [\n    // https://github.com/ClickHouse/ClickHouse/pull/46047\n    //\n    // NOTE: I use here 22.13 because I have such version in production, which is more or less the\n    // same as 23.1\n    (\n        \">=22.13, <23.2\",\n        ClickHouseAvailableQuirks::ProcessesElapsed,\n    ),\n    // https://github.com/ClickHouse/ClickHouse/pull/22365\n    (\"<21.4\", ClickHouseAvailableQuirks::ProcessesCurrentDatabase),\n    // https://github.com/ClickHouse/ClickHouse/pull/80861\n    (\n        \">=24.11, <25.6\",\n        ClickHouseAvailableQuirks::AsynchronousMetricsTotalIndexGranularityBytesInMemoryAllocated,\n    ),\n    (\">=25.1\", ClickHouseAvailableQuirks::TraceLogHasSymbols),\n    (\">=25.11\", ClickHouseAvailableQuirks::SystemReplicasUUID),\n    // peak_threads_usage is available in system.query_log since 23.8\n    (\n        \">=23.8\",\n        ClickHouseAvailableQuirks::QueryLogPeakThreadsUsage,\n    ),\n    // peak_threads_usage is available in system.processes since 25.11\n    (\n        \">=25.11\",\n        ClickHouseAvailableQuirks::ProcessesPeakThreadsUsage,\n    ),\n    // system.background_schedule_pool_log is available since 25.12\n    (\n        \">=25.12\",\n        ClickHouseAvailableQuirks::SystemBackgroundSchedulePool,\n    ),\n];\n\npub struct ClickHouseQuirks {\n    // Return more verbose version for the UI\n    version_string: String,\n    mask: u64,\n}\n\n// Custom matcher, that will properly 
handle prerelease.\n// https://github.com/dtolnay/semver/issues/323#issuecomment-2432169904\nfn version_matches(version: &semver::Version, req: &semver::VersionReq) -> bool {\n    if req.matches(version) {\n        return true;\n    }\n\n    // This custom matching logic is needed, because semver cannot compare different version with pre-release tags\n    let mut version_without_pre = version.clone();\n    version_without_pre.pre = \"\".parse().unwrap();\n    for comp in &req.comparators {\n        if comp.matches(version) {\n            continue;\n        }\n\n        // If major & minor & patch are the same (or omitted),\n        // this means there is a mismatch on the pre-release tag\n        if comp.major == version.major\n            && comp.minor.is_none_or(|m| m == version.minor)\n            && comp.patch.is_none_or(|p| p == version.patch)\n        {\n            return false;\n        }\n\n        // Otherwise, compare without pre-release tags\n        let mut comp_without_pre = comp.clone();\n        comp_without_pre.pre = \"\".parse().unwrap();\n        if !comp_without_pre.matches(&version_without_pre) {\n            return false;\n        }\n    }\n    true\n}\n\nimpl ClickHouseQuirks {\n    pub fn new(version_string: String) -> Self {\n        // Version::parse() supports only x.y.z and nothing more, but we don't need anything more,\n        // only .minor may include new features.\n        let components = version_string\n            .strip_prefix('v')\n            .unwrap_or(&version_string)\n            .split('.')\n            .collect::<Vec<&str>>();\n        let mut ver_maj_min_patch_pre = components[0..3].join(\".\");\n        let version_pre = components.last().unwrap_or(&\"-testing\");\n        if !version_pre.ends_with(\"-stable\") {\n            log::warn!(\n                \"Non-stable version detected ({}), treating as older/development version\",\n                version_string\n            );\n            
ver_maj_min_patch_pre.push_str(&format!(\n                \"-{}\",\n                version_pre\n                    .split('-')\n                    .collect::<Vec<&str>>()\n                    .last()\n                    .unwrap_or(&\"alpha\")\n            ));\n        }\n        log::debug!(\"Version (maj.min.patch.pre): {}\", ver_maj_min_patch_pre);\n\n        let version = Version::parse(ver_maj_min_patch_pre.as_str())\n            .unwrap_or_else(|_| panic!(\"Cannot parse version: {}\", ver_maj_min_patch_pre));\n        log::debug!(\"Version: {}\", version);\n\n        let mut mask: u64 = 0;\n        for quirk in &QUIRKS {\n            let version_requirement = VersionReq::parse(quirk.0)\n                .unwrap_or_else(|_| panic!(\"Cannot parse version requirements for {:?}\", quirk.1));\n            if version_matches(&version, &version_requirement) {\n                mask |= quirk.1 as u64;\n                log::warn!(\"Apply quirk {:?}\", quirk.1);\n            }\n        }\n\n        return Self {\n            version_string,\n            mask,\n        };\n    }\n\n    pub fn get_version(&self) -> String {\n        return self.version_string.clone();\n    }\n\n    pub fn has(&self, quirk: ClickHouseAvailableQuirks) -> bool {\n        return (self.mask & quirk as u64) != 0;\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_stable_version() {\n        let quirks = ClickHouseQuirks::new(\"25.11.1.1-stable\".to_string());\n        assert_eq!(quirks.get_version(), \"25.11.1.1-stable\");\n        assert!(quirks.has(ClickHouseAvailableQuirks::SystemReplicasUUID));\n        assert!(quirks.has(ClickHouseAvailableQuirks::ProcessesPeakThreadsUsage));\n        assert!(quirks.has(ClickHouseAvailableQuirks::TraceLogHasSymbols));\n    }\n\n    #[test]\n    fn test_testing_version() {\n        let quirks = ClickHouseQuirks::new(\"25.11.1.1-testing\".to_string());\n        assert_eq!(quirks.get_version(), \"25.11.1.1-testing\");\n    
    assert!(!quirks.has(ClickHouseAvailableQuirks::SystemReplicasUUID));\n        assert!(!quirks.has(ClickHouseAvailableQuirks::ProcessesPeakThreadsUsage));\n    }\n\n    #[test]\n    fn test_next_testing_prerelease_version() {\n        let quirks = ClickHouseQuirks::new(\"25.12.1.1-testing\".to_string());\n        assert_eq!(quirks.get_version(), \"25.12.1.1-testing\");\n        assert!(quirks.has(ClickHouseAvailableQuirks::SystemReplicasUUID));\n        assert!(quirks.has(ClickHouseAvailableQuirks::ProcessesPeakThreadsUsage));\n    }\n\n    #[test]\n    fn test_version_with_v_prefix() {\n        let quirks = ClickHouseQuirks::new(\"v25.11.1.1-stable\".to_string());\n        assert_eq!(quirks.get_version(), \"v25.11.1.1-stable\");\n        assert!(quirks.has(ClickHouseAvailableQuirks::SystemReplicasUUID));\n    }\n\n    // Here are the tests only for version_matches(), in other aspects we are relying on semver tests\n}\n"
  },
  {
    "path": "src/interpreter/context.rs",
    "content": "use crate::actions::ActionDescription;\nuse crate::interpreter::{\n    ClickHouse, Worker,\n    debug_metrics::DebugMetrics,\n    options::{ChDigOptions, ChDigViews},\n    perfetto::PerfettoServer,\n};\nuse anyhow::Result;\nuse chrono::Duration;\nuse cursive::{Cursive, View, event::Event, event::EventResult, views::Dialog, views::OnEventView};\nuse std::sync::{Arc, Condvar, Mutex, atomic};\n\npub type ContextArc = Arc<Mutex<Context>>;\n\ntype GlobalActionCallback = Arc<Box<dyn Fn(&mut Cursive) + Send + Sync>>;\npub struct GlobalAction {\n    pub description: ActionDescription,\n    pub callback: GlobalActionCallback,\n}\n\ntype ViewActionCallback =\n    Arc<Box<dyn Fn(&mut dyn View) -> Result<Option<EventResult>> + Send + Sync>>;\npub struct ViewAction {\n    pub description: ActionDescription,\n    pub callback: ViewActionCallback,\n}\n\npub struct Context {\n    pub options: ChDigOptions,\n\n    pub clickhouse: Arc<ClickHouse>,\n    pub server_version: String,\n    pub worker: Worker,\n    pub background_runner_cv: Arc<(Mutex<()>, Condvar)>,\n    pub background_runner_force: Arc<atomic::AtomicBool>,\n    pub background_runner_summary_force: Arc<atomic::AtomicBool>,\n\n    pub cb_sink: cursive::CbSink,\n\n    pub global_actions: Vec<GlobalAction>,\n    pub views_menu_actions: Vec<GlobalAction>,\n    pub view_actions: Vec<ViewAction>,\n\n    pub pending_view_callback: Option<ViewActionCallback>,\n    pub view_registry: crate::view::ViewRegistry,\n\n    pub search_history: crate::view::search_history::SearchHistory,\n\n    pub selected_host: Option<String>,\n    pub current_view: Option<ChDigViews>,\n\n    pub perfetto_server: Option<Arc<PerfettoServer>>,\n\n    pub queries_filter: Arc<Mutex<String>>,\n    pub queries_limit: Arc<Mutex<u64>>,\n\n    pub debug_metrics: Arc<DebugMetrics>,\n}\n\nimpl Context {\n    pub async fn new(\n        options: ChDigOptions,\n        clickhouse: Arc<ClickHouse>,\n        cb_sink: cursive::CbSink,\n    ) -> 
Result<ContextArc> {\n        let server_version = clickhouse.version();\n        let debug_metrics = DebugMetrics::new();\n        let worker = Worker::new();\n        let background_runner_cv = Arc::new((Mutex::new(()), Condvar::new()));\n        let background_runner_force = Arc::new(atomic::AtomicBool::new(false));\n        let background_runner_summary_force = Arc::new(atomic::AtomicBool::new(false));\n\n        let view_registry = crate::view::ViewRegistry::new();\n\n        let queries_filter = Arc::new(Mutex::new(String::new()));\n        let queries_limit = Arc::new(Mutex::new(options.view.queries_limit));\n\n        // Metrics are always collected; display is toggled with `!`. The refresh thread\n        // sleeps when hidden, so this is free when unused.\n        debug_metrics.spawn_refresh(cb_sink.clone(), std::time::Duration::from_millis(500));\n\n        let context = Arc::new(Mutex::new(Context {\n            options,\n            clickhouse,\n            server_version,\n            worker,\n            background_runner_cv,\n            background_runner_force,\n            background_runner_summary_force,\n            cb_sink,\n            global_actions: Vec::new(),\n            views_menu_actions: Vec::new(),\n            view_actions: Vec::new(),\n            pending_view_callback: None,\n            view_registry,\n            search_history: crate::view::search_history::SearchHistory::new(),\n            selected_host: None,\n            current_view: None,\n            perfetto_server: None,\n            queries_filter,\n            queries_limit,\n            debug_metrics,\n        }));\n\n        context.lock().unwrap().worker.start(context.clone());\n\n        return Ok(context);\n    }\n\n    pub fn add_global_action<F, E>(\n        &mut self,\n        siv: &mut Cursive,\n        text: &'static str,\n        event: E,\n        cb: F,\n    ) where\n        F: Fn(&mut Cursive) + Send + Sync + Copy + 'static,\n        E: Into<Event>,\n    
{\n        let event = event.into();\n        let action = GlobalAction {\n            description: ActionDescription { text, event },\n            callback: Arc::new(Box::new(cb)),\n        };\n        siv.add_global_callback(action.description.event.clone(), cb);\n        self.global_actions.push(action);\n    }\n    pub fn add_global_action_without_shortcut<F>(\n        &mut self,\n        siv: &mut Cursive,\n        text: &'static str,\n        cb: F,\n    ) where\n        F: Fn(&mut Cursive) + Send + Sync + Copy + 'static,\n    {\n        return self.add_global_action(siv, text, Event::Unknown(Vec::from([0u8])), cb);\n    }\n\n    pub fn add_view<F>(&mut self, text: &'static str, cb: F)\n    where\n        F: Fn(&mut Cursive) + Send + Sync + 'static,\n    {\n        let action = GlobalAction {\n            description: ActionDescription {\n                text,\n                event: Event::Unknown(Vec::from([0u8])),\n            },\n            callback: Arc::new(Box::new(cb)),\n        };\n        self.views_menu_actions.push(action);\n    }\n\n    pub fn register_provider(&mut self, provider: Arc<dyn crate::view::ViewProvider>) {\n        let name = provider.name();\n        self.view_registry.register(provider);\n        self.add_view(name, move |siv| {\n            let context = siv.user_data::<ContextArc>().unwrap().clone();\n            let provider = context.lock().unwrap().view_registry.get(name);\n            {\n                let mut ctx = context.lock().unwrap();\n                ctx.current_view = Some(provider.view_type());\n            }\n            provider.show(siv, context.clone());\n        });\n    }\n\n    pub fn add_view_action<F, E, V>(\n        &mut self,\n        view: &mut OnEventView<V>,\n        text: &'static str,\n        event: E,\n        cb: F,\n    ) where\n        F: Fn(&mut dyn View) -> Result<Option<EventResult>> + Send + Sync + Copy + 'static,\n        E: Into<Event>,\n        V: View,\n    {\n        let event = 
event.into();\n        let action = ViewAction {\n            description: ActionDescription { text, event },\n            callback: Arc::new(Box::new(cb)),\n        };\n        let event = action.description.event.clone();\n        let cb = action.callback.clone();\n        view.set_on_event_inner(event, move |sub_view, _event| {\n            let result = cb.as_ref()(sub_view);\n            match result {\n                Err(err) => {\n                    return Some(EventResult::with_cb_once(move |siv: &mut Cursive| {\n                        siv.add_layer(Dialog::info(err.to_string()));\n                    }));\n                }\n                Ok(event) => return event,\n            }\n        });\n        self.view_actions.push(action);\n    }\n\n    pub fn add_view_action_without_shortcut<F, V>(\n        &mut self,\n        view: &mut OnEventView<V>,\n        text: &'static str,\n        cb: F,\n    ) where\n        F: Fn(&mut dyn View) -> Result<Option<EventResult>> + Send + Sync + Copy + 'static,\n        V: View,\n    {\n        return self.add_view_action(view, text, Event::Unknown(Vec::from([0u8])), cb);\n    }\n\n    pub fn get_or_start_perfetto_server(&mut self) -> Arc<PerfettoServer> {\n        if let Some(ref server) = self.perfetto_server {\n            return server.clone();\n        }\n        let server = Arc::new(PerfettoServer::new());\n        self.perfetto_server = Some(server.clone());\n        server\n    }\n\n    pub fn trigger_view_refresh(&self) {\n        self.background_runner_force\n            .store(true, atomic::Ordering::SeqCst);\n        self.background_runner_summary_force\n            .store(true, atomic::Ordering::SeqCst);\n        self.background_runner_cv.1.notify_all();\n    }\n\n    pub fn shift_time_interval(&mut self, is_sub: bool, minutes: i64) {\n        let new_start = &mut self.options.view.start;\n        let new_end = &mut self.options.view.end;\n\n        if is_sub {\n            *new_start -= 
Duration::try_minutes(minutes).unwrap();\n            *new_end -= Duration::try_minutes(minutes).unwrap();\n            log::debug!(\n                \"Set time frame to ({}, {}) ({} minutes backward)\",\n                new_start,\n                new_end,\n                minutes\n            );\n        } else {\n            *new_start += Duration::try_minutes(minutes).unwrap();\n            *new_end += Duration::try_minutes(minutes).unwrap();\n            log::debug!(\n                \"Set time frame to ({}, {}) ({} minutes forward)\",\n                new_start,\n                new_end,\n                minutes\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "src/interpreter/debug_metrics.rs",
    "content": "//! Internal chdig observability counters, rendered into the status bar when toggled with `!`.\n//!\n//! Metrics are recorded unconditionally — the cost is two atomic ops per worker event plus a\n//! lock-and-push on a ~256-entry ring buffer. Display is gated on a toggle flag: when off\n//! the refresh thread sleeps and does not ping the event loop, so there is no UI cost either.\n//!\n//! Picks:\n//! - Nearest-rank percentile over a fixed-size [`Histogram`] (O(N log N) per snapshot,\n//!   N≤256). Simpler than an online estimator (t-digest, HDR histogram) and accurate enough\n//!   for a status bar at a few Hz.\n//! - Event-loop latency is measured as a `cb_sink` round-trip, not frame render time.\n//!   Cursive does not expose per-frame hooks; round-trip drift is the quantity the user\n//!   actually perceives as \"responsiveness\". Tracked as a histogram (not a single latest\n//!   value) so transient spikes don't get hidden behind whatever the most recent ping saw.\n//! - [`InFlightGuard`] is an RAII guard so early returns and panics in the worker cannot\n//!   leak the counter.\n\nuse std::collections::VecDeque;\nuse std::fmt;\nuse std::sync::atomic::{AtomicBool, AtomicU64, Ordering};\nuse std::sync::{Arc, Mutex};\nuse std::thread;\nuse std::time::{Duration, Instant};\n\nuse cursive::CbSink;\n\nconst SAMPLES_CAPACITY: usize = 256;\n\n/// Fixed-capacity ring-buffer histogram over `Duration` samples. 
Thread-safe via an\n/// internal `Mutex` — contention is negligible at the rates we record (≤ a few Hz).\npub struct Histogram {\n    samples: Mutex<VecDeque<Duration>>,\n}\n\nimpl Histogram {\n    fn new() -> Self {\n        Histogram {\n            samples: Mutex::new(VecDeque::with_capacity(SAMPLES_CAPACITY)),\n        }\n    }\n\n    pub fn record(&self, d: Duration) {\n        let mut s = self.samples.lock().unwrap();\n        if s.len() == SAMPLES_CAPACITY {\n            s.pop_front();\n        }\n        s.push_back(d);\n    }\n\n    /// Nearest-rank (p50, p90, p99). Returns zeros on an empty histogram.\n    pub fn percentiles(&self) -> (Duration, Duration, Duration) {\n        let s = self.samples.lock().unwrap();\n        if s.is_empty() {\n            return (Duration::ZERO, Duration::ZERO, Duration::ZERO);\n        }\n        let mut v: Vec<Duration> = s.iter().copied().collect();\n        v.sort_unstable();\n        (percentile(&v, 50), percentile(&v, 90), percentile(&v, 99))\n    }\n}\n\npub struct DebugMetrics {\n    shown: AtomicBool,\n    in_flight: AtomicU64,\n    /// `cb_sink` round-trip latency — proxy for \"how responsive does chdig feel\".\n    ui_lag: Histogram,\n    /// Per-worker-event processing duration (a worker event is one ClickHouse query /\n    /// action chdig issued).\n    event: Histogram,\n}\n\n#[must_use = \"Drop decrements the in-flight counter; hold this for the duration of work\"]\npub struct InFlightGuard(Arc<DebugMetrics>);\n\nimpl Drop for InFlightGuard {\n    fn drop(&mut self) {\n        self.0.in_flight.fetch_sub(1, Ordering::Relaxed);\n    }\n}\n\nimpl DebugMetrics {\n    pub fn new() -> Arc<Self> {\n        Arc::new(DebugMetrics {\n            shown: AtomicBool::new(false),\n            in_flight: AtomicU64::new(0),\n            ui_lag: Histogram::new(),\n            event: Histogram::new(),\n        })\n    }\n\n    pub fn is_shown(&self) -> bool {\n        self.shown.load(Ordering::Relaxed)\n    }\n\n    /// Flips 
visibility and returns the new state.\n    pub fn toggle_shown(&self) -> bool {\n        !self.shown.fetch_xor(true, Ordering::Relaxed)\n    }\n\n    pub fn track_in_flight(self: &Arc<Self>) -> InFlightGuard {\n        self.in_flight.fetch_add(1, Ordering::Relaxed);\n        InFlightGuard(Arc::clone(self))\n    }\n\n    pub fn record_event(&self, d: Duration) {\n        self.event.record(d);\n    }\n\n    pub fn record_ui_lag(&self, d: Duration) {\n        self.ui_lag.record(d);\n    }\n\n    pub fn snapshot(&self) -> MetricsSnapshot {\n        let (lag_p50, lag_p90, lag_p99) = self.ui_lag.percentiles();\n        let (evt_p50, evt_p90, evt_p99) = self.event.percentiles();\n        MetricsSnapshot {\n            in_flight: self.in_flight.load(Ordering::Relaxed),\n            lag_p50,\n            lag_p90,\n            lag_p99,\n            evt_p50,\n            evt_p90,\n            evt_p99,\n        }\n    }\n\n    /// Spawn a background thread that, *while visibility is on*, probes event-loop lag\n    /// via a `cb_sink` round-trip and pushes the latest snapshot into the status bar.\n    /// When visibility is off the thread sleeps, so the hidden cost is just a dormant\n    /// thread (no cb_sink traffic, no redraws). 
Exits when the sink is closed.\n    pub fn spawn_refresh(self: &Arc<Self>, cb_sink: CbSink, interval: Duration) {\n        let metrics = Arc::clone(self);\n        thread::Builder::new()\n            .name(\"chdig-debug-metrics\".into())\n            .spawn(move || refresh_loop(metrics, cb_sink, interval))\n            .expect(\"spawn chdig-debug-metrics\");\n    }\n}\n\nfn refresh_loop(metrics: Arc<DebugMetrics>, cb_sink: CbSink, interval: Duration) {\n    loop {\n        thread::sleep(interval);\n        if !metrics.is_shown() {\n            continue;\n        }\n        let sent_at = Instant::now();\n        let metrics = Arc::clone(&metrics);\n        let send_result = cb_sink.send(Box::new(move |siv: &mut cursive::Cursive| {\n            metrics.record_ui_lag(sent_at.elapsed());\n            let text = metrics.snapshot().to_string();\n            crate::view::Navigation::set_statusbar_debug(siv, text);\n        }));\n        if send_result.is_err() {\n            break;\n        }\n    }\n}\n\n#[derive(Default, Clone, Copy)]\npub struct MetricsSnapshot {\n    pub in_flight: u64,\n    pub lag_p50: Duration,\n    pub lag_p90: Duration,\n    pub lag_p99: Duration,\n    pub evt_p50: Duration,\n    pub evt_p90: Duration,\n    pub evt_p99: Duration,\n}\n\nimpl fmt::Display for MetricsSnapshot {\n    /// Status-bar line; written to be readable without a legend:\n    ///   * `UI lag`   – cb_sink round-trip percentiles (event loop responsiveness)\n    ///   * `Active`   – worker events currently being processed\n    ///   * `Event`    – worker-event processing-time percentiles (one per ClickHouse query)\n    ///\n    /// All triples are `p50/p90/p99`, nearest-rank over the last [`SAMPLES_CAPACITY`]\n    /// samples of each kind.\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(\n            f,\n            \"UI lag p50/p90/p99: {}/{}/{} ms  Active: {}  Event p50/p90/p99: {}/{}/{} ms\",\n            self.lag_p50.as_millis(),\n            
self.lag_p90.as_millis(),\n            self.lag_p99.as_millis(),\n            self.in_flight,\n            self.evt_p50.as_millis(),\n            self.evt_p90.as_millis(),\n            self.evt_p99.as_millis(),\n        )\n    }\n}\n\n/// Nearest-rank percentile; q ∈ 0..=100. Undefined on an empty slice — callers must guard.\nfn percentile<T: Copy>(sorted: &[T], q: u32) -> T {\n    debug_assert!(q <= 100);\n    debug_assert!(!sorted.is_empty());\n    let rank = (q as usize * sorted.len()).div_ceil(100).max(1);\n    sorted[rank - 1]\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn percentile_integer_ranks() {\n        let v: Vec<u64> = (1..=10).collect();\n        assert_eq!(percentile(&v, 50), 5);\n        assert_eq!(percentile(&v, 90), 9);\n        assert_eq!(percentile(&v, 99), 10);\n        assert_eq!(percentile(&v, 100), 10);\n    }\n\n    #[test]\n    fn percentile_single_element() {\n        assert_eq!(percentile(&[42u64], 50), 42);\n        assert_eq!(percentile(&[42u64], 99), 42);\n    }\n\n    #[test]\n    fn histogram_caps_at_capacity() {\n        let h = Histogram::new();\n        // Feed monotonic samples well past capacity and assert that the p99 reflects\n        // only the most recent SAMPLES_CAPACITY values (earliest ones were evicted).\n        let total = SAMPLES_CAPACITY + 50;\n        for i in 0..total {\n            h.record(Duration::from_millis(i as u64));\n        }\n        let (_p50, _p90, p99) = h.percentiles();\n        // Oldest retained = total - SAMPLES_CAPACITY = 50; newest = total - 1 = 305.\n        // Nearest-rank p99: rank = ceil(99 * 256 / 100) = 254; value = 50 + (254-1) = 303.\n        assert_eq!(p99, Duration::from_millis(303));\n    }\n\n    #[test]\n    fn histogram_empty_returns_zero() {\n        let h = Histogram::new();\n        assert_eq!(\n            h.percentiles(),\n            (Duration::ZERO, Duration::ZERO, Duration::ZERO)\n        );\n    }\n\n    #[test]\n    fn 
ui_lag_and_event_are_independent() {\n        let m = DebugMetrics::new();\n        m.record_ui_lag(Duration::from_millis(5));\n        m.record_event(Duration::from_millis(500));\n        let s = m.snapshot();\n        assert_eq!(s.lag_p50, Duration::from_millis(5));\n        assert_eq!(s.evt_p50, Duration::from_millis(500));\n    }\n\n    #[test]\n    fn in_flight_guard_is_raii() {\n        let m = DebugMetrics::new();\n        assert_eq!(m.snapshot().in_flight, 0);\n        let g1 = m.track_in_flight();\n        let g2 = m.track_in_flight();\n        assert_eq!(m.snapshot().in_flight, 2);\n        drop(g1);\n        assert_eq!(m.snapshot().in_flight, 1);\n        drop(g2);\n        assert_eq!(m.snapshot().in_flight, 0);\n    }\n\n    #[test]\n    fn toggle_shown_returns_new_state() {\n        let m = DebugMetrics::new();\n        assert!(!m.is_shown());\n        assert!(m.toggle_shown());\n        assert!(m.is_shown());\n        assert!(!m.toggle_shown());\n        assert!(!m.is_shown());\n    }\n\n    #[test]\n    fn display_format_is_readable() {\n        let s = MetricsSnapshot {\n            in_flight: 3,\n            lag_p50: Duration::from_millis(1),\n            lag_p90: Duration::from_millis(4),\n            lag_p99: Duration::from_millis(12),\n            evt_p50: Duration::from_millis(12),\n            evt_p90: Duration::from_millis(87),\n            evt_p99: Duration::from_millis(420),\n        };\n        let rendered = s.to_string();\n        assert!(rendered.contains(\"UI lag p50/p90/p99: 1/4/12 ms\"));\n        assert!(rendered.contains(\"Active: 3\"));\n        assert!(rendered.contains(\"Event p50/p90/p99: 12/87/420 ms\"));\n    }\n}\n"
  },
  {
    "path": "src/interpreter/flamegraph.rs",
    "content": "use crate::interpreter::clickhouse::Columns;\nuse crate::pastila;\nuse anyhow::{Error, Result};\nuse crossterm::event::{self, Event as CrosstermEvent, KeyEventKind};\nuse flamelens::app::{App, AppResult};\nuse flamelens::flame::FlameGraph;\nuse flamelens::handler::handle_key_events;\nuse flamelens::ui;\nuse ratatui::Terminal;\nuse ratatui::backend::CrosstermBackend;\nuse std::io;\n\npub fn block_to_folded(block: &Columns) -> String {\n    block\n        .rows()\n        .map(|x| {\n            [\n                x.get::<String, _>(0).unwrap(),\n                x.get::<u64, _>(1).unwrap().to_string(),\n            ]\n            .join(\" \")\n        })\n        .collect::<Vec<String>>()\n        .join(\"\\n\")\n}\n\nfn run_flamelens(mut app: App) -> AppResult<()> {\n    let backend = CrosstermBackend::new(io::stderr());\n    let mut terminal = Terminal::new(backend)?;\n    let timeout = std::time::Duration::from_secs(1);\n\n    terminal.clear()?;\n\n    // Start the main loop.\n    while app.running {\n        terminal.draw(|frame| {\n            ui::render(&mut app, frame);\n            if let Some(input_buffer) = &app.input_buffer\n                && let Some(cursor) = input_buffer.cursor\n            {\n                frame.set_cursor_position((cursor.0, cursor.1));\n            }\n        })?;\n\n        // FIXME: note, right now I cannot use EventHandle with Tui, since EventHandle is not\n        // terminated gracefully\n        if event::poll(timeout).expect(\"failed to poll new events\") {\n            match event::read().expect(\"unable to read event\") {\n                CrosstermEvent::Key(e) => {\n                    if e.kind == KeyEventKind::Press {\n                        handle_key_events(e, &mut app)?\n                    }\n                }\n                CrosstermEvent::Mouse(_e) => {}\n                CrosstermEvent::Resize(_w, _h) => {}\n                CrosstermEvent::FocusGained => {}\n                
CrosstermEvent::FocusLost => {}\n                CrosstermEvent::Paste(_) => {}\n            }\n        }\n    }\n\n    terminal.clear()?;\n    // ratatui's Terminal::drop may show the cursor, re-hide it for cursive\n    drop(terminal);\n    crossterm::execute!(io::stderr(), crossterm::cursor::Hide)?;\n\n    Ok(())\n}\n\npub fn show(title: &'static str, data: String) -> AppResult<()> {\n    if data.trim().is_empty() {\n        return Err(Error::msg(\"Flamegraph is empty\").into());\n    }\n\n    let flamegraph = FlameGraph::from_string(data, true);\n    run_flamelens(App::with_flamegraph(title, flamegraph))\n}\n\n/// Show a differential flamegraph: `after` rendered with per-frame coloring\n/// against the `before` baseline (handled by flamelens's `diff_mode`).\npub fn show_diff(title: &'static str, before: String, after: String) -> AppResult<()> {\n    if before.trim().is_empty() && after.trim().is_empty() {\n        return Err(Error::msg(\"Flamegraph diff is empty (both queries have no samples)\").into());\n    }\n\n    let before_fg = FlameGraph::from_string(before, true);\n    let mut after_fg = FlameGraph::from_string(after, true);\n    after_fg.set_diff_against(&before_fg);\n    run_flamelens(App::with_flamegraph(title, after_fg))\n}\n\npub async fn share(\n    data: String,\n    pastila_clickhouse_host: &str,\n    pastila_url: &str,\n) -> Result<String> {\n    if data.trim().is_empty() {\n        return Err(Error::msg(\"Flamegraph is empty\"));\n    }\n\n    let pastila_url =\n        pastila::upload_encrypted(&data, pastila_clickhouse_host, pastila_url).await?;\n    return Ok(format!(\"https://whodidit.you/#profileURL={}\", pastila_url));\n}\n"
  },
  {
    "path": "src/interpreter/mod.rs",
    "content": "// pub for clickhouse::Columns\nmod background_runner;\npub mod clickhouse;\nmod clickhouse_quirks;\nmod context;\npub mod debug_metrics;\nmod query;\nmod worker;\n// only functions\npub mod flamegraph;\npub mod options;\npub mod perfetto;\n\npub use clickhouse::ClickHouse;\npub use clickhouse::TextLogArguments;\npub use clickhouse_quirks::ClickHouseAvailableQuirks;\npub use clickhouse_quirks::ClickHouseQuirks;\npub use context::Context;\npub use context::ContextArc;\npub use worker::Worker;\n\npub type WorkerEvent = worker::Event;\npub type Query = query::Query;\npub type BackgroundRunner = background_runner::BackgroundRunner;\n"
  },
  {
    "path": "src/interpreter/options.rs",
    "content": "use crate::common::RelativeDateTime;\nuse anyhow::{Result, anyhow};\nuse clap::{ArgAction, Args, CommandFactory, Parser, Subcommand, ValueEnum, builder::ArgPredicate};\nuse clap_complete::{Shell, generate};\nuse percent_encoding::{NON_ALPHANUMERIC, utf8_percent_encode};\nuse quick_xml::de::Deserializer as XmlDeserializer;\nuse serde::Deserialize;\nuse serde_yaml::Deserializer as YamlDeserializer;\nuse std::collections::HashMap;\nuse std::env;\nuse std::ffi::OsString;\nuse std::fs;\nuse std::io;\nuse std::net::{SocketAddr, ToSocketAddrs};\nuse std::path;\nuse std::process;\nuse std::str::FromStr;\nuse std::time;\n\n#[derive(Deserialize, Debug, PartialEq)]\nstruct ClickHouseClientConfigOpenSSLClient {\n    #[serde(rename = \"verificationMode\")]\n    verification_mode: Option<String>,\n    #[serde(rename = \"certificateFile\")]\n    certificate_file: Option<String>,\n    #[serde(rename = \"privateKeyFile\")]\n    private_key_file: Option<String>,\n    #[serde(rename = \"caConfig\")]\n    ca_config: Option<String>,\n}\n#[derive(Deserialize, Debug, PartialEq)]\nstruct ClickHouseClientConfigOpenSSL {\n    client: Option<ClickHouseClientConfigOpenSSLClient>,\n}\n\n#[derive(Deserialize, Debug, PartialEq)]\nstruct ClickHouseClientConfigConnectionsCredentials {\n    name: String,\n    hostname: Option<String>,\n    port: Option<u16>,\n    user: Option<String>,\n    password: Option<String>,\n    secure: Option<bool>,\n    // chdig analog for accept_invalid_certificate\n    skip_verify: Option<bool>,\n    #[serde(rename = \"accept-invalid-certificate\")]\n    accept_invalid_certificate: Option<bool>,\n    ca_certificate: Option<String>,\n    client_certificate: Option<String>,\n    client_private_key: Option<String>,\n    history_file: Option<String>,\n}\n#[derive(Deserialize, Default, Debug, PartialEq)]\nstruct ClickHouseClientConfig {\n    user: Option<String>,\n    password: Option<String>,\n    secure: Option<bool>,\n    // chdig analog for 
accept_invalid_certificate\n    skip_verify: Option<bool>,\n    #[serde(rename = \"accept-invalid-certificate\")]\n    accept_invalid_certificate: Option<bool>,\n    open_ssl: Option<ClickHouseClientConfigOpenSSL>,\n    history_file: Option<String>,\n    connections_credentials: Vec<ClickHouseClientConfigConnectionsCredentials>,\n}\n\n#[derive(Deserialize, Default)]\nstruct XmlClickHouseClientConfigConnectionsCredentialsConnection {\n    connection: Option<Vec<ClickHouseClientConfigConnectionsCredentials>>,\n}\n#[derive(Deserialize)]\nstruct XmlClickHouseClientConfig {\n    user: Option<String>,\n    password: Option<String>,\n    secure: Option<bool>,\n    // chdig analog for accept_invalid_certificate\n    skip_verify: Option<bool>,\n    #[serde(rename = \"accept-invalid-certificate\")]\n    accept_invalid_certificate: Option<bool>,\n    #[serde(rename = \"openSSL\")]\n    open_ssl: Option<ClickHouseClientConfigOpenSSL>,\n    history_file: Option<String>,\n    connections_credentials: Option<XmlClickHouseClientConfigConnectionsCredentialsConnection>,\n}\n\n#[derive(Deserialize)]\nstruct YamlClickHouseClientConfig {\n    user: Option<String>,\n    password: Option<String>,\n    secure: Option<bool>,\n    // chdig analog for accept_invalid_certificate\n    skip_verify: Option<bool>,\n    #[serde(rename = \"accept-invalid-certificate\")]\n    accept_invalid_certificate: Option<bool>,\n    #[serde(rename = \"openSSL\")]\n    open_ssl: Option<ClickHouseClientConfigOpenSSL>,\n    history_file: Option<String>,\n    connections_credentials: Option<HashMap<String, ClickHouseClientConfigConnectionsCredentials>>,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Subcommand)]\npub enum ChDigViews {\n    /// Show now running queries (from system.processes)\n    Queries,\n    /// Show last running queries (from system.query_log)\n    LastQueries,\n    /// Show slow (slower than 1 second, ordered by duration) queries (from system.query_log)\n    SlowQueries,\n    /// Show 
merges for MergeTree engine (system.merges)\n    Merges,\n    /// Show S3 Queue (system.s3queue_metadata_cache)\n    S3Queue,\n    /// Show Azure Queue (system.azure_queue_metadata_cache)\n    AzureQueue,\n    /// Show mutations for MergeTree engine (system.mutations)\n    Mutations,\n    /// Show replication queue for ReplicatedMergeTree engine (system.replication_queue)\n    ReplicationQueue,\n    /// Show fetches for ReplicatedMergeTree engine (system.replicated_fetches)\n    ReplicatedFetches,\n    /// Show information about replicas (system.replicas)\n    Replicas,\n    /// Tables\n    Tables,\n    /// Show all errors that happened in a server since start (system.errors)\n    Errors,\n    /// Show information about backups (system.backups)\n    Backups,\n    /// Show information about dictionaries (system.dictionaries)\n    Dictionaries,\n    /// Show server logs (system.text_log)\n    ServerLogs,\n    /// Show loggers (system.text_log)\n    Loggers,\n    /// Show background schedule pool tasks (system.background_schedule_pool)\n    BackgroundSchedulePool,\n    /// Show background schedule pool logs (system.background_schedule_pool_log)\n    BackgroundSchedulePoolLog,\n    /// Show table parts (system.parts)\n    TableParts,\n    /// Show asynchronous inserts (system.asynchronous_inserts)\n    AsynchronousInserts,\n    /// Show part log (system.part_log)\n    PartLog,\n    /// Spawn client inside chdig\n    Client,\n}\n\n#[derive(Parser, Clone)]\n#[command(name = \"chdig\")]\n#[command(author, version, about, long_about = None)]\npub struct ChDigOptions {\n    #[command(flatten)]\n    pub clickhouse: ClickHouseOptions,\n    #[command(flatten)]\n    pub view: ViewOptions,\n    #[command(subcommand)]\n    pub start_view: Option<ChDigViews>,\n    #[command(flatten)]\n    pub service: ServiceOptions,\n    #[clap(skip)]\n    pub perfetto: ChDigPerfettoConfig,\n}\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, ValueEnum, Deserialize)]\n#[serde(rename_all = 
\"lowercase\")]\npub enum LogsOrder {\n    #[default]\n    Asc,\n    Desc,\n}\n\n#[derive(Args, Clone, Default)]\npub struct ClickHouseOptions {\n    #[arg(short('u'), long, value_name = \"URL\", env = \"CHDIG_URL\")]\n    pub url: Option<String>,\n    /// Overrides host in --url (for clickhouse-client compatibility)\n    #[arg(long, env = \"CLICKHOUSE_HOST\")]\n    pub host: Option<String>,\n    /// Overrides port in --url (for clickhouse-client compatibility)\n    #[arg(long)]\n    pub port: Option<u16>,\n    /// Overrides user in --url (for clickhouse-client compatibility)\n    #[arg(long, env = \"CLICKHOUSE_USER\")]\n    pub user: Option<String>,\n    /// Overrides password in --url (for clickhouse-client compatibility)\n    #[arg(long, env = \"CLICKHOUSE_PASSWORD\")]\n    pub password: Option<String>,\n    /// Overrides secure=1 in --url (for clickhouse-client compatibility)\n    #[arg(long, action = ArgAction::SetTrue)]\n    pub secure: bool,\n    /// ClickHouse like config (with some advanced features)\n    #[arg(long, env = \"CLICKHOUSE_CONFIG\")]\n    pub config: Option<String>,\n    #[arg(short('C'), long)]\n    pub connection: Option<String>,\n    // Safe version for \"url\" (to show in UI)\n    #[clap(skip)]\n    pub url_safe: String,\n    #[arg(short('c'), long)]\n    pub cluster: Option<String>,\n    /// Aggregate system.*_log historical data, using merge()\n    #[arg(long, action = ArgAction::SetTrue)]\n    pub history: bool,\n    #[arg(long, action = ArgAction::SetTrue, overrides_with = \"history\")]\n    pub no_history: bool,\n    /// Do not hide internal (spawned by chdig) queries\n    #[arg(long, action = ArgAction::SetTrue)]\n    pub internal_queries: bool,\n    #[arg(long, action = ArgAction::SetTrue, overrides_with = \"internal_queries\")]\n    pub no_internal_queries: bool,\n    /// Limit for logs\n    #[arg(long, default_value_t = 100000)]\n    pub limit: u64,\n    /// Sort order for logs (desc returns the newest --limit rows, useful for 
long backups)\n    #[arg(long, value_enum, default_value_t = LogsOrder::Asc)]\n    pub logs_order: LogsOrder,\n    /// Override server version (for dev builds with features already available). Should include\n    /// at least three components (maj.min.patch)\n    #[arg(long, hide = true)]\n    pub server_version: Option<String>,\n    /// Skip unavailable shards in distributed queries\n    #[arg(long, action = ArgAction::SetTrue)]\n    pub skip_unavailable_shards: bool,\n    #[clap(skip)]\n    pub history_file: Option<String>,\n}\n\nimpl ClickHouseOptions {\n    pub fn connection_info(&self) -> String {\n        if let Some(ref connection) = self.connection {\n            connection.clone()\n        } else if let Ok(url) = url::Url::parse(&self.url_safe) {\n            url.host_str().unwrap_or(\"localhost\").to_string()\n        } else {\n            self.url_safe.clone()\n        }\n    }\n}\n\n#[derive(Args, Clone)]\npub struct ViewOptions {\n    #[arg(\n        short('d'),\n        long,\n        value_parser = |arg: &str| -> Result<time::Duration> {Ok(time::Duration::from_millis(arg.parse()?))},\n        default_value = \"30000\",\n    )]\n    pub delay_interval: time::Duration,\n\n    #[arg(short('g'), long, action = ArgAction::SetTrue, default_value_if(\"cluster\", ArgPredicate::IsPresent, Some(\"true\")))]\n    /// Grouping distributed queries (turned on by default in --cluster mode)\n    pub group_by: bool,\n    #[arg(short('G'), long, action = ArgAction::SetTrue, overrides_with = \"group_by\")]\n    no_group_by: bool,\n\n    #[arg(long, action = ArgAction::SetTrue)]\n    /// Do not accumulate metrics for subqueries in the initial query\n    pub no_subqueries: bool,\n\n    /// Use short option -b, like atop(1) has\n    #[arg(long, short('b'), default_value = \"1hour\")]\n    /// Begin of the time interval to look at\n    pub start: RelativeDateTime,\n    #[arg(long, short('e'), default_value = \"\")]\n    /// End of the time interval\n    pub end: 
RelativeDateTime,\n\n    /// Wrap long lines\n    #[arg(long, action = ArgAction::SetTrue)]\n    pub wrap: bool,\n\n    /// Disable stripping common hostname prefix and suffix in queries and logs views\n    #[arg(long, action = ArgAction::SetTrue)]\n    pub no_strip_hostname_suffix: bool,\n\n    /// Limit for number of queries to render in queries views\n    #[arg(long, default_value_t = 10000)]\n    pub queries_limit: u64,\n    // TODO: --mouse/--no-mouse (see EXIT_MOUSE_SEQUENCE in termion)\n}\n\n#[derive(Args, Clone)]\npub struct ServiceOptions {\n    #[arg(long, value_enum)]\n    completion: Option<Shell>,\n    #[arg(long)]\n    /// Log (for debugging chdig itself)\n    pub log: Option<String>,\n    #[arg(\n        long,\n        default_value = \"https://uzg8q0g12h.eu-central-1.aws.clickhouse.cloud/?user=paste\"\n    )]\n    /// Pastila ClickHouse backend for uploading and sharing flamegraphs\n    pub pastila_clickhouse_host: String,\n    #[arg(long, default_value = \"https://pastila.nl/\")]\n    /// pastila.nl URL (only to show direct link to pastila in logs)\n    pub pastila_url: String,\n    /// Path to chdig config file (YAML)\n    #[arg(long, env = \"CHDIG_CONFIG\")]\n    pub chdig_config: Option<String>,\n}\n\n#[derive(Deserialize, Clone)]\n#[serde(default)]\npub struct ChDigPerfettoConfig {\n    pub opentelemetry_span_log: bool,\n    pub trace_log: bool,\n    pub query_metric_log: bool,\n    pub part_log: bool,\n    pub query_thread_log: bool,\n    pub text_log: bool,\n    pub text_log_android: bool,\n    pub per_server: bool,\n    pub metric_log: bool,\n    pub asynchronous_metric_log: bool,\n    pub asynchronous_insert_log: bool,\n    pub error_log: bool,\n    pub s3_queue_log: bool,\n    pub azure_queue_log: bool,\n    pub blob_storage_log: bool,\n    pub background_schedule_pool_log: bool,\n    pub session_log: bool,\n    pub aggregated_zookeeper_log: bool,\n}\n\nimpl Default for ChDigPerfettoConfig {\n    fn default() -> Self {\n        Self {\n    
        opentelemetry_span_log: true,\n            trace_log: true,\n            query_metric_log: false,\n            part_log: true,\n            query_thread_log: true,\n            text_log: true,\n            text_log_android: true,\n            per_server: true,\n            metric_log: true,\n            asynchronous_metric_log: false,\n            asynchronous_insert_log: true,\n            error_log: true,\n            s3_queue_log: true,\n            azure_queue_log: true,\n            blob_storage_log: true,\n            background_schedule_pool_log: true,\n            session_log: true,\n            aggregated_zookeeper_log: false,\n        }\n    }\n}\n\n#[derive(Deserialize, Default)]\n#[serde(default)]\nstruct ChDigConfig {\n    clickhouse: ChDigClickHouseConfig,\n    view: ChDigViewConfig,\n    service: ChDigServiceConfig,\n    perfetto: ChDigPerfettoConfig,\n}\n\n#[derive(Deserialize, Default)]\n#[serde(default)]\nstruct ChDigClickHouseConfig {\n    url: Option<String>,\n    host: Option<String>,\n    port: Option<u16>,\n    user: Option<String>,\n    password: Option<String>,\n    secure: Option<bool>,\n    config: Option<String>,\n    connection: Option<String>,\n    cluster: Option<String>,\n    history: Option<bool>,\n    internal_queries: Option<bool>,\n    limit: Option<u64>,\n    logs_order: Option<LogsOrder>,\n    skip_unavailable_shards: Option<bool>,\n}\n\n#[derive(Deserialize, Default)]\n#[serde(default)]\nstruct ChDigViewConfig {\n    delay_interval: Option<u64>,\n    group_by: Option<bool>,\n    no_subqueries: Option<bool>,\n    start: Option<String>,\n    end: Option<String>,\n    wrap: Option<bool>,\n    no_strip_hostname_suffix: Option<bool>,\n    queries_limit: Option<u64>,\n}\n\n#[derive(Deserialize, Default)]\n#[serde(default)]\nstruct ChDigServiceConfig {\n    log: Option<String>,\n    pastila_clickhouse_host: Option<String>,\n    pastila_url: Option<String>,\n}\n\nfn read_yaml_clickhouse_client_config(path: &str) -> 
Result<ClickHouseClientConfig> {\n    let file = fs::File::open(path)?;\n    let reader = io::BufReader::new(file);\n    let doc = YamlDeserializer::from_reader(reader);\n    let yaml_config = YamlClickHouseClientConfig::deserialize(doc)?;\n\n    let config = ClickHouseClientConfig {\n        user: yaml_config.user,\n        password: yaml_config.password,\n        secure: yaml_config.secure,\n        skip_verify: yaml_config.skip_verify,\n        accept_invalid_certificate: yaml_config.accept_invalid_certificate,\n        open_ssl: yaml_config.open_ssl,\n        history_file: yaml_config.history_file,\n        connections_credentials: yaml_config\n            .connections_credentials\n            .unwrap_or_default()\n            .into_values()\n            .collect(),\n    };\n    return Ok(config);\n}\nfn read_xml_clickhouse_client_config(path: &str) -> Result<ClickHouseClientConfig> {\n    let file = fs::File::open(path)?;\n    let reader = io::BufReader::new(file);\n    let mut doc = XmlDeserializer::from_reader(reader);\n    let xml_config = XmlClickHouseClientConfig::deserialize(&mut doc)?;\n\n    let config = ClickHouseClientConfig {\n        user: xml_config.user,\n        password: xml_config.password,\n        secure: xml_config.secure,\n        skip_verify: xml_config.skip_verify,\n        accept_invalid_certificate: xml_config.accept_invalid_certificate,\n        open_ssl: xml_config.open_ssl,\n        history_file: xml_config.history_file,\n        connections_credentials: xml_config\n            .connections_credentials\n            .unwrap_or_default()\n            .connection\n            .unwrap_or_default(),\n    };\n    return Ok(config);\n}\nmacro_rules! try_xml {\n    ( $path:expr ) => {\n        if path::Path::new($path).exists() {\n            log::info!(\"Loading {}\", $path);\n            return Some(read_xml_clickhouse_client_config($path));\n        }\n    };\n}\nmacro_rules! 
try_yaml {\n    ( $path:expr ) => {\n        if path::Path::new($path).exists() {\n            log::info!(\"Loading {}\", $path);\n            return Some(read_yaml_clickhouse_client_config($path));\n        }\n    };\n}\nfn try_default_clickhouse_client_config() -> Option<Result<ClickHouseClientConfig>> {\n    // Try XDG standard directory first\n    if let Ok(xdg_config_home) = env::var(\"XDG_CONFIG_HOME\") {\n        try_xml!(&format!(\"{}/clickhouse/config.xml\", xdg_config_home));\n        try_yaml!(&format!(\"{}/clickhouse/config.yml\", xdg_config_home));\n        try_yaml!(&format!(\"{}/clickhouse/config.yaml\", xdg_config_home));\n    }\n\n    // Try HOME-based locations\n    if let Ok(home) = env::var(\"HOME\") {\n        // XDG fallback: ~/.config\n        try_xml!(&format!(\"{}/.config/clickhouse/config.xml\", home));\n        try_yaml!(&format!(\"{}/.config/clickhouse/config.yml\", home));\n        try_yaml!(&format!(\"{}/.config/clickhouse/config.yaml\", home));\n\n        // Legacy location: ~/.clickhouse-client\n        try_xml!(&format!(\"{}/.clickhouse-client/config.xml\", home));\n        try_yaml!(&format!(\"{}/.clickhouse-client/config.yml\", home));\n        try_yaml!(&format!(\"{}/.clickhouse-client/config.yaml\", home));\n    }\n\n    // System-wide configuration\n    try_xml!(\"/etc/clickhouse-client/config.xml\");\n    try_yaml!(\"/etc/clickhouse-client/config.yml\");\n    try_yaml!(\"/etc/clickhouse-client/config.yaml\");\n\n    return None;\n}\n\nfn read_chdig_config(path: &str) -> Result<ChDigConfig> {\n    let file = fs::File::open(path)?;\n    let reader = io::BufReader::new(file);\n    let doc = YamlDeserializer::from_reader(reader);\n    let config = ChDigConfig::deserialize(doc)?;\n    return Ok(config);\n}\n\nmacro_rules! 
try_chdig_yaml {\n    ( $path:expr ) => {\n        if path::Path::new($path).exists() {\n            log::info!(\"Loading chdig config {}\", $path);\n            return Some(read_chdig_config($path));\n        }\n    };\n}\n\nfn try_default_chdig_config() -> Option<Result<ChDigConfig>> {\n    if let Ok(xdg_config_home) = env::var(\"XDG_CONFIG_HOME\") {\n        try_chdig_yaml!(&format!(\"{}/chdig/config.yaml\", xdg_config_home));\n        try_chdig_yaml!(&format!(\"{}/chdig/config.yml\", xdg_config_home));\n    }\n\n    if let Ok(home) = env::var(\"HOME\") {\n        try_chdig_yaml!(&format!(\"{}/.config/chdig/config.yaml\", home));\n        try_chdig_yaml!(&format!(\"{}/.config/chdig/config.yml\", home));\n\n        try_chdig_yaml!(&format!(\"{}/.chdig.yaml\", home));\n        try_chdig_yaml!(&format!(\"{}/.chdig.yml\", home));\n    }\n\n    try_chdig_yaml!(\"/etc/chdig/config.yaml\");\n    try_chdig_yaml!(\"/etc/chdig/config.yml\");\n\n    return None;\n}\n\nfn apply_chdig_config(options: &mut ChDigOptions, config: &ChDigConfig) {\n    // clickhouse section\n    let ch = &config.clickhouse;\n    if options.clickhouse.url.is_none() {\n        options.clickhouse.url = ch.url.clone();\n    }\n    if options.clickhouse.host.is_none() {\n        options.clickhouse.host = ch.host.clone();\n    }\n    if options.clickhouse.port.is_none() {\n        options.clickhouse.port = ch.port;\n    }\n    if options.clickhouse.user.is_none() {\n        options.clickhouse.user = ch.user.clone();\n    }\n    if options.clickhouse.password.is_none() {\n        options.clickhouse.password = ch.password.clone();\n    }\n    if !options.clickhouse.secure\n        && let Some(secure) = ch.secure\n    {\n        options.clickhouse.secure = secure;\n    }\n    if options.clickhouse.config.is_none() {\n        options.clickhouse.config = ch.config.clone();\n    }\n    if options.clickhouse.connection.is_none() {\n        options.clickhouse.connection = ch.connection.clone();\n    }\n    if 
options.clickhouse.cluster.is_none() {\n        options.clickhouse.cluster = ch.cluster.clone();\n    }\n    if !options.clickhouse.history\n        && let Some(history) = ch.history\n    {\n        options.clickhouse.history = history;\n    }\n    if !options.clickhouse.internal_queries\n        && let Some(internal_queries) = ch.internal_queries\n    {\n        options.clickhouse.internal_queries = internal_queries;\n    }\n    if let Some(limit) = ch.limit {\n        options.clickhouse.limit = limit;\n    }\n    if options.clickhouse.logs_order == LogsOrder::Asc\n        && let Some(logs_order) = ch.logs_order\n    {\n        options.clickhouse.logs_order = logs_order;\n    }\n    if !options.clickhouse.skip_unavailable_shards\n        && let Some(skip) = ch.skip_unavailable_shards\n    {\n        options.clickhouse.skip_unavailable_shards = skip;\n    }\n\n    // view section\n    let view = &config.view;\n    if let Some(delay) = view.delay_interval {\n        options.view.delay_interval = time::Duration::from_millis(delay);\n    }\n    if !options.view.group_by\n        && let Some(group_by) = view.group_by\n    {\n        options.view.group_by = group_by;\n    }\n    if !options.view.no_subqueries\n        && let Some(no_subqueries) = view.no_subqueries\n    {\n        options.view.no_subqueries = no_subqueries;\n    }\n    if let Some(ref start) = view.start\n        && let Ok(parsed) = RelativeDateTime::from_str(start)\n    {\n        options.view.start = parsed;\n    }\n    if let Some(ref end) = view.end\n        && let Ok(parsed) = RelativeDateTime::from_str(end)\n    {\n        options.view.end = parsed;\n    }\n    if !options.view.wrap\n        && let Some(wrap) = view.wrap\n    {\n        options.view.wrap = wrap;\n    }\n    if !options.view.no_strip_hostname_suffix\n        && let Some(no_strip) = view.no_strip_hostname_suffix\n    {\n        options.view.no_strip_hostname_suffix = no_strip;\n    }\n    if let Some(queries_limit) = 
view.queries_limit {\n        options.view.queries_limit = queries_limit;\n    }\n\n    // service section\n    let svc = &config.service;\n    if options.service.log.is_none() {\n        options.service.log = svc.log.clone();\n    }\n    if let Some(ref host) = svc.pastila_clickhouse_host {\n        options.service.pastila_clickhouse_host = host.clone();\n    }\n    if let Some(ref url) = svc.pastila_url {\n        options.service.pastila_url = url.clone();\n    }\n\n    // perfetto section\n    options.perfetto = config.perfetto.clone();\n}\n\nfn parse_url(options: &ClickHouseOptions) -> Result<url::Url> {\n    let url_str = options.url.clone().unwrap_or_default();\n    let url = if url_str.contains(\"://\") {\n        // url::Url::scheme() does not work as we want,\n        // since for \"foo:bar@127.1\" the scheme will be \"foo\",\n        url::Url::parse(&url_str)?\n    } else {\n        url::Url::parse(&format!(\"tcp://{}\", &url_str))?\n    };\n    Ok(url)\n}\n\npub fn is_cloud_host(host: &str) -> bool {\n    let host = host.to_lowercase();\n    host.ends_with(\".clickhouse.cloud\")\n        || host.ends_with(\".clickhouse-staging.com\")\n        || host.ends_with(\".clickhouse-dev.com\")\n}\n\nfn is_local_address(host: &str) -> bool {\n    let localhost = SocketAddr::from(([127, 0, 0, 1], 0));\n    let addresses = format!(\"{}:0\", host).to_socket_addrs();\n    log::trace!(\"Resolving: {} -> {:?}\", host, addresses);\n    if let Ok(addresses) = addresses {\n        for address in addresses {\n            if address != localhost {\n                log::trace!(\"Address {:?} is not local\", address);\n                return false;\n            }\n        }\n        log::trace!(\"Host {} is local\", host);\n        return true;\n    }\n    return false;\n}\n\nfn set_password_from_opt(url: &mut url::Url, password: Option<String>, force: bool) -> Result<()> {\n    if let Some(password) = password\n        && (url.password().is_none() || force)\n    {\n        
url.set_password(Some(\n            &utf8_percent_encode(&password, NON_ALPHANUMERIC).to_string(),\n        ))\n        .map_err(|_| anyhow!(\"password is invalid\"))?;\n    }\n    Ok(())\n}\n\nfn clickhouse_url_defaults(\n    options: &mut ClickHouseOptions,\n    config: Option<ClickHouseClientConfig>,\n) -> Result<()> {\n    let mut url = parse_url(options)?;\n    let connection = &options.connection;\n    let mut secure: Option<bool>;\n    let mut skip_verify: Option<bool>;\n    let mut ca_certificate: Option<String>;\n    let mut client_certificate: Option<String>;\n    let mut client_private_key: Option<String>;\n\n    {\n        let pairs: HashMap<_, _> = url.query_pairs().into_owned().collect();\n        secure = pairs.get(\"secure\").and_then(|v| bool::from_str(v).ok());\n        skip_verify = pairs\n            .get(\"skip_verify\")\n            .and_then(|v| bool::from_str(v).ok());\n        ca_certificate = pairs.get(\"ca_certificate\").cloned();\n        client_certificate = pairs.get(\"client_certificate\").cloned();\n        client_private_key = pairs.get(\"client_private_key\").cloned();\n    }\n\n    // host should be set first, since url crate does not allow to set user/password without host.\n    let mut has_host = url.host().is_some();\n    if !has_host {\n        url.set_host(Some(\"127.1\"))?;\n    }\n\n    // Apply clickhouse-client compatible options\n    if let Some(host) = &options.host {\n        url.set_host(Some(host))?;\n        has_host = true;\n    }\n    if let Some(port) = options.port {\n        url.set_port(Some(port))\n            .map_err(|_| anyhow!(\"port is invalid\"))?;\n    }\n    if let Some(user) = &options.user {\n        url.set_username(user)\n            .map_err(|_| anyhow!(\"username is invalid\"))?;\n    }\n    set_password_from_opt(&mut url, options.password.clone(), true)?;\n    if options.secure {\n        secure = Some(true);\n    }\n\n    //\n    // config\n    //\n    if let Some(config) = config {\n        
if url.username().is_empty()\n            && let Some(user) = config.user\n        {\n            url.set_username(user.as_str())\n                .map_err(|_| anyhow!(\"username is invalid\"))?;\n        }\n        set_password_from_opt(&mut url, config.password, false)?;\n        if secure.is_none()\n            && let Some(conf_secure) = config.secure\n        {\n            secure = Some(conf_secure);\n        }\n\n        let ssl_client = config.open_ssl.and_then(|ssl| ssl.client);\n        if skip_verify.is_none()\n            && let Some(conf_skip_verify) = config\n                .skip_verify\n                .or(config.accept_invalid_certificate)\n                .or_else(|| {\n                    ssl_client\n                        .as_ref()\n                        .map(|client| client.verification_mode == Some(\"none\".to_string()))\n                })\n        {\n            skip_verify = Some(conf_skip_verify);\n        }\n        if ca_certificate.is_none()\n            && let Some(conf_ca_certificate) = ssl_client.as_ref().map(|v| v.ca_config.clone())\n        {\n            ca_certificate = conf_ca_certificate.clone();\n        }\n        if client_certificate.is_none()\n            && let Some(conf_client_certificate) =\n                ssl_client.as_ref().map(|v| v.certificate_file.clone())\n        {\n            client_certificate = conf_client_certificate.clone();\n        }\n        if client_private_key.is_none()\n            && let Some(conf_client_private_key) =\n                ssl_client.as_ref().map(|v| v.private_key_file.clone())\n        {\n            client_private_key = conf_client_private_key.clone();\n        }\n\n        if options.history_file.is_none() {\n            options.history_file = config.history_file;\n        }\n\n        //\n        // connections_credentials section from config\n        //\n        let mut connection_found = false;\n        if let Some(connection) = connection {\n            for c in 
config.connections_credentials.iter() {\n                if &c.name != connection {\n                    continue;\n                }\n                if connection_found {\n                    panic!(\"Multiple connections had been matched. Fix you config.xml\");\n                }\n\n                connection_found = true;\n                if !has_host && let Some(hostname) = &c.hostname {\n                    url.set_host(Some(hostname.as_str()))?;\n                }\n                if url.port().is_none()\n                    && let Some(port) = c.port\n                {\n                    url.set_port(Some(port))\n                        .map_err(|_| anyhow!(\"Cannot set port\"))?;\n                }\n                if url.username().is_empty()\n                    && let Some(user) = &c.user\n                {\n                    url.set_username(user.as_str())\n                        .map_err(|_| anyhow!(\"username is invalid\"))?;\n                }\n                set_password_from_opt(&mut url, c.password.clone(), false)?;\n                if secure.is_none()\n                    && let Some(con_secure) = c.secure\n                {\n                    secure = Some(con_secure);\n                }\n                if skip_verify.is_none()\n                    && let Some(con_skip_verify) = c.skip_verify\n                {\n                    skip_verify = Some(con_skip_verify);\n                }\n                if ca_certificate.is_none() {\n                    ca_certificate = c.ca_certificate.clone();\n                }\n                if client_certificate.is_none() {\n                    client_certificate = c.client_certificate.clone();\n                }\n                if client_private_key.is_none() {\n                    client_private_key = c.client_private_key.clone();\n                }\n                if options.history_file.is_none() {\n                    options.history_file = c.history_file.clone();\n                }\n     
       }\n\n            if !connection_found {\n                panic!(\"Connection {} was not found\", connection);\n            }\n        }\n    } else if connection.is_some() {\n        panic!(\"No client config had been read, while --connection was set\");\n    }\n\n    // Cloud hosts always use secure connections unless explicitly disabled\n    if secure.is_none() && is_cloud_host(&url.host().ok_or_else(|| anyhow!(\"No host\"))?.to_string())\n    {\n        secure = Some(true);\n    }\n\n    // - 9000 for non secure\n    // - 9440 for secure\n    if url.port().is_none() {\n        url.set_port(Some(if secure.unwrap_or_default() {\n            9440\n        } else {\n            9000\n        }))\n        .map_err(|_| anyhow!(\"Cannot set port\"))?;\n    }\n\n    let mut url_safe = url.clone();\n\n    // url_safe\n    if url_safe.password().is_some() {\n        url_safe\n            .set_password(None)\n            .map_err(|_| anyhow!(\"Cannot hide password\"))?;\n    }\n    options.url_safe = url_safe.to_string();\n\n    // Switch database to \"system\", since \"default\" may not be present.\n    if url_safe.path().is_empty() || url_safe.path() == \"/\" {\n        url.set_path(\"/system\");\n    }\n\n    // some default settings in URL\n    {\n        let host_str = url.host().ok_or_else(|| anyhow!(\"No host\"))?.to_string();\n        let pairs: HashMap<_, _> = url_safe.query_pairs().into_owned().collect();\n        let is_local = is_local_address(&host_str);\n        let is_cloud = is_cloud_host(&host_str);\n        let mut mut_pairs = url.query_pairs_mut();\n        // Enable compression in non-local network (in the same way as clickhouse does by default)\n        if !pairs.contains_key(\"compression\") && !is_local {\n            mut_pairs.append_pair(\"compression\", \"lz4\");\n        }\n        if !pairs.contains_key(\"connection_timeout\") {\n            if is_cloud {\n                // Cloud services may need time to wake up from idle state\n        
        mut_pairs.append_pair(\"connection_timeout\", \"600s\");\n            } else {\n                // default is: 500ms (too small)\n                mut_pairs.append_pair(\"connection_timeout\", \"5s\");\n            }\n        }\n        // Note, right now even on a big clusters, everything works within default timeout (180s),\n        // but just to make it \"user friendly\" even for some obscure setups, let's increase the\n        // timeout still.\n        if !pairs.contains_key(\"query_timeout\") {\n            mut_pairs.append_pair(\"query_timeout\", \"600s\");\n        }\n        if let Some(secure) = secure {\n            mut_pairs.append_pair(\"secure\", secure.to_string().as_str());\n        }\n        if let Some(skip_verify) = skip_verify {\n            mut_pairs.append_pair(\"skip_verify\", skip_verify.to_string().as_str());\n        }\n        if let Some(ca_certificate) = ca_certificate {\n            mut_pairs.append_pair(\"ca_certificate\", &ca_certificate);\n        }\n        if let Some(client_certificate) = client_certificate {\n            mut_pairs.append_pair(\"client_certificate\", &client_certificate);\n        }\n        if let Some(client_private_key) = client_private_key {\n            mut_pairs.append_pair(\"client_private_key\", &client_private_key);\n        }\n        if options.skip_unavailable_shards {\n            mut_pairs.append_pair(\"skip_unavailable_shards\", \"1\");\n        }\n    }\n\n    options.url = Some(url.to_string());\n\n    return Ok(());\n}\n\nfn adjust_defaults(options: &mut ChDigOptions) -> Result<()> {\n    // Load and apply chdig config before clickhouse client config,\n    // so that e.g. 
clickhouse.config from chdig config feeds into the client config loading.\n    let chdig_config = if let Some(ref path) = options.service.chdig_config {\n        Some(read_chdig_config(path)?)\n    } else if let Some(config) = try_default_chdig_config() {\n        Some(config?)\n    } else {\n        None\n    };\n    if let Some(ref chdig_config) = chdig_config {\n        apply_chdig_config(options, chdig_config);\n    }\n\n    let config = if let Some(user_config) = &options.clickhouse.config {\n        if user_config.to_lowercase().ends_with(\".xml\") {\n            Some(read_xml_clickhouse_client_config(user_config)?)\n        } else {\n            Some(read_yaml_clickhouse_client_config(user_config)?)\n        }\n    } else if let Some(config) = try_default_clickhouse_client_config() {\n        Some(config?)\n    } else {\n        None\n    };\n    clickhouse_url_defaults(&mut options.clickhouse, config)?;\n\n    // FIXME: overrides_with works before default_value_if, hence --no-group-by never works\n    if options.view.no_group_by {\n        options.view.group_by = false;\n    }\n\n    return Ok(());\n}\n\npub fn parse_from<I, T>(itr: I) -> Result<ChDigOptions>\nwhere\n    I: IntoIterator<Item = T>,\n    T: Into<OsString> + Clone,\n{\n    let mut options = ChDigOptions::parse_from(itr);\n\n    // Generate autocompletion\n    if let Some(shell) = options.service.completion {\n        let mut cmd = ChDigOptions::command();\n        let name = cmd.get_name().to_string();\n        generate(shell, &mut cmd, name, &mut io::stdout());\n        process::exit(0);\n    }\n\n    adjust_defaults(&mut options)?;\n\n    return Ok(options);\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use pretty_assertions::assert_eq;\n\n    #[test]\n    fn test_url_parse_no_proto() {\n        assert_eq!(\n            parse_url(&ClickHouseOptions::default()).unwrap(),\n            url::Url::parse(\"tcp://\").unwrap()\n        );\n    }\n\n    #[test]\n    fn test_url_parse_user() 
{\n        let mut options = ClickHouseOptions {\n            user: Some(\"foo\".into()),\n            ..Default::default()\n        };\n        clickhouse_url_defaults(&mut options, None).unwrap();\n        assert_eq!(\n            parse_url(&options).unwrap(),\n            url::Url::parse(\"tcp://foo@127.1:9000/system?connection_timeout=5s&query_timeout=600s\")\n                .unwrap()\n        );\n    }\n\n    #[test]\n    fn test_url_parse_password() {\n        let mut options = ClickHouseOptions {\n            password: Some(\"foo\".into()),\n            ..Default::default()\n        };\n        clickhouse_url_defaults(&mut options, None).unwrap();\n        assert_eq!(\n            parse_url(&options).unwrap(),\n            url::Url::parse(\n                \"tcp://:foo@127.1:9000/system?connection_timeout=5s&query_timeout=600s\"\n            )\n            .unwrap()\n        );\n    }\n\n    #[test]\n    fn test_url_parse_password_with_special_chars() {\n        let password = \"!@#$%41^&*(%)\";\n        let mut options = ClickHouseOptions {\n            password: Some(password.into()),\n            ..Default::default()\n        };\n        clickhouse_url_defaults(&mut options, None).unwrap();\n        assert_eq!(\n            parse_url(&options).unwrap(),\n            url::Url::parse(\"tcp://:%21%40%23%24%2541%5E%26%2A%28%25%29@127.1:9000/system?connection_timeout=5s&query_timeout=600s\").\n            unwrap()\n        );\n    }\n\n    #[test]\n    fn test_url_parse_port() {\n        let mut options = ClickHouseOptions {\n            port: Some(9440),\n            ..Default::default()\n        };\n        clickhouse_url_defaults(&mut options, None).unwrap();\n        assert_eq!(\n            parse_url(&options).unwrap(),\n            url::Url::parse(\"tcp://127.1:9440/system?connection_timeout=5s&query_timeout=600s\")\n                .unwrap()\n        );\n    }\n\n    #[test]\n    fn test_url_parse_secure() {\n        let mut options = ClickHouseOptions 
{\n            secure: true,\n            ..Default::default()\n        };\n        clickhouse_url_defaults(&mut options, None).unwrap();\n        assert_eq!(\n            parse_url(&options).unwrap(),\n            url::Url::parse(\n                \"tcp://127.1:9440/system?connection_timeout=5s&query_timeout=600s&secure=true\"\n            )\n            .unwrap()\n        );\n    }\n\n    #[test]\n    fn test_config_empty() {\n        assert_eq!(\n            read_xml_clickhouse_client_config(\"tests/configs/empty.xml\").is_ok(),\n            true\n        );\n        assert_eq!(\n            read_yaml_clickhouse_client_config(\"tests/configs/empty.yaml\").is_ok(),\n            true\n        );\n    }\n\n    #[test]\n    fn test_config_unknown_directives() {\n        assert_eq!(\n            read_xml_clickhouse_client_config(\"tests/configs/unknown_directives.xml\").is_ok(),\n            true\n        );\n        assert_eq!(\n            read_yaml_clickhouse_client_config(\"tests/configs/unknown_directives.yaml\").is_ok(),\n            true\n        );\n    }\n\n    #[test]\n    fn test_config_basic() {\n        let xml_config = read_xml_clickhouse_client_config(\"tests/configs/basic.xml\").unwrap();\n        let yaml_config = read_yaml_clickhouse_client_config(\"tests/configs/basic.yaml\").unwrap();\n        let config = ClickHouseClientConfig {\n            user: Some(\"foo\".into()),\n            password: Some(\"bar\".into()),\n            ..Default::default()\n        };\n        assert_eq!(config, xml_config);\n        assert_eq!(config, yaml_config);\n    }\n\n    #[test]\n    fn test_config_tls() {\n        let xml_config = read_xml_clickhouse_client_config(\"tests/configs/tls.xml\").unwrap();\n        let yaml_config = read_yaml_clickhouse_client_config(\"tests/configs/tls.yaml\").unwrap();\n        let config = ClickHouseClientConfig {\n            secure: Some(true),\n            open_ssl: Some(ClickHouseClientConfigOpenSSL {\n                client: 
Some(ClickHouseClientConfigOpenSSLClient {\n                    verification_mode: Some(\"strict\".into()),\n                    certificate_file: Some(\"cert\".into()),\n                    private_key_file: Some(\"key\".into()),\n                    ca_config: Some(\"ca\".into()),\n                }),\n            }),\n            ..Default::default()\n        };\n        assert_eq!(config, xml_config);\n        assert_eq!(config, yaml_config);\n    }\n\n    #[test]\n    fn test_config_tls_applying_config_to_connection_url() {\n        let config = read_yaml_clickhouse_client_config(\"tests/configs/tls.yaml\").ok();\n        let mut options = ClickHouseOptions {\n            ..Default::default()\n        };\n        clickhouse_url_defaults(&mut options, config).unwrap();\n        let url = parse_url(&options).unwrap();\n        let args: HashMap<_, _> = url.query_pairs().into_owned().collect();\n\n        assert_eq!(args.get(\"secure\"), Some(&\"true\".into()));\n        assert_eq!(args.get(\"ca_certificate\"), Some(&\"ca\".into()));\n        assert_eq!(args.get(\"client_certificate\"), Some(&\"cert\".into()));\n        assert_eq!(args.get(\"client_private_key\"), Some(&\"key\".into()));\n        assert_eq!(args.get(\"skip_verify\"), Some(&\"false\".into()));\n    }\n\n    #[test]\n    fn test_config_connections_applying_config_to_connection_url_play() {\n        let config = read_yaml_clickhouse_client_config(\"tests/configs/connections.yaml\").ok();\n        let mut options = ClickHouseOptions {\n            connection: Some(\"play\".into()),\n            ..Default::default()\n        };\n        clickhouse_url_defaults(&mut options, config).unwrap();\n        let url = parse_url(&options).unwrap();\n        let args: HashMap<_, _> = url.query_pairs().into_owned().collect();\n\n        assert_eq!(url.host().unwrap().to_string(), \"play.clickhouse.com\");\n        assert_eq!(args.get(\"secure\"), Some(&\"true\".into()));\n        
assert_eq!(args.contains_key(\"skip_verify\"), false);\n    }\n\n    #[test]\n    fn test_config_connections_applying_config_to_connection_url_play_tls() {\n        let config = read_yaml_clickhouse_client_config(\"tests/configs/connections.yaml\").ok();\n        let mut options = ClickHouseOptions {\n            connection: Some(\"play-tls\".into()),\n            ..Default::default()\n        };\n        clickhouse_url_defaults(&mut options, config).unwrap();\n        let url = parse_url(&options).unwrap();\n        let args: HashMap<_, _> = url.query_pairs().into_owned().collect();\n\n        assert_eq!(url.host().unwrap().to_string(), \"play.clickhouse.com\");\n        assert_eq!(args.get(\"secure\"), Some(&\"true\".into()));\n        assert_eq!(args.get(\"ca_certificate\"), Some(&\"ca\".into()));\n        assert_eq!(args.get(\"client_certificate\"), Some(&\"cert\".into()));\n        assert_eq!(args.get(\"client_private_key\"), Some(&\"key\".into()));\n        assert_eq!(args.get(\"skip_verify\"), Some(&\"true\".into()));\n    }\n\n    #[test]\n    fn test_config_connections_host() {\n        let config = read_yaml_clickhouse_client_config(\"tests/configs/connections.yaml\").ok();\n        let mut options = ClickHouseOptions {\n            connection: Some(\"play-tls\".into()),\n            host: Some(\"localhost\".into()),\n            ..Default::default()\n        };\n        clickhouse_url_defaults(&mut options, config).unwrap();\n        assert_eq!(\n            parse_url(&options).unwrap().host().unwrap().to_string(),\n            \"localhost\"\n        );\n    }\n\n    #[test]\n    fn test_config_apply_accept_invalid_certificate() {\n        let config =\n            read_yaml_clickhouse_client_config(\"tests/configs/accept_invalid_certificate.yaml\")\n                .unwrap();\n        assert_eq!(config.accept_invalid_certificate, Some(true));\n\n        let mut options = ClickHouseOptions {\n            ..Default::default()\n        };\n        
clickhouse_url_defaults(&mut options, Some(config)).unwrap();\n\n        let url = parse_url(&options).unwrap();\n        let args: HashMap<_, _> = url.query_pairs().into_owned().collect();\n        assert_eq!(args.get(\"skip_verify\"), Some(&\"true\".into()));\n    }\n\n    #[test]\n    fn test_cloud_defaults() {\n        {\n            let mut options = ClickHouseOptions {\n                host: Some(\"uzg8q0g12h.eu-central-1.aws.clickhouse.cloud\".into()),\n                ..Default::default()\n            };\n            clickhouse_url_defaults(&mut options, None).unwrap();\n            let url = parse_url(&options).unwrap();\n            let args: HashMap<_, _> = url.query_pairs().into_owned().collect();\n\n            assert_eq!(args.get(\"secure\"), Some(&\"true\".into()));\n            assert_eq!(args.get(\"connection_timeout\"), Some(&\"600s\".into()));\n        }\n\n        // Note, checking for ClickHouseOptions{secure: false} does not make sense, since it is the default\n\n        {\n            let mut options = ClickHouseOptions {\n                url: Some(\"uzg8q0g12h.eu-central-1.aws.clickhouse.cloud/?secure=false&connection_timeout=1ms\".into()),\n                ..Default::default()\n            };\n            clickhouse_url_defaults(&mut options, None).unwrap();\n            let url = parse_url(&options).unwrap();\n            let args: HashMap<_, _> = url.query_pairs().into_owned().collect();\n\n            assert_eq!(args.get(\"secure\"), Some(&\"false\".into()));\n            assert_eq!(args.get(\"connection_timeout\"), Some(&\"1ms\".into()));\n        }\n    }\n\n    #[test]\n    fn test_chdig_config_empty() {\n        let config = read_chdig_config(\"tests/configs/chdig_empty.yaml\").unwrap();\n        assert!(config.clickhouse.url.is_none());\n        assert!(config.clickhouse.host.is_none());\n        assert!(config.view.delay_interval.is_none());\n        assert!(config.service.log.is_none());\n    }\n\n    #[test]\n    fn 
test_chdig_config_basic() {\n        let config = read_chdig_config(\"tests/configs/chdig_basic.yaml\").unwrap();\n\n        assert_eq!(\n            config.clickhouse.url.as_deref(),\n            Some(\"tcp://config-host:9000\")\n        );\n        assert_eq!(config.clickhouse.host.as_deref(), Some(\"config-host\"));\n        assert_eq!(config.clickhouse.port, Some(9440));\n        assert_eq!(config.clickhouse.user.as_deref(), Some(\"config_user\"));\n        assert_eq!(config.clickhouse.password.as_deref(), Some(\"config_pass\"));\n        assert_eq!(config.clickhouse.secure, Some(true));\n        assert_eq!(config.clickhouse.cluster.as_deref(), Some(\"my_cluster\"));\n        assert_eq!(config.clickhouse.history, Some(true));\n        assert_eq!(config.clickhouse.internal_queries, Some(true));\n        assert_eq!(config.clickhouse.limit, Some(50000));\n        assert_eq!(config.clickhouse.skip_unavailable_shards, Some(true));\n\n        assert_eq!(config.view.delay_interval, Some(5000));\n        assert_eq!(config.view.group_by, Some(true));\n        assert_eq!(config.view.no_subqueries, Some(true));\n        assert_eq!(config.view.start.as_deref(), Some(\"2hours\"));\n        assert_eq!(config.view.end.as_deref(), Some(\"30min\"));\n        assert_eq!(config.view.wrap, Some(true));\n        assert_eq!(config.view.no_strip_hostname_suffix, Some(true));\n        assert_eq!(config.view.queries_limit, Some(500));\n\n        assert_eq!(config.service.log.as_deref(), Some(\"/tmp/chdig.log\"));\n        assert_eq!(\n            config.service.pastila_clickhouse_host.as_deref(),\n            Some(\"https://custom.host/\")\n        );\n        assert_eq!(\n            config.service.pastila_url.as_deref(),\n            Some(\"https://custom.pastila/\")\n        );\n    }\n\n    #[test]\n    fn test_chdig_config_partial() {\n        let config = read_chdig_config(\"tests/configs/chdig_partial.yaml\").unwrap();\n\n        assert_eq!(config.clickhouse.host.as_deref(), 
Some(\"partial-host\"));\n        assert_eq!(config.clickhouse.user.as_deref(), Some(\"partial_user\"));\n        assert!(config.clickhouse.url.is_none());\n        assert!(config.clickhouse.port.is_none());\n        assert!(config.clickhouse.secure.is_none());\n\n        assert_eq!(config.view.delay_interval, Some(10000));\n        assert!(config.view.group_by.is_none());\n        assert!(config.view.wrap.is_none());\n\n        assert!(config.service.log.is_none());\n    }\n\n    #[test]\n    fn test_chdig_config_apply_clickhouse() {\n        let config = read_chdig_config(\"tests/configs/chdig_basic.yaml\").unwrap();\n        let mut options = ChDigOptions::parse_from([\"chdig\"]);\n        apply_chdig_config(&mut options, &config);\n\n        assert_eq!(options.clickhouse.host.as_deref(), Some(\"config-host\"));\n        assert_eq!(options.clickhouse.user.as_deref(), Some(\"config_user\"));\n        assert_eq!(options.clickhouse.password.as_deref(), Some(\"config_pass\"));\n        assert_eq!(options.clickhouse.port, Some(9440));\n        assert_eq!(options.clickhouse.secure, true);\n        assert_eq!(options.clickhouse.cluster.as_deref(), Some(\"my_cluster\"));\n        assert_eq!(options.clickhouse.history, true);\n        assert_eq!(options.clickhouse.internal_queries, true);\n        assert_eq!(options.clickhouse.limit, 50000);\n        assert_eq!(options.clickhouse.skip_unavailable_shards, true);\n    }\n\n    #[test]\n    fn test_chdig_config_apply_view() {\n        let config = read_chdig_config(\"tests/configs/chdig_basic.yaml\").unwrap();\n        let mut options = ChDigOptions::parse_from([\"chdig\"]);\n        apply_chdig_config(&mut options, &config);\n\n        assert_eq!(\n            options.view.delay_interval,\n            time::Duration::from_millis(5000)\n        );\n        assert_eq!(options.view.group_by, true);\n        assert_eq!(options.view.no_subqueries, true);\n        assert_eq!(options.view.wrap, true);\n        
assert_eq!(options.view.no_strip_hostname_suffix, true);\n        assert_eq!(options.view.queries_limit, 500);\n        assert_eq!(options.service.log.as_deref(), Some(\"/tmp/chdig.log\"));\n        assert_eq!(\n            options.service.pastila_clickhouse_host,\n            \"https://custom.host/\"\n        );\n        assert_eq!(options.service.pastila_url, \"https://custom.pastila/\");\n    }\n\n    #[test]\n    fn test_chdig_config_perfetto() {\n        let config = read_chdig_config(\"tests/configs/chdig_basic.yaml\").unwrap();\n\n        assert_eq!(config.perfetto.opentelemetry_span_log, true);\n        assert_eq!(config.perfetto.trace_log, true);\n        assert_eq!(config.perfetto.query_metric_log, true);\n        assert_eq!(config.perfetto.part_log, false);\n        assert_eq!(config.perfetto.query_thread_log, true);\n        assert_eq!(config.perfetto.text_log, false);\n\n        let mut options = ChDigOptions::parse_from([\"chdig\"]);\n        apply_chdig_config(&mut options, &config);\n\n        assert_eq!(options.perfetto.opentelemetry_span_log, true);\n        assert_eq!(options.perfetto.part_log, false);\n        assert_eq!(options.perfetto.query_metric_log, true);\n    }\n\n    #[test]\n    fn test_chdig_config_perfetto_defaults() {\n        let config = read_chdig_config(\"tests/configs/chdig_empty.yaml\").unwrap();\n\n        assert_eq!(config.perfetto.opentelemetry_span_log, true);\n        assert_eq!(config.perfetto.trace_log, true);\n        assert_eq!(config.perfetto.query_metric_log, false);\n        assert_eq!(config.perfetto.part_log, true);\n        assert_eq!(config.perfetto.query_thread_log, true);\n        assert_eq!(config.perfetto.text_log, true);\n    }\n\n    #[test]\n    fn test_chdig_config_cli_overrides_config() {\n        let config = read_chdig_config(\"tests/configs/chdig_basic.yaml\").unwrap();\n        let mut options = ChDigOptions::parse_from([\n            \"chdig\",\n            \"--host\",\n            \"cli-host\",\n 
           \"--user\",\n            \"cli_user\",\n            \"--secure\",\n            \"--log\",\n            \"/tmp/cli.log\",\n        ]);\n        apply_chdig_config(&mut options, &config);\n\n        // Option<T> fields: CLI wins when set\n        assert_eq!(options.clickhouse.host.as_deref(), Some(\"cli-host\"));\n        assert_eq!(options.clickhouse.user.as_deref(), Some(\"cli_user\"));\n        assert_eq!(options.service.log.as_deref(), Some(\"/tmp/cli.log\"));\n\n        // Bool flags: CLI true wins\n        assert_eq!(options.clickhouse.secure, true);\n\n        // Option<T> fields not set on CLI come from config\n        assert_eq!(options.clickhouse.password.as_deref(), Some(\"config_pass\"));\n        assert_eq!(options.clickhouse.cluster.as_deref(), Some(\"my_cluster\"));\n\n        // Non-Option fields: config always applies\n        assert_eq!(options.clickhouse.limit, 50000);\n        assert_eq!(\n            options.view.delay_interval,\n            time::Duration::from_millis(5000)\n        );\n    }\n}\n"
  },
  {
    "path": "src/interpreter/perfetto.rs",
    "content": "use crate::interpreter::Query;\nuse crate::interpreter::clickhouse::{Columns, MetricLogRow, QueryMetricRow};\nuse chrono::{DateTime, Local};\nuse chrono_tz::Tz;\nuse perfetto_protos::android_log::AndroidLogPacket;\nuse perfetto_protos::android_log::android_log_packet::LogEvent;\nuse perfetto_protos::android_log_constants::AndroidLogPriority;\nuse perfetto_protos::clock_snapshot::ClockSnapshot;\nuse perfetto_protos::clock_snapshot::clock_snapshot::Clock;\nuse perfetto_protos::counter_descriptor::CounterDescriptor;\nuse perfetto_protos::counter_descriptor::counter_descriptor::Unit;\nuse perfetto_protos::debug_annotation::DebugAnnotation;\nuse perfetto_protos::debug_annotation::debug_annotation as da;\nuse perfetto_protos::interned_data::InternedData;\nuse perfetto_protos::profile_common::{Callstack, Frame, InternedString, Mapping};\nuse perfetto_protos::profile_packet::StreamingProfilePacket;\nuse perfetto_protos::thread_descriptor::ThreadDescriptor as PerfettoThreadDescriptor;\nuse perfetto_protos::trace::Trace;\nuse perfetto_protos::trace_packet::TracePacket;\nuse perfetto_protos::trace_packet::trace_packet::Data;\nuse perfetto_protos::track_descriptor::TrackDescriptor;\nuse perfetto_protos::track_descriptor::track_descriptor::Static_or_dynamic_name;\nuse perfetto_protos::track_event::TrackEvent;\nuse perfetto_protos::track_event::track_event::{Counter_value_field, Name_field, Type};\nuse protobuf::{EnumOrUnknown, Message, MessageField};\nuse std::collections::HashMap;\nuse std::sync::{Arc, Mutex};\n\nconst SEQUENCE_ID: u32 = 1;\n// Sequence-scoped clock (>=64), mapped to BOOTTIME via ClockSnapshot.\n// All TrackEvent packets (slices, counters) use this clock on SEQUENCE_ID.\n//\n// Clock timeline notes:\n// - Clock 128 is sequence-scoped: the ClockSnapshot on SEQUENCE_ID defines it\n//   ONLY for that sequence. 
Other sequences cannot use it (see add_stack_traces).\n// - The ClockSnapshot must be the first packet (timestamp=0, self-referencing).\n//   Using a non-zero timestamp in clock 6 (BOOTTIME) instead doesn't work reliably.\n// - The first make_packet() call emits SEQ_INCREMENTAL_STATE_CLEARED (flags=1).\n//   This is safe because it's a TrackDescriptor without a timestamp (processed\n//   inline before the ClockSnapshot enters the sort queue).\n// - Never emit SEQ_INCREMENTAL_STATE_CLEARED on timestamped packets sharing this\n//   sequence — it destroys the clock mapping for all subsequent packets.\nconst CLOCK_ID_UNIXTIME: u32 = 128;\n\nstruct Sample {\n    callstack_iid: u64,\n    timestamp_us: i64,\n}\n\npub struct PerfettoTraceBuilder {\n    packets: Vec<TracePacket>,\n    next_uuid: u64,\n    next_sequence_id: u32,\n    first_event_emitted: bool,\n\n    function_name_iids: HashMap<String, u64>,\n    frame_iids: HashMap<(u64, u64), u64>,\n    callstack_iids: HashMap<Vec<u64>, u64>,\n    next_intern_id: u64,\n\n    host_uuids: HashMap<String, u64>,\n    // (host_name, category) → category track uuid\n    host_category_uuids: HashMap<(String, &'static str), u64>,\n    per_server: bool,\n    text_log_android: bool,\n}\n\nimpl PerfettoTraceBuilder {\n    pub fn new(per_server: bool, text_log_android: bool) -> Self {\n        PerfettoTraceBuilder {\n            packets: Vec::new(),\n            next_uuid: 1,\n            next_sequence_id: SEQUENCE_ID + 1,\n            first_event_emitted: false,\n\n            function_name_iids: HashMap::new(),\n            frame_iids: HashMap::new(),\n            callstack_iids: HashMap::new(),\n            next_intern_id: 1,\n\n            host_uuids: HashMap::new(),\n            host_category_uuids: HashMap::new(),\n            per_server,\n            text_log_android,\n        }\n    }\n\n    fn alloc_uuid(&mut self) -> u64 {\n        let uuid = self.next_uuid;\n        self.next_uuid += 1;\n        uuid\n    }\n\n    fn 
make_packet(&mut self) -> TracePacket {\n        let mut pkt = TracePacket::new();\n        pkt.set_trusted_packet_sequence_id(SEQUENCE_ID);\n        if !self.first_event_emitted {\n            pkt.sequence_flags = Some(1); // SEQ_INCREMENTAL_STATE_CLEARED\n            self.first_event_emitted = true;\n        } else {\n            pkt.sequence_flags = Some(2); // SEQ_NEEDS_INCREMENTAL_STATE\n        }\n        pkt\n    }\n\n    fn make_event_packet(&mut self, ts_ns: u64) -> TracePacket {\n        let mut pkt = self.make_packet();\n        pkt.timestamp = Some(ts_ns);\n        pkt.timestamp_clock_id = Some(CLOCK_ID_UNIXTIME);\n        pkt\n    }\n\n    fn add_process_track(&mut self, uuid: u64, name: &str) {\n        let mut pkt = self.make_packet();\n        let mut td = TrackDescriptor::new();\n        td.uuid = Some(uuid);\n        td.static_or_dynamic_name = Some(Static_or_dynamic_name::Name(name.to_string()));\n        pkt.data = Some(Data::TrackDescriptor(td));\n        self.packets.push(pkt);\n    }\n\n    fn add_child_track(&mut self, uuid: u64, parent_uuid: u64, name: &str) {\n        let mut pkt = self.make_packet();\n        let mut td = TrackDescriptor::new();\n        td.uuid = Some(uuid);\n        td.parent_uuid = Some(parent_uuid);\n        td.static_or_dynamic_name = Some(Static_or_dynamic_name::Name(name.to_string()));\n        pkt.data = Some(Data::TrackDescriptor(td));\n        self.packets.push(pkt);\n    }\n\n    fn add_counter_track(&mut self, uuid: u64, parent_uuid: u64, name: &str, unit: Unit) {\n        let mut pkt = self.make_packet();\n        let mut td = TrackDescriptor::new();\n        td.uuid = Some(uuid);\n        td.parent_uuid = Some(parent_uuid);\n        td.static_or_dynamic_name = Some(Static_or_dynamic_name::Name(name.to_string()));\n        let mut cd = CounterDescriptor::new();\n        cd.unit = Some(EnumOrUnknown::new(unit));\n        td.counter = MessageField::some(cd);\n        pkt.data = 
Some(Data::TrackDescriptor(td));\n        self.packets.push(pkt);\n    }\n\n    fn add_slice_begin(\n        &mut self,\n        track_uuid: u64,\n        name: &str,\n        ts_ns: u64,\n        annotations: Vec<DebugAnnotation>,\n    ) {\n        let mut pkt = self.make_event_packet(ts_ns);\n        let mut te = TrackEvent::new();\n        te.type_ = Some(EnumOrUnknown::new(Type::TYPE_SLICE_BEGIN));\n        te.track_uuid = Some(track_uuid);\n        te.name_field = Some(Name_field::Name(name.to_string()));\n        te.debug_annotations = annotations;\n        pkt.data = Some(Data::TrackEvent(te));\n        self.packets.push(pkt);\n    }\n\n    fn add_slice_end(&mut self, track_uuid: u64, ts_ns: u64) {\n        let mut pkt = self.make_event_packet(ts_ns);\n        let mut te = TrackEvent::new();\n        te.type_ = Some(EnumOrUnknown::new(Type::TYPE_SLICE_END));\n        te.track_uuid = Some(track_uuid);\n        pkt.data = Some(Data::TrackEvent(te));\n        self.packets.push(pkt);\n    }\n\n    fn add_instant(\n        &mut self,\n        track_uuid: u64,\n        name: &str,\n        ts_ns: u64,\n        annotations: Vec<DebugAnnotation>,\n    ) {\n        let mut pkt = self.make_event_packet(ts_ns);\n        let mut te = TrackEvent::new();\n        te.type_ = Some(EnumOrUnknown::new(Type::TYPE_INSTANT));\n        te.track_uuid = Some(track_uuid);\n        te.name_field = Some(Name_field::Name(name.to_string()));\n        te.debug_annotations = annotations;\n        pkt.data = Some(Data::TrackEvent(te));\n        self.packets.push(pkt);\n    }\n\n    fn add_counter_value(&mut self, track_uuid: u64, ts_ns: u64, value: i64) {\n        let mut pkt = self.make_event_packet(ts_ns);\n        let mut te = TrackEvent::new();\n        te.type_ = Some(EnumOrUnknown::new(Type::TYPE_COUNTER));\n        te.track_uuid = Some(track_uuid);\n        te.counter_value_field = Some(Counter_value_field::CounterValue(value));\n        pkt.data = Some(Data::TrackEvent(te));\n      
  self.packets.push(pkt);\n    }\n\n    /// Returns (unit, scale_factor) for a ProfileEvent name.\n    /// Scale factor converts the raw value to the unit's base\n    /// (e.g. microseconds × 1000 → nanoseconds for UNIT_TIME_NS).\n    fn unit_for_event(name: &str) -> (Unit, i64) {\n        if name.ends_with(\"Bytes\") {\n            (Unit::UNIT_SIZE_BYTES, 1)\n        } else if name.ends_with(\"Microseconds\") {\n            (Unit::UNIT_TIME_NS, 1000)\n        } else if name.ends_with(\"Milliseconds\") {\n            (Unit::UNIT_TIME_NS, 1_000_000)\n        } else if name.ends_with(\"Nanoseconds\") {\n            (Unit::UNIT_TIME_NS, 1)\n        } else {\n            (Unit::UNIT_UNSPECIFIED, 1)\n        }\n    }\n\n    fn make_annotation_str(name: &str, value: &str) -> DebugAnnotation {\n        let mut ann = DebugAnnotation::new();\n        ann.name_field = Some(da::Name_field::Name(name.to_string()));\n        ann.value = Some(da::Value::StringValue(value.to_string()));\n        ann\n    }\n\n    fn make_annotation_int(name: &str, value: i64) -> DebugAnnotation {\n        let mut ann = DebugAnnotation::new();\n        ann.name_field = Some(da::Name_field::Name(name.to_string()));\n        ann.value = Some(da::Value::IntValue(value));\n        ann\n    }\n\n    fn datetime_to_ns(dt: &DateTime<Local>) -> Option<u64> {\n        dt.timestamp_nanos_opt().map(|ns| ns as u64)\n    }\n\n    fn log_level_to_prio(level: &str) -> AndroidLogPriority {\n        match level {\n            \"Fatal\" | \"Critical\" => AndroidLogPriority::PRIO_FATAL,\n            \"Error\" => AndroidLogPriority::PRIO_ERROR,\n            \"Warning\" => AndroidLogPriority::PRIO_WARN,\n            \"Information\" => AndroidLogPriority::PRIO_INFO,\n            \"Debug\" => AndroidLogPriority::PRIO_DEBUG,\n            _ => AndroidLogPriority::PRIO_VERBOSE,\n        }\n    }\n\n    // --- High-level methods ---\n\n    pub fn add_queries(&mut self, queries: &[Query]) {\n        // (host, user) → 
thread_uuid\n        let mut user_uuids: HashMap<(String, String), u64> = HashMap::new();\n\n        for q in queries {\n            let host_uuid = self.get_or_create_host_uuid(&q.host_name);\n\n            let user_key = (q.host_name.clone(), q.user.clone());\n            let user_uuid = *user_uuids.entry(user_key).or_insert_with(|| {\n                let uuid = self.alloc_uuid();\n                self.add_child_track(uuid, host_uuid, &q.user);\n                uuid\n            });\n\n            let start_ns = match Self::datetime_to_ns(&q.query_start_time_microseconds) {\n                Some(ns) => ns,\n                None => {\n                    log::warn!(\"Perfetto: query {} has invalid start time\", q.query_id);\n                    continue;\n                }\n            };\n            let end_ns = match Self::datetime_to_ns(&q.query_end_time_microseconds) {\n                Some(ns) => ns,\n                None => {\n                    log::warn!(\"Perfetto: query {} has invalid end time\", q.query_id);\n                    continue;\n                }\n            };\n\n            let label = if q.normalized_query.chars().count() > 80 {\n                let truncated: String = q.normalized_query.chars().take(80).collect();\n                format!(\"{}...\", truncated)\n            } else {\n                q.normalized_query.clone()\n            };\n\n            let mut annotations = vec![\n                Self::make_annotation_str(\"query_id\", &q.query_id),\n                Self::make_annotation_str(\"initial_query_id\", &q.initial_query_id),\n                Self::make_annotation_str(\"user\", &q.user),\n                Self::make_annotation_str(\"database\", &q.current_database),\n                Self::make_annotation_int(\"memory\", q.memory),\n                Self::make_annotation_int(\"threads\", q.threads as i64),\n            ];\n            if !q.original_query.is_empty() {\n                
annotations.push(Self::make_annotation_str(\"query\", &q.original_query));\n            }\n\n            self.add_slice_begin(user_uuid, &label, start_ns, annotations);\n            self.add_slice_end(user_uuid, end_ns);\n        }\n    }\n\n    fn get_or_create_host_uuid(&mut self, host_name: &str) -> u64 {\n        if let Some(&uuid) = self.host_uuids.get(host_name) {\n            return uuid;\n        }\n        let uuid = self.alloc_uuid();\n        self.add_process_track(uuid, host_name);\n        self.host_uuids.insert(host_name.to_string(), uuid);\n        uuid\n    }\n\n    fn get_host_category_track(&mut self, host_name: &str, category: &'static str) -> Option<u64> {\n        if !self.per_server || host_name.is_empty() {\n            return None;\n        }\n        let host_uuid = self.get_or_create_host_uuid(host_name);\n        let key = (host_name.to_string(), category);\n        if let Some(&uuid) = self.host_category_uuids.get(&key) {\n            Some(uuid)\n        } else {\n            let uuid = self.alloc_uuid();\n            self.add_child_track(uuid, host_uuid, category);\n            self.host_category_uuids.insert(key, uuid);\n            Some(uuid)\n        }\n    }\n\n    pub fn add_otel_spans(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        // Group spans by operation_name → thread track under query's host process\n        // Use a single process track for OTel spans\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"OpenTelemetry Spans\");\n\n        let mut op_uuids: HashMap<String, u64> = HashMap::new();\n        // (host_uuid, operation_name) → track_uuid\n        let mut server_op_uuids: HashMap<(u64, String), u64> = HashMap::new();\n\n        for i in 0..columns.row_count() {\n            let operation_name: String = columns.get(i, \"operation_name\").unwrap_or_default();\n            let start_us: u64 = match columns.get(i, 
\"start_time_us\") {\n                Ok(v) => v,\n                Err(e) => {\n                    log::warn!(\"Perfetto: otel_span row {} start_time_us: {}\", i, e);\n                    continue;\n                }\n            };\n            let finish_us: u64 = match columns.get(i, \"finish_time_us\") {\n                Ok(v) => v,\n                Err(e) => {\n                    log::warn!(\"Perfetto: otel_span row {} finish_time_us: {}\", i, e);\n                    continue;\n                }\n            };\n            let query_id: String = columns.get(i, \"query_id\").unwrap_or_default();\n            let host_name: String = columns.get(i, \"host_name\").unwrap_or_default();\n\n            let start_ns = start_us.saturating_mul(1000);\n            let end_ns = finish_us.saturating_mul(1000);\n\n            let track_uuid = *op_uuids.entry(operation_name.clone()).or_insert_with(|| {\n                let uuid = self.alloc_uuid();\n                self.add_child_track(\n                    uuid,\n                    process_uuid,\n                    &format!(\"Processor: {}\", operation_name),\n                );\n                uuid\n            });\n\n            let annotations = vec![Self::make_annotation_str(\"query_id\", &query_id)];\n\n            self.add_slice_begin(track_uuid, &operation_name, start_ns, annotations.clone());\n            self.add_slice_end(track_uuid, end_ns);\n\n            if let Some(cat_uuid) = self.get_host_category_track(&host_name, \"OpenTelemetry Spans\")\n            {\n                let server_track = *server_op_uuids\n                    .entry((cat_uuid, operation_name.clone()))\n                    .or_insert_with(|| {\n                        let uuid = self.alloc_uuid();\n                        self.add_child_track(uuid, cat_uuid, &operation_name);\n                        uuid\n                    });\n                self.add_slice_begin(server_track, &operation_name, start_ns, annotations);\n             
   self.add_slice_end(server_track, end_ns);\n            }\n        }\n    }\n\n    pub fn add_trace_log_counters(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"ProfileEvent Counters\");\n\n        // event_name → (track_uuid, running_total)\n        let mut counter_tracks: HashMap<String, (u64, i64)> = HashMap::new();\n        // (host_uuid, event_name) → (track_uuid, running_total)\n        let mut server_tracks: HashMap<(u64, String), (u64, i64)> = HashMap::new();\n\n        for i in 0..columns.row_count() {\n            let event: String = columns.get(i, \"event\").unwrap_or_default();\n            let increment: i64 = columns.get(i, \"increment\").unwrap_or(0);\n            let host_name: String = columns.get(i, \"host_name\").unwrap_or_default();\n            let timestamp_ns: u64 =\n                match columns.get::<DateTime<Tz>, _>(i, \"event_time_microseconds\") {\n                    Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                    Err(e) => {\n                        log::warn!(\n                            \"Perfetto: trace_log row {} event_time_microseconds: {}\",\n                            i,\n                            e\n                        );\n                        continue;\n                    }\n                };\n\n            let (unit, scale) = Self::unit_for_event(&event);\n            let scaled_increment = increment * scale;\n            let (track_uuid, running_total) =\n                counter_tracks.entry(event.clone()).or_insert_with(|| {\n                    let uuid = self.alloc_uuid();\n                    self.add_counter_track(uuid, process_uuid, &event, unit);\n                    (uuid, 0)\n                });\n\n            *running_total += scaled_increment;\n            self.add_counter_value(*track_uuid, 
timestamp_ns, *running_total);\n\n            if let Some(cat_uuid) =\n                self.get_host_category_track(&host_name, \"ProfileEvent Counters\")\n            {\n                let (track_uuid, running_total) = server_tracks\n                    .entry((cat_uuid, event.clone()))\n                    .or_insert_with(|| {\n                        let uuid = self.alloc_uuid();\n                        self.add_counter_track(uuid, cat_uuid, &event, unit);\n                        (uuid, 0)\n                    });\n                *running_total += scaled_increment;\n                self.add_counter_value(*track_uuid, timestamp_ns, *running_total);\n            }\n        }\n    }\n\n    pub fn add_query_metrics(&mut self, rows: &[QueryMetricRow]) {\n        if rows.is_empty() {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"Query Metrics\");\n\n        // metric_name → track_uuid\n        let mut counter_tracks: HashMap<String, u64> = HashMap::new();\n        // (host_uuid, metric_name) → track_uuid\n        let mut server_tracks: HashMap<(u64, String), u64> = HashMap::new();\n\n        for row in rows {\n            // memory_usage / peak_memory_usage\n            for (name, value, unit) in [\n                (\"memory_usage\", row.memory_usage, Unit::UNIT_SIZE_BYTES),\n                (\n                    \"peak_memory_usage\",\n                    row.peak_memory_usage,\n                    Unit::UNIT_SIZE_BYTES,\n                ),\n            ] {\n                let track_uuid = *counter_tracks.entry(name.to_string()).or_insert_with(|| {\n                    let uuid = self.alloc_uuid();\n                    self.add_counter_track(uuid, process_uuid, name, unit);\n                    uuid\n                });\n                self.add_counter_value(track_uuid, row.timestamp_ns, value);\n\n                if let Some(cat_uuid) =\n                    
self.get_host_category_track(&row.host_name, \"Query Metrics\")\n                {\n                    let server_track = *server_tracks\n                        .entry((cat_uuid, name.to_string()))\n                        .or_insert_with(|| {\n                            let uuid = self.alloc_uuid();\n                            self.add_counter_track(uuid, cat_uuid, name, unit);\n                            uuid\n                        });\n                    self.add_counter_value(server_track, row.timestamp_ns, value);\n                }\n            }\n\n            // ProfileEvent_* metrics\n            for (name, value) in &row.profile_events {\n                let (unit, scale) = Self::unit_for_event(name);\n                let scaled_value = *value as i64 * scale;\n                let track_uuid = *counter_tracks.entry(name.clone()).or_insert_with(|| {\n                    let uuid = self.alloc_uuid();\n                    self.add_counter_track(uuid, process_uuid, name, unit);\n                    uuid\n                });\n                self.add_counter_value(track_uuid, row.timestamp_ns, scaled_value);\n\n                if let Some(cat_uuid) =\n                    self.get_host_category_track(&row.host_name, \"Query Metrics\")\n                {\n                    let server_track = *server_tracks\n                        .entry((cat_uuid, name.clone()))\n                        .or_insert_with(|| {\n                            let uuid = self.alloc_uuid();\n                            self.add_counter_track(uuid, cat_uuid, name, unit);\n                            uuid\n                        });\n                    self.add_counter_value(server_track, row.timestamp_ns, scaled_value);\n                }\n            }\n        }\n    }\n\n    pub fn add_part_log(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        
self.add_process_track(process_uuid, \"Part Log\");\n\n        // \"db.table\" → thread_uuid\n        let mut table_uuids: HashMap<String, u64> = HashMap::new();\n        // (host_uuid, \"db.table\") → track_uuid\n        let mut server_table_uuids: HashMap<(u64, String), u64> = HashMap::new();\n\n        for i in 0..columns.row_count() {\n            let event_type: String = columns.get(i, \"event_type\").unwrap_or_default();\n            let event_time: DateTime<Tz> = match columns.get(i, \"event_time_microseconds\") {\n                Ok(v) => v,\n                Err(e) => {\n                    log::warn!(\n                        \"Perfetto: part_log row {} event_time_microseconds: {}\",\n                        i,\n                        e\n                    );\n                    continue;\n                }\n            };\n            let duration_ms: u64 = columns.get(i, \"duration_ms\").unwrap_or(0);\n            let database: String = columns.get(i, \"database\").unwrap_or_default();\n            let table: String = columns.get(i, \"table\").unwrap_or_default();\n            let part_name: String = columns.get(i, \"part_name\").unwrap_or_default();\n            let query_id: String = columns.get(i, \"query_id\").unwrap_or_default();\n            let rows: u64 = columns.get(i, \"rows\").unwrap_or(0);\n            let size_in_bytes: u64 = columns.get(i, \"size_in_bytes\").unwrap_or(0);\n            let host_name: String = columns.get(i, \"host_name\").unwrap_or_default();\n\n            let table_key = format!(\"{}.{}\", database, table);\n            let track_uuid = *table_uuids.entry(table_key.clone()).or_insert_with(|| {\n                let uuid = self.alloc_uuid();\n                self.add_child_track(uuid, process_uuid, &table_key);\n                uuid\n            });\n\n            let end_ns = match event_time.with_timezone(&Local).timestamp_nanos_opt() {\n                Some(ns) => ns as u64,\n                None => {\n                 
   log::warn!(\"Perfetto: part_log row {} timestamp overflow\", i);\n                    continue;\n                }\n            };\n            let start_ns = end_ns.saturating_sub(duration_ms * 1_000_000);\n\n            let label = format!(\"{} {}\", event_type, part_name);\n            let annotations = vec![\n                Self::make_annotation_str(\"query_id\", &query_id),\n                Self::make_annotation_str(\"part_name\", &part_name),\n                Self::make_annotation_int(\"rows\", rows as i64),\n                Self::make_annotation_int(\"size_in_bytes\", size_in_bytes as i64),\n            ];\n\n            self.add_slice_begin(track_uuid, &label, start_ns, annotations.clone());\n            self.add_slice_end(track_uuid, end_ns);\n\n            if let Some(cat_uuid) = self.get_host_category_track(&host_name, \"Part Log\") {\n                let server_track = *server_table_uuids\n                    .entry((cat_uuid, table_key.clone()))\n                    .or_insert_with(|| {\n                        let uuid = self.alloc_uuid();\n                        self.add_child_track(uuid, cat_uuid, &table_key);\n                        uuid\n                    });\n                self.add_slice_begin(server_track, &label, start_ns, annotations);\n                self.add_slice_end(server_track, end_ns);\n            }\n        }\n    }\n\n    pub fn add_query_thread_log(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"Query Threads\");\n\n        // thread_name → track_uuid\n        let mut thread_uuids: HashMap<String, u64> = HashMap::new();\n        // (host_uuid, thread_name) → track_uuid\n        let mut server_thread_uuids: HashMap<(u64, String), u64> = HashMap::new();\n\n        for i in 0..columns.row_count() {\n            let query_id: String = columns.get(i, 
\"query_id\").unwrap_or_default();\n            let thread_name: String = columns.get(i, \"thread_name\").unwrap_or_default();\n            let host_name: String = columns.get(i, \"host_name\").unwrap_or_default();\n            let timestamp_ns: u64 =\n                match columns.get::<DateTime<Tz>, _>(i, \"event_time_microseconds\") {\n                    Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                    Err(e) => {\n                        log::warn!(\n                            \"Perfetto: query_thread_log row {} event_time_microseconds: {}\",\n                            i,\n                            e\n                        );\n                        continue;\n                    }\n                };\n            let duration_ms: u64 = columns.get(i, \"query_duration_ms\").unwrap_or(0);\n            let peak_memory: i64 = columns.get(i, \"peak_memory_usage\").unwrap_or(0);\n\n            let names: Vec<String> = columns.get(i, \"ProfileEvents.Names\").unwrap_or_default();\n            let values: Vec<u64> = columns.get(i, \"ProfileEvents.Values\").unwrap_or_default();\n\n            let track_uuid = *thread_uuids.entry(thread_name.clone()).or_insert_with(|| {\n                let uuid = self.alloc_uuid();\n                self.add_child_track(uuid, process_uuid, &thread_name);\n                uuid\n            });\n\n            let end_ns = timestamp_ns;\n            let start_ns = end_ns.saturating_sub(duration_ms * 1_000_000);\n\n            let mut annotations = vec![\n                Self::make_annotation_str(\"query_id\", &query_id),\n                Self::make_annotation_str(\"thread_name\", &thread_name),\n                Self::make_annotation_int(\"peak_memory_usage\", peak_memory),\n            ];\n\n            // Add top ProfileEvents as annotations\n            let mut pe: Vec<(String, u64)> = names.into_iter().zip(values).collect();\n            pe.sort_by(|a, b| b.1.cmp(&a.1));\n        
    for (name, value) in pe.iter().take(10) {\n                if *value > 0 {\n                    annotations.push(Self::make_annotation_int(name, *value as i64));\n                }\n            }\n\n            self.add_slice_begin(track_uuid, &query_id, start_ns, annotations.clone());\n            self.add_slice_end(track_uuid, end_ns);\n\n            if let Some(cat_uuid) = self.get_host_category_track(&host_name, \"Query Threads\") {\n                let server_track = *server_thread_uuids\n                    .entry((cat_uuid, thread_name.clone()))\n                    .or_insert_with(|| {\n                        let uuid = self.alloc_uuid();\n                        self.add_child_track(uuid, cat_uuid, &thread_name);\n                        uuid\n                    });\n                self.add_slice_begin(server_track, &query_id, start_ns, annotations);\n                self.add_slice_end(server_track, end_ns);\n            }\n        }\n    }\n\n    pub fn add_text_logs(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"Query Logs\");\n\n        // level → track_uuid\n        let mut level_uuids: HashMap<String, u64> = HashMap::new();\n        // (host_uuid, level) → track_uuid\n        let mut server_level_uuids: HashMap<(u64, String), u64> = HashMap::new();\n\n        let mut alp = if self.text_log_android {\n            Some(AndroidLogPacket::new())\n        } else {\n            None\n        };\n\n        for i in 0..columns.row_count() {\n            let level: String = columns.get(i, \"level\").unwrap_or_default();\n            let logger_name: String = columns.get(i, \"logger_name\").unwrap_or_default();\n            let message: String = columns.get(i, \"message\").unwrap_or_default();\n            let query_id: String = columns.get(i, \"query_id\").unwrap_or_default();\n            let 
host_name: String = columns.get(i, \"host_name\").unwrap_or_default();\n            let timestamp_ns: u64 =\n                match columns.get::<DateTime<Tz>, _>(i, \"event_time_microseconds\") {\n                    Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                    Err(e) => {\n                        log::warn!(\n                            \"Perfetto: text_log row {} event_time_microseconds: {}\",\n                            i,\n                            e\n                        );\n                        continue;\n                    }\n                };\n\n            let track_uuid = *level_uuids.entry(level.clone()).or_insert_with(|| {\n                let uuid = self.alloc_uuid();\n                self.add_child_track(uuid, process_uuid, &level);\n                uuid\n            });\n\n            let annotations = vec![\n                Self::make_annotation_str(\"query_id\", &query_id),\n                Self::make_annotation_str(\"level\", &level),\n                Self::make_annotation_str(\"logger\", &logger_name),\n            ];\n\n            self.add_instant(track_uuid, &message, timestamp_ns, annotations.clone());\n\n            if let Some(cat_uuid) = self.get_host_category_track(&host_name, \"Query Logs\") {\n                let server_track = *server_level_uuids\n                    .entry((cat_uuid, level.clone()))\n                    .or_insert_with(|| {\n                        let uuid = self.alloc_uuid();\n                        self.add_child_track(uuid, cat_uuid, &level);\n                        uuid\n                    });\n                self.add_instant(server_track, &message, timestamp_ns, annotations);\n            }\n\n            if let Some(ref mut alp) = alp {\n                let mut event = LogEvent::new();\n                event.timestamp = Some(timestamp_ns);\n                event.tag = Some(logger_name);\n                event.message = Some(message);\n         
       event.prio = Some(EnumOrUnknown::new(Self::log_level_to_prio(&level)));\n                alp.events.push(event);\n            }\n        }\n\n        if let Some(alp) = alp.filter(|a| !a.events.is_empty()) {\n            let first_ts = alp.events[0].timestamp.unwrap_or(0);\n            let mut pkt = self.make_event_packet(first_ts);\n            pkt.data = Some(Data::AndroidLog(alp));\n            self.packets.push(pkt);\n        }\n    }\n\n    pub fn add_metric_log(&mut self, rows: &[MetricLogRow]) {\n        if rows.is_empty() {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"Metric Log\");\n\n        // event_name → (track_uuid, running_total)\n        let mut pe_tracks: HashMap<String, (u64, i64)> = HashMap::new();\n        // metric_name → track_uuid\n        let mut cm_tracks: HashMap<String, u64> = HashMap::new();\n\n        for row in rows {\n            for (name, value) in &row.profile_events {\n                let (unit, scale) = Self::unit_for_event(name);\n                let scaled = *value as i64 * scale;\n                let (track_uuid, running_total) =\n                    pe_tracks.entry(name.clone()).or_insert_with(|| {\n                        let uuid = self.alloc_uuid();\n                        self.add_counter_track(uuid, process_uuid, name, unit);\n                        (uuid, 0)\n                    });\n                *running_total += scaled;\n                self.add_counter_value(*track_uuid, row.timestamp_ns, *running_total);\n            }\n\n            for (name, value) in &row.current_metrics {\n                let (unit, scale) = Self::unit_for_event(name);\n                let track_uuid = *cm_tracks.entry(name.clone()).or_insert_with(|| {\n                    let uuid = self.alloc_uuid();\n                    self.add_counter_track(uuid, process_uuid, name, unit);\n                    uuid\n                });\n                
self.add_counter_value(track_uuid, row.timestamp_ns, *value * scale);\n            }\n        }\n    }\n\n    pub fn add_asynchronous_metric_log(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"Async Metrics\");\n\n        let mut counter_tracks: HashMap<String, u64> = HashMap::new();\n\n        for i in 0..columns.row_count() {\n            let metric: String = columns.get(i, \"metric\").unwrap_or_default();\n            let value: f64 = columns.get(i, \"value\").unwrap_or(0.0);\n            let timestamp_ns: u64 =\n                match columns.get::<DateTime<Tz>, _>(i, \"event_time_microseconds\") {\n                    Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                    Err(e) => {\n                        log::warn!(\n                            \"Perfetto: asynchronous_metric_log row {} event_time_microseconds: {}\",\n                            i,\n                            e\n                        );\n                        continue;\n                    }\n                };\n\n            let track_uuid = *counter_tracks.entry(metric.clone()).or_insert_with(|| {\n                let uuid = self.alloc_uuid();\n                self.add_counter_track(uuid, process_uuid, &metric, Unit::UNIT_UNSPECIFIED);\n                uuid\n            });\n\n            self.add_counter_value(track_uuid, timestamp_ns, value as i64);\n        }\n    }\n\n    pub fn add_asynchronous_insert_log(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"Async Inserts\");\n\n        let mut table_uuids: HashMap<String, u64> = HashMap::new();\n\n        for i in 0..columns.row_count() {\n            let database: String = columns.get(i, 
\"database\").unwrap_or_default();\n            let table: String = columns.get(i, \"table\").unwrap_or_default();\n            let format: String = columns.get(i, \"format\").unwrap_or_default();\n            let status: String = columns.get(i, \"status\").unwrap_or_default();\n            let bytes: u64 = columns.get(i, \"bytes\").unwrap_or(0);\n            let exception: String = columns.get(i, \"exception\").unwrap_or_default();\n            let query_id: String = columns.get(i, \"query_id\").unwrap_or_default();\n\n            let start_ns: u64 = match columns.get::<DateTime<Tz>, _>(i, \"event_time_microseconds\") {\n                Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                Err(e) => {\n                    log::warn!(\n                        \"Perfetto: asynchronous_insert_log row {} event_time_microseconds: {}\",\n                        i,\n                        e\n                    );\n                    continue;\n                }\n            };\n            let end_ns: u64 = match columns.get::<DateTime<Tz>, _>(i, \"flush_time_microseconds\") {\n                Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                Err(_) => start_ns,\n            };\n\n            let table_key = format!(\"{}.{}\", database, table);\n            let track_uuid = *table_uuids.entry(table_key.clone()).or_insert_with(|| {\n                let uuid = self.alloc_uuid();\n                self.add_child_track(uuid, process_uuid, &table_key);\n                uuid\n            });\n\n            let label = format!(\"{} ({})\", table_key, status);\n            let mut annotations = vec![\n                Self::make_annotation_str(\"query_id\", &query_id),\n                Self::make_annotation_str(\"format\", &format),\n                Self::make_annotation_str(\"status\", &status),\n                Self::make_annotation_int(\"bytes\", bytes as i64),\n            ];\n            
if !exception.is_empty() {\n                annotations.push(Self::make_annotation_str(\"exception\", &exception));\n            }\n\n            self.add_slice_begin(track_uuid, &label, start_ns, annotations);\n            self.add_slice_end(track_uuid, end_ns);\n        }\n    }\n\n    pub fn add_error_log(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"Error Log\");\n\n        let mut error_uuids: HashMap<String, u64> = HashMap::new();\n\n        for i in 0..columns.row_count() {\n            let error: String = columns.get(i, \"error\").unwrap_or_default();\n            let code: i64 = columns.get(i, \"code\").unwrap_or(0);\n            let value: u64 = columns.get(i, \"value\").unwrap_or(0);\n            let remote: u8 = columns.get(i, \"remote\").unwrap_or(0);\n            let last_error_message: String =\n                columns.get(i, \"last_error_message\").unwrap_or_default();\n            let timestamp_ns: u64 = match columns.get::<DateTime<Tz>, _>(i, \"event_time\") {\n                Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                Err(e) => {\n                    log::warn!(\"Perfetto: error_log row {} event_time: {}\", i, e);\n                    continue;\n                }\n            };\n\n            let track_uuid = *error_uuids.entry(error.clone()).or_insert_with(|| {\n                let uuid = self.alloc_uuid();\n                self.add_child_track(uuid, process_uuid, &error);\n                uuid\n            });\n\n            let mut annotations = vec![\n                Self::make_annotation_int(\"code\", code),\n                Self::make_annotation_int(\"value\", value as i64),\n                Self::make_annotation_int(\"remote\", remote as i64),\n            ];\n            if !last_error_message.is_empty() {\n                
annotations.push(Self::make_annotation_str(\n                    \"last_error_message\",\n                    &last_error_message,\n                ));\n            }\n\n            self.add_instant(track_uuid, &error, timestamp_ns, annotations);\n        }\n    }\n\n    pub fn add_s3_queue_log(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"S3 Queue\");\n\n        let track_uuid = self.alloc_uuid();\n        self.add_child_track(track_uuid, process_uuid, \"files\");\n\n        for i in 0..columns.row_count() {\n            let file_name: String = columns.get(i, \"file_name\").unwrap_or_default();\n            let rows_processed: u64 = columns.get(i, \"rows_processed\").unwrap_or(0);\n            let status: String = columns.get(i, \"status\").unwrap_or_default();\n            let exception: String = columns.get(i, \"exception\").unwrap_or_default();\n\n            let start_ns: u64 = match columns.get::<DateTime<Tz>, _>(i, \"processing_start_time\") {\n                Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                Err(_) => continue,\n            };\n            let end_ns: u64 = match columns.get::<DateTime<Tz>, _>(i, \"processing_end_time\") {\n                Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                Err(_) => start_ns,\n            };\n\n            let mut annotations = vec![\n                Self::make_annotation_str(\"file_name\", &file_name),\n                Self::make_annotation_int(\"rows_processed\", rows_processed as i64),\n                Self::make_annotation_str(\"status\", &status),\n            ];\n            if !exception.is_empty() {\n                annotations.push(Self::make_annotation_str(\"exception\", &exception));\n            }\n\n            self.add_slice_begin(track_uuid, &file_name, 
start_ns, annotations);\n            self.add_slice_end(track_uuid, end_ns);\n        }\n    }\n\n    pub fn add_azure_queue_log(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"Azure Queue\");\n\n        let mut table_uuids: HashMap<String, u64> = HashMap::new();\n\n        for i in 0..columns.row_count() {\n            let database: String = columns.get(i, \"database\").unwrap_or_default();\n            let table: String = columns.get(i, \"table\").unwrap_or_default();\n            let file_name: String = columns.get(i, \"file_name\").unwrap_or_default();\n            let rows_processed: u64 = columns.get(i, \"rows_processed\").unwrap_or(0);\n            let status: String = columns.get(i, \"status\").unwrap_or_default();\n            let exception: String = columns.get(i, \"exception\").unwrap_or_default();\n\n            let start_ns: u64 = match columns.get::<DateTime<Tz>, _>(i, \"processing_start_time\") {\n                Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                Err(_) => continue,\n            };\n            let end_ns: u64 = match columns.get::<DateTime<Tz>, _>(i, \"processing_end_time\") {\n                Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                Err(_) => start_ns,\n            };\n\n            let table_key = format!(\"{}.{}\", database, table);\n            let track_uuid = *table_uuids.entry(table_key.clone()).or_insert_with(|| {\n                let uuid = self.alloc_uuid();\n                self.add_child_track(uuid, process_uuid, &table_key);\n                uuid\n            });\n\n            let mut annotations = vec![\n                Self::make_annotation_str(\"file_name\", &file_name),\n                Self::make_annotation_int(\"rows_processed\", rows_processed as i64),\n              
  Self::make_annotation_str(\"status\", &status),\n            ];\n            if !exception.is_empty() {\n                annotations.push(Self::make_annotation_str(\"exception\", &exception));\n            }\n\n            self.add_slice_begin(track_uuid, &file_name, start_ns, annotations);\n            self.add_slice_end(track_uuid, end_ns);\n        }\n    }\n\n    pub fn add_blob_storage_log(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"Blob Storage\");\n\n        let mut type_uuids: HashMap<String, u64> = HashMap::new();\n\n        for i in 0..columns.row_count() {\n            let event_type: String = columns.get(i, \"event_type\").unwrap_or_default();\n            let query_id: String = columns.get(i, \"query_id\").unwrap_or_default();\n            let disk_name: String = columns.get(i, \"disk_name\").unwrap_or_default();\n            let bucket: String = columns.get(i, \"bucket\").unwrap_or_default();\n            let remote_path: String = columns.get(i, \"remote_path\").unwrap_or_default();\n            let data_size: u64 = columns.get(i, \"data_size\").unwrap_or(0);\n            let error: String = columns.get(i, \"error\").unwrap_or_default();\n            let timestamp_ns: u64 =\n                match columns.get::<DateTime<Tz>, _>(i, \"event_time_microseconds\") {\n                    Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                    Err(e) => {\n                        log::warn!(\n                            \"Perfetto: blob_storage_log row {} event_time_microseconds: {}\",\n                            i,\n                            e\n                        );\n                        continue;\n                    }\n                };\n\n            let track_uuid = *type_uuids.entry(event_type.clone()).or_insert_with(|| {\n                let 
uuid = self.alloc_uuid();\n                self.add_child_track(uuid, process_uuid, &event_type);\n                uuid\n            });\n\n            let mut annotations = vec![\n                Self::make_annotation_str(\"query_id\", &query_id),\n                Self::make_annotation_str(\"disk_name\", &disk_name),\n                Self::make_annotation_str(\"bucket\", &bucket),\n                Self::make_annotation_str(\"remote_path\", &remote_path),\n                Self::make_annotation_int(\"data_size\", data_size as i64),\n            ];\n            if !error.is_empty() {\n                annotations.push(Self::make_annotation_str(\"error\", &error));\n            }\n\n            self.add_instant(track_uuid, &event_type, timestamp_ns, annotations);\n        }\n    }\n\n    pub fn add_background_pool_log(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"Background Pool\");\n\n        let mut log_name_uuids: HashMap<String, u64> = HashMap::new();\n\n        for i in 0..columns.row_count() {\n            let log_name: String = columns.get(i, \"log_name\").unwrap_or_default();\n            let database: String = columns.get(i, \"database\").unwrap_or_default();\n            let table: String = columns.get(i, \"table\").unwrap_or_default();\n            let query_id: String = columns.get(i, \"query_id\").unwrap_or_default();\n            let duration_ms: u64 = columns.get(i, \"duration_ms\").unwrap_or(0);\n            let error: String = columns.get(i, \"error\").unwrap_or_default();\n            let exception: String = columns.get(i, \"exception\").unwrap_or_default();\n            let end_ns: u64 = match columns.get::<DateTime<Tz>, _>(i, \"event_time_microseconds\") {\n                Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                Err(e) => {\n                    
log::warn!(\n                        \"Perfetto: background_schedule_pool_log row {} event_time_microseconds: {}\",\n                        i,\n                        e\n                    );\n                    continue;\n                }\n            };\n            let start_ns = end_ns.saturating_sub(duration_ms * 1_000_000);\n\n            let track_uuid = *log_name_uuids.entry(log_name.clone()).or_insert_with(|| {\n                let uuid = self.alloc_uuid();\n                self.add_child_track(uuid, process_uuid, &log_name);\n                uuid\n            });\n\n            let mut annotations = vec![\n                Self::make_annotation_str(\"database\", &database),\n                Self::make_annotation_str(\"table\", &table),\n                Self::make_annotation_str(\"query_id\", &query_id),\n            ];\n            if !error.is_empty() {\n                annotations.push(Self::make_annotation_str(\"error\", &error));\n            }\n            if !exception.is_empty() {\n                annotations.push(Self::make_annotation_str(\"exception\", &exception));\n            }\n\n            let label = format!(\"{}.{}\", database, table);\n            self.add_slice_begin(track_uuid, &label, start_ns, annotations);\n            self.add_slice_end(track_uuid, end_ns);\n        }\n    }\n\n    pub fn add_session_log(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"Sessions\");\n\n        let mut type_uuids: HashMap<String, u64> = HashMap::new();\n\n        for i in 0..columns.row_count() {\n            let session_type: String = columns.get(i, \"type\").unwrap_or_default();\n            let user: String = columns.get(i, \"user\").unwrap_or_default();\n            let auth_type: String = columns.get(i, \"auth_type\").unwrap_or_default();\n            let interface: String = columns.get(i, 
\"interface\").unwrap_or_default();\n            let client_address: String = columns.get(i, \"client_address\").unwrap_or_default();\n            let client_name: String = columns.get(i, \"client_name\").unwrap_or_default();\n            let failure_reason: String = columns.get(i, \"failure_reason\").unwrap_or_default();\n            let timestamp_ns: u64 =\n                match columns.get::<DateTime<Tz>, _>(i, \"event_time_microseconds\") {\n                    Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                    Err(e) => {\n                        log::warn!(\n                            \"Perfetto: session_log row {} event_time_microseconds: {}\",\n                            i,\n                            e\n                        );\n                        continue;\n                    }\n                };\n\n            let track_uuid = *type_uuids.entry(session_type.clone()).or_insert_with(|| {\n                let uuid = self.alloc_uuid();\n                self.add_child_track(uuid, process_uuid, &session_type);\n                uuid\n            });\n\n            let mut annotations = vec![\n                Self::make_annotation_str(\"user\", &user),\n                Self::make_annotation_str(\"auth_type\", &auth_type),\n                Self::make_annotation_str(\"interface\", &interface),\n                Self::make_annotation_str(\"client_address\", &client_address),\n                Self::make_annotation_str(\"client_name\", &client_name),\n            ];\n            if !failure_reason.is_empty() {\n                annotations.push(Self::make_annotation_str(\"failure_reason\", &failure_reason));\n            }\n\n            let label = format!(\"{} ({})\", session_type, user);\n            self.add_instant(track_uuid, &label, timestamp_ns, annotations);\n        }\n    }\n\n    pub fn add_aggregated_zookeeper_log(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            
return;\n        }\n\n        let process_uuid = self.alloc_uuid();\n        self.add_process_track(process_uuid, \"ZooKeeper\");\n\n        // operation → (count_track, latency_track)\n        let mut op_tracks: HashMap<String, (u64, u64)> = HashMap::new();\n\n        for i in 0..columns.row_count() {\n            let operation: String = columns.get(i, \"operation\").unwrap_or_default();\n            let count: u64 = columns.get(i, \"count\").unwrap_or(0);\n            let average_latency: f64 = columns.get(i, \"average_latency\").unwrap_or(0.0);\n            let parent_path: String = columns.get(i, \"parent_path\").unwrap_or_default();\n            let component: String = columns.get(i, \"component\").unwrap_or_default();\n\n            let timestamp_ns: u64 = match columns.get::<DateTime<Tz>, _>(i, \"event_time\") {\n                Ok(dt) => dt.with_timezone(&Local).timestamp_nanos_opt().unwrap_or(0) as u64,\n                Err(e) => {\n                    log::warn!(\n                        \"Perfetto: aggregated_zookeeper_log row {} event_time: {}\",\n                        i,\n                        e\n                    );\n                    continue;\n                }\n            };\n\n            let (count_track, latency_track) =\n                *op_tracks.entry(operation.clone()).or_insert_with(|| {\n                    let ct = self.alloc_uuid();\n                    self.add_counter_track(\n                        ct,\n                        process_uuid,\n                        &format!(\"{} count\", operation),\n                        Unit::UNIT_UNSPECIFIED,\n                    );\n                    let lt = self.alloc_uuid();\n                    self.add_counter_track(\n                        lt,\n                        process_uuid,\n                        &format!(\"{} avg_latency\", operation),\n                        Unit::UNIT_UNSPECIFIED,\n                    );\n                    (ct, lt)\n                });\n\n       
     self.add_counter_value(count_track, timestamp_ns, count as i64);\n            self.add_counter_value(latency_track, timestamp_ns, average_latency as i64);\n\n            // Also emit an instant with annotations for the detail\n            if !parent_path.is_empty() || !component.is_empty() {\n                let error_names: Vec<String> = columns.get(i, \"error_names\").unwrap_or_default();\n                let error_counts: Vec<u32> = columns.get(i, \"error_counts\").unwrap_or_default();\n\n                let mut annotations = vec![\n                    Self::make_annotation_str(\"parent_path\", &parent_path),\n                    Self::make_annotation_str(\"component\", &component),\n                    Self::make_annotation_int(\"count\", count as i64),\n                ];\n                for (en, ec) in error_names.iter().zip(error_counts.iter()) {\n                    annotations.push(Self::make_annotation_int(en, *ec as i64));\n                }\n\n                // Use count_track for the instant\n                self.add_instant(count_track, &operation, timestamp_ns, annotations);\n            }\n        }\n    }\n\n    fn alloc_intern_id(&mut self) -> u64 {\n        let id = self.next_intern_id;\n        self.next_intern_id += 1;\n        id\n    }\n\n    // Add CPU/Real/Memory stack trace samples as StreamingProfilePacket.\n    //\n    // Perfetto profiling timeline pitfalls (hard-won lessons):\n    // - Clock 128 is sequence-scoped: a ClockSnapshot on seq 1 does NOT help seq 2+.\n    // - Built-in clocks (e.g. BOOTTIME=6) also fail on non-main sequences in practice.\n    // - SEQ_INCREMENTAL_STATE_CLEARED nukes clock mappings on the sequence — never\n    //   use it on the main sequence after the ClockSnapshot.\n    // - StreamingProfilePacket timestamps come from ThreadDescriptor.reference_timestamp_us\n    //   + timestamp_delta_us, NOT from TracePacket.timestamp. 
If reference_timestamp_us\n    //   is unset, all samples land at time 0.\n    // - Samples go into cpu_profile_stack_sample table, not perf_sample.\n    //\n    // The working approach: each trace type gets its own sequence with a ThreadDescriptor\n    // that carries reference_timestamp_us (microseconds). No clock_id needed on the\n    // packets — timing is entirely from reference_timestamp_us + deltas.\n    pub fn add_stack_traces(&mut self, columns: &Columns) {\n        if columns.row_count() == 0 {\n            return;\n        }\n\n        // Global: trace_type → samples\n        let mut samples_by_type: HashMap<String, Vec<Sample>> = HashMap::new();\n        // Per-server: (host_name, trace_type) → samples\n        let mut samples_by_host_type: HashMap<(String, String), Vec<Sample>> = HashMap::new();\n\n        // Interning accumulators for this batch\n        let mut interned_strings: Vec<InternedString> = Vec::new();\n        let mut interned_frames: Vec<Frame> = Vec::new();\n        let mut interned_callstacks: Vec<Callstack> = Vec::new();\n\n        let mapping_iid = self.alloc_intern_id();\n\n        for i in 0..columns.row_count() {\n            let trace_type: String = columns.get(i, \"trace_type\").unwrap_or_default();\n            let stack: Vec<String> = columns.get(i, \"stack\").unwrap_or_default();\n\n            if stack.is_empty() {\n                continue;\n            }\n\n            let timestamp_us: i64 =\n                match columns.get::<DateTime<Tz>, _>(i, \"event_time_microseconds\") {\n                    Ok(dt) => dt.with_timezone(&Local).timestamp_micros(),\n                    Err(e) => {\n                        log::warn!(\n                            \"Perfetto: stack trace row {} event_time_microseconds: {}\",\n                            i,\n                            e\n                        );\n                        continue;\n                    }\n                };\n\n            // Intern each frame in the 
stack\n            let mut frame_ids = Vec::with_capacity(stack.len());\n            for func_name in &stack {\n                let func_iid = *self\n                    .function_name_iids\n                    .entry(func_name.clone())\n                    .or_insert_with(|| {\n                        let iid = self.next_intern_id;\n                        self.next_intern_id += 1;\n                        let mut is = InternedString::new();\n                        is.iid = Some(iid);\n                        is.str = Some(func_name.as_bytes().to_vec());\n                        interned_strings.push(is);\n                        iid\n                    });\n\n                let frame_key = (func_iid, mapping_iid);\n                let frame_iid = *self.frame_iids.entry(frame_key).or_insert_with(|| {\n                    let iid = self.next_intern_id;\n                    self.next_intern_id += 1;\n                    let mut f = Frame::new();\n                    f.iid = Some(iid);\n                    f.function_name_id = Some(func_iid);\n                    f.mapping_id = Some(mapping_iid);\n                    interned_frames.push(f);\n                    iid\n                });\n\n                frame_ids.push(frame_iid);\n            }\n\n            let callstack_iid =\n                *self\n                    .callstack_iids\n                    .entry(frame_ids.clone())\n                    .or_insert_with(|| {\n                        let iid = self.next_intern_id;\n                        self.next_intern_id += 1;\n                        let mut cs = Callstack::new();\n                        cs.iid = Some(iid);\n                        cs.frame_ids = frame_ids;\n                        interned_callstacks.push(cs);\n                        iid\n                    });\n\n            samples_by_type\n                .entry(trace_type.clone())\n                .or_default()\n                .push(Sample {\n                    callstack_iid,\n     
               timestamp_us,\n                });\n\n            if self.per_server {\n                let host_name: String = columns.get(i, \"host_name\").unwrap_or_default();\n                if !host_name.is_empty() {\n                    samples_by_host_type\n                        .entry((host_name, trace_type))\n                        .or_default()\n                        .push(Sample {\n                            callstack_iid,\n                            timestamp_us,\n                        });\n                }\n            }\n        }\n\n        // Build one dummy mapping\n        let mut mapping = Mapping::new();\n        mapping.iid = Some(mapping_iid);\n\n        // Each trace_type gets its own sequence with a dedicated ThreadDescriptor.\n        // Sample timestamps come from ThreadDescriptor.reference_timestamp_us + deltas,\n        // so profiling packets don't need clock_id/timestamp (avoids sequence-scoped\n        // clock 128 resolution issues on non-main sequences).\n        for (trace_type, samples) in &samples_by_type {\n            let name = format!(\"{} Samples\", trace_type);\n            self.emit_streaming_profile(\n                &name,\n                samples,\n                &interned_strings,\n                &interned_frames,\n                &interned_callstacks,\n                &mapping,\n            );\n        }\n\n        for ((host, trace_type), samples) in &samples_by_host_type {\n            let name = format!(\"{}: {} Samples\", host, trace_type);\n            self.emit_streaming_profile(\n                &name,\n                samples,\n                &interned_strings,\n                &interned_frames,\n                &interned_callstacks,\n                &mapping,\n            );\n        }\n    }\n\n    fn emit_streaming_profile(\n        &mut self,\n        thread_name: &str,\n        samples: &[Sample],\n        interned_strings: &[InternedString],\n        interned_frames: &[Frame],\n        
interned_callstacks: &[Callstack],\n        mapping: &Mapping,\n    ) {\n        if samples.is_empty() {\n            return;\n        }\n\n        let seq_id = self.next_sequence_id;\n        self.next_sequence_id += 1;\n        let fake_tid = seq_id as i32;\n\n        let mut td = PerfettoThreadDescriptor::new();\n        td.pid = Some(1);\n        td.tid = Some(fake_tid);\n        td.thread_name = Some(thread_name.to_string());\n        td.reference_timestamp_us = Some(samples[0].timestamp_us);\n\n        let mut desc_pkt = TracePacket::new();\n        desc_pkt.set_trusted_packet_sequence_id(seq_id);\n        desc_pkt.sequence_flags = Some(1 | 2);\n        desc_pkt.trusted_pid = Some(1);\n        desc_pkt.data = Some(Data::ThreadDescriptor(td));\n        self.packets.push(desc_pkt);\n\n        let mut callstack_iids = Vec::with_capacity(samples.len());\n        let mut timestamp_deltas = Vec::with_capacity(samples.len());\n\n        let mut prev_us = samples[0].timestamp_us;\n        for (idx, s) in samples.iter().enumerate() {\n            callstack_iids.push(s.callstack_iid);\n            if idx == 0 {\n                timestamp_deltas.push(0);\n            } else {\n                timestamp_deltas.push(s.timestamp_us - prev_us);\n                prev_us = s.timestamp_us;\n            }\n        }\n\n        let mut spp = StreamingProfilePacket::new();\n        spp.callstack_iid = callstack_iids;\n        spp.timestamp_delta_us = timestamp_deltas;\n\n        let mut interned_data = InternedData::new();\n        interned_data.function_names = interned_strings.to_vec();\n        interned_data.frames = interned_frames.to_vec();\n        interned_data.callstacks = interned_callstacks.to_vec();\n        interned_data.mappings = vec![mapping.clone()];\n\n        let mut pkt = TracePacket::new();\n        pkt.set_trusted_packet_sequence_id(seq_id);\n        pkt.sequence_flags = Some(2);\n        pkt.trusted_pid = Some(1);\n        pkt.interned_data = 
MessageField::some(interned_data);\n        pkt.data = Some(Data::StreamingProfilePacket(spp));\n        self.packets.push(pkt);\n    }\n\n    /// Build a ClockSnapshot mapping all clocks with an identity transform.\n    /// All at timestamp 0 with 1ns multiplier, so raw nanosecond values pass through as-is.\n    /// Built-in clocks 1 (MONOTONIC), 3 (REALTIME), 6 (BOOTTIME) are needed because\n    /// some packet types (e.g. AndroidLogPacket) have their timestamps resolved\n    /// internally via built-in clocks.\n    fn make_clock_snapshot() -> ClockSnapshot {\n        let mut cs = ClockSnapshot::new();\n        let make_clock = |id: u32| -> Clock {\n            let mut c = Clock::new();\n            c.clock_id = Some(id);\n            c.timestamp = Some(0);\n            c.unit_multiplier_ns = Some(1);\n            c.is_incremental = Some(false);\n            c\n        };\n        cs.clocks = vec![\n            make_clock(CLOCK_ID_UNIXTIME), // 128 - sequence-scoped\n            make_clock(1),                 // BUILTIN_CLOCK_MONOTONIC\n            make_clock(3),                 // BUILTIN_CLOCK_REALTIME\n            make_clock(6),                 // BUILTIN_CLOCK_BOOTTIME\n        ];\n        cs\n    }\n\n    pub fn build(self) -> Vec<u8> {\n        // ClockSnapshot with timestamp=0 in its own clock (self-referencing).\n        // The trace processor resolves this specially for ClockSnapshot packets,\n        // placing it at the very start of the trace (time 0).\n        let cs = Self::make_clock_snapshot();\n        let mut cs_pkt = TracePacket::new();\n        cs_pkt.set_trusted_packet_sequence_id(SEQUENCE_ID);\n        cs_pkt.sequence_flags = Some(1 | 2);\n        cs_pkt.timestamp = Some(0);\n        cs_pkt.timestamp_clock_id = Some(CLOCK_ID_UNIXTIME);\n        cs_pkt.data = Some(Data::ClockSnapshot(cs));\n\n        let mut trace = Trace::new();\n        trace.packet = Vec::with_capacity(self.packets.len() + 1);\n        trace.packet.push(cs_pkt);\n        
trace.packet.extend(self.packets);\n        trace.write_to_bytes().unwrap_or_default()\n    }\n}\n\npub struct PerfettoServer {\n    trace_data: Arc<Mutex<Option<Arc<Vec<u8>>>>>,\n    #[allow(dead_code)]\n    server_thread: Option<std::thread::JoinHandle<()>>,\n}\n\nimpl PerfettoServer {\n    pub fn new() -> Self {\n        let trace_data: Arc<Mutex<Option<Arc<Vec<u8>>>>> = Arc::new(Mutex::new(None));\n        let trace_data_clone = trace_data.clone();\n\n        let server_thread = std::thread::spawn(move || {\n            let server = match tiny_http::Server::http(\"127.0.0.1:9001\") {\n                Ok(s) => s,\n                Err(e) => {\n                    log::error!(\"Failed to start Perfetto HTTP server on port 9001: {}\", e);\n                    return;\n                }\n            };\n            log::info!(\"Perfetto HTTP server listening on port 9001\");\n\n            for request in server.incoming_requests() {\n                let url = request.url().to_string();\n                log::trace!(\"Perfetto HTTP request: {} {}\", request.method(), url);\n\n                if request.method() == &tiny_http::Method::Options {\n                    let response = tiny_http::Response::empty(200)\n                        .with_header(\n                            \"Access-Control-Allow-Origin: *\"\n                                .parse::<tiny_http::Header>()\n                                .unwrap(),\n                        )\n                        .with_header(\n                            \"Access-Control-Allow-Methods: GET, POST, OPTIONS\"\n                                .parse::<tiny_http::Header>()\n                                .unwrap(),\n                        )\n                        .with_header(\n                            \"Access-Control-Allow-Headers: *\"\n                                .parse::<tiny_http::Header>()\n                                .unwrap(),\n                        );\n                    
request.respond(response).ok();\n                    continue;\n                }\n\n                if url == \"/trace\" {\n                    let data: Option<Arc<Vec<u8>>> = trace_data_clone.lock().unwrap().clone();\n                    match data {\n                        Some(bytes) => {\n                            let response = tiny_http::Response::from_data((*bytes).clone())\n                                .with_header(\n                                    \"Content-Type: application/octet-stream\"\n                                        .parse::<tiny_http::Header>()\n                                        .unwrap(),\n                                )\n                                .with_header(\n                                    \"Access-Control-Allow-Origin: *\"\n                                        .parse::<tiny_http::Header>()\n                                        .unwrap(),\n                                );\n                            request.respond(response).ok();\n                        }\n                        None => {\n                            let response =\n                                tiny_http::Response::from_string(\"No trace data available\")\n                                    .with_status_code(404)\n                                    .with_header(\n                                        \"Access-Control-Allow-Origin: *\"\n                                            .parse::<tiny_http::Header>()\n                                            .unwrap(),\n                                    );\n                            request.respond(response).ok();\n                        }\n                    }\n                } else {\n                    let response = tiny_http::Response::from_string(\"Not Found\")\n                        .with_status_code(404)\n                        .with_header(\n                            \"Access-Control-Allow-Origin: *\"\n                                
.parse::<tiny_http::Header>()\n                                .unwrap(),\n                        );\n                    request.respond(response).ok();\n                }\n            }\n        });\n\n        PerfettoServer {\n            trace_data,\n            server_thread: Some(server_thread),\n        }\n    }\n\n    pub fn set_trace(&self, data: Vec<u8>) {\n        *self.trace_data.lock().unwrap() = Some(Arc::new(data));\n    }\n\n    pub fn get_perfetto_url(&self) -> String {\n        \"https://ui.perfetto.dev/#!/?url=http://127.0.0.1:9001/trace\".to_string()\n    }\n}\n"
  },
  {
    "path": "src/interpreter/query.rs",
    "content": "use anyhow::Result;\nuse chrono::{DateTime, Local};\nuse chrono_tz::Tz;\nuse size::{Base, SizeFormatter, Style};\nuse std::collections::HashMap;\nuse std::fmt;\n\nuse super::clickhouse::Columns;\n\n// Analog of mapFromArrays() in ClickHouse\nfn map_from_arrays<K, V>(keys: Vec<K>, values: Vec<V>) -> HashMap<K, V>\nwhere\n    K: std::hash::Hash + std::cmp::Eq,\n{\n    let mut map = HashMap::new();\n    for (k, v) in keys.into_iter().zip(values) {\n        map.insert(k, v);\n    }\n    return map;\n}\n\n#[derive(Clone, Debug)]\npub struct Query {\n    pub selection: bool,\n    pub host_name: String,\n    pub display_host_name: Option<String>,\n    pub user: String,\n    pub threads: usize,\n    pub memory: i64,\n    pub elapsed: f64,\n    pub query_start_time_microseconds: DateTime<Local>,\n    pub query_end_time_microseconds: DateTime<Local>,\n    // Is the name good enough? Maybe simply \"queries\" or \"shards_queries\"?\n    pub subqueries: u64,\n    pub is_initial_query: bool,\n    pub is_cancelled: bool,\n    pub initial_query_id: String,\n    pub query_id: String,\n    pub normalized_query: String,\n    pub original_query: String,\n    pub current_database: String,\n\n    pub profile_events: HashMap<String, u64>,\n    pub settings: HashMap<String, String>,\n\n    // Used for metric rates (like top(1) shows)\n    pub prev_elapsed: Option<f64>,\n    pub prev_profile_events: Option<HashMap<String, u64>>,\n\n    // If running is true, then the metrics will be shown as per-second rate, otherwise raw data.\n    // Since for system.processes we indeed the rates, while for slow queries/last queries raw\n    // data.\n    pub running: bool,\n}\nimpl Query {\n    /// Creates a Query from a ClickHouse block at the specified row index\n    pub fn from_clickhouse_block(\n        columns: &Columns,\n        row_index: usize,\n        running: bool,\n    ) -> Result<Self> {\n        let mut profile_events = map_from_arrays(\n            
columns.get::<Vec<String>, _>(row_index, \"ProfileEvents.Names\")?,\n            columns.get::<Vec<u64>, _>(row_index, \"ProfileEvents.Values\")?,\n        );\n        let mut settings = map_from_arrays(\n            columns.get::<Vec<String>, _>(row_index, \"Settings.Names\")?,\n            columns.get::<Vec<String>, _>(row_index, \"Settings.Values\")?,\n        );\n\n        // FIXME: Shrinking is slow, but without it memory consumption is too high, 100-200x\n        // more! This is because by some reason the capacity inside clickhouse.rs is 4096,\n        // which is ~100x more then we need for ProfileEvents (~40).\n        profile_events.shrink_to_fit();\n        settings.shrink_to_fit();\n\n        Ok(Query {\n            selection: false,\n            host_name: columns.get::<_, _>(row_index, \"host_name\")?,\n            display_host_name: None,\n            user: columns.get::<_, _>(row_index, \"user\")?,\n            threads: columns.get::<u64, _>(row_index, \"peak_threads_usage\")? as usize,\n            memory: columns.get::<_, _>(row_index, \"peak_memory_usage\")?,\n            elapsed: columns.get::<_, _>(row_index, \"elapsed\")?,\n            query_start_time_microseconds: columns\n                .get::<DateTime<Tz>, _>(row_index, \"query_start_time_microseconds\")?\n                .with_timezone(&Local),\n            query_end_time_microseconds: columns\n                .get::<DateTime<Tz>, _>(row_index, \"query_end_time_microseconds\")?\n                .with_timezone(&Local),\n            subqueries: 1, // See queries_count_subqueries()\n            is_initial_query: columns.get::<u8, _>(row_index, \"is_initial_query\")? == 1,\n            is_cancelled: columns.get::<u8, _>(row_index, \"is_cancelled\")? 
== 1,\n            initial_query_id: columns.get::<_, _>(row_index, \"initial_query_id\")?,\n            query_id: columns.get::<_, _>(row_index, \"query_id\")?,\n            normalized_query: columns.get::<_, _>(row_index, \"normalized_query\")?,\n            original_query: columns.get::<_, _>(row_index, \"original_query\")?,\n            current_database: columns.get::<_, _>(row_index, \"current_database\")?,\n            profile_events,\n            settings,\n            prev_elapsed: None,\n            prev_profile_events: None,\n            running,\n        })\n    }\n\n    // NOTE: maybe it should be corrected with moving sampling?\n    pub fn cpu(&self) -> f64 {\n        if !self.running {\n            let ms = *self\n                .profile_events\n                .get(\"OSCPUVirtualTimeMicroseconds\")\n                .unwrap_or(&0);\n            return (ms as f64) / 1e6 * 100.;\n        }\n\n        if let Some(prev_profile_events) = &self.prev_profile_events {\n            let ms_prev = *prev_profile_events\n                .get(\"OSCPUVirtualTimeMicroseconds\")\n                .unwrap_or(&0);\n            let ms_now = *self\n                .profile_events\n                .get(\"OSCPUVirtualTimeMicroseconds\")\n                .unwrap_or(&0);\n            let elapsed = self.elapsed - self.prev_elapsed.unwrap();\n            if elapsed > 0. 
{\n                // It is possible to overflow, at least because metrics for initial queries is\n                // summarized, and when query on some node will be finished (non initial), then initial\n                // query will have less data.\n                return ms_now.saturating_sub(ms_prev) as f64 / 1e6 / elapsed * 100.;\n            }\n        }\n\n        let ms = *self\n            .profile_events\n            .get(\"OSCPUVirtualTimeMicroseconds\")\n            .unwrap_or(&0);\n        return (ms as f64) / 1e6 / self.elapsed * 100.;\n    }\n\n    pub fn io_wait(&self) -> f64 {\n        if !self.running {\n            let ms = *self\n                .profile_events\n                .get(\"OSIOWaitMicroseconds\")\n                .unwrap_or(&0);\n            return (ms as f64) / 1e6 * 100.;\n        }\n\n        if let Some(prev_profile_events) = &self.prev_profile_events {\n            let ms_prev = *prev_profile_events\n                .get(\"OSIOWaitMicroseconds\")\n                .unwrap_or(&0);\n            let ms_now = *self\n                .profile_events\n                .get(\"OSIOWaitMicroseconds\")\n                .unwrap_or(&0);\n            let elapsed = self.elapsed - self.prev_elapsed.unwrap();\n            if elapsed > 0. 
{\n                // It is possible to overflow, at least because metrics for initial queries is\n                // summarized, and when query on some node will be finished (non initial), then initial\n                // query will have less data.\n                return ms_now.saturating_sub(ms_prev) as f64 / 1e6 / elapsed * 100.;\n            }\n        }\n\n        let ms = *self\n            .profile_events\n            .get(\"OSIOWaitMicroseconds\")\n            .unwrap_or(&0);\n        return (ms as f64) / 1e6 / self.elapsed * 100.;\n    }\n\n    pub fn cpu_wait(&self) -> f64 {\n        if !self.running {\n            let ms = *self\n                .profile_events\n                .get(\"OSCPUWaitMicroseconds\")\n                .unwrap_or(&0);\n            return (ms as f64) / 1e6 * 100.;\n        }\n\n        if let Some(prev_profile_events) = &self.prev_profile_events {\n            let ms_prev = *prev_profile_events\n                .get(\"OSCPUWaitMicroseconds\")\n                .unwrap_or(&0);\n            let ms_now = *self\n                .profile_events\n                .get(\"OSCPUWaitMicroseconds\")\n                .unwrap_or(&0);\n            let elapsed = self.elapsed - self.prev_elapsed.unwrap();\n            if elapsed > 0. 
{\n                // It is possible to overflow, at least because metrics for initial queries is\n                // summarized, and when query on some node will be finished (non initial), then initial\n                // query will have less data.\n                return ms_now.saturating_sub(ms_prev) as f64 / 1e6 / elapsed * 100.;\n            }\n        }\n\n        let ms = *self\n            .profile_events\n            .get(\"OSCPUWaitMicroseconds\")\n            .unwrap_or(&0);\n        return (ms as f64) / 1e6 / self.elapsed * 100.;\n    }\n\n    pub fn net_io(&self) -> f64 {\n        return self.get_per_second_rate_events_multi(&[\n            \"NetworkSendBytes\",\n            \"NetworkReceiveBytes\",\n            \"ReadBufferFromS3Bytes\",\n            \"WriteBufferFromS3Bytes\",\n        ]);\n    }\n\n    pub fn disk_io(&self) -> f64 {\n        return self.get_per_second_rate_events_multi(&[\n            \"WriteBufferFromFileDescriptorWriteBytes\",\n            // Note that it may differs from ReadCompressedBytes, since later takes into account\n            // network.\n            \"ReadBufferFromFileDescriptorReadBytes\",\n        ]);\n    }\n\n    pub fn io(&self) -> f64 {\n        return self.get_per_second_rate_events_multi(&[\n            // Though sometimes it is bigger the the real uncompressed reads, so maybe it is better\n            // to use CompressedReadBufferBytes instead.\n            // But yes it will not take into account non-compressed reads, but this should be rare\n            // (except for the cases when the MergeTree is used with CODEC NONE).\n            \"SelectedBytes\",\n            \"InsertedBytes\",\n        ]);\n    }\n\n    fn get_profile_events_multi(&self, names: &[&'static str]) -> u64 {\n        let mut result: u64 = 0;\n        for &name in names {\n            result += *self.profile_events.get(name).unwrap_or(&0);\n        }\n        return result;\n    }\n    fn get_prev_profile_events_multi(&self, names: 
&[&'static str]) -> u64 {\n        let mut result: u64 = 0;\n        for &name in names {\n            result += *self\n                .prev_profile_events\n                .as_ref()\n                .unwrap()\n                .get(name)\n                .unwrap_or(&0);\n        }\n        return result;\n    }\n\n    fn get_per_second_rate_events_multi(&self, events: &[&'static str]) -> f64 {\n        if !self.running {\n            return self.get_profile_events_multi(events) as f64;\n        }\n\n        if self.prev_profile_events.is_some() {\n            let now = self.get_profile_events_multi(events);\n            let prev = self.get_prev_profile_events_multi(events);\n            let diff = now.saturating_sub(prev);\n\n            let elapsed = self.elapsed - self.prev_elapsed.unwrap();\n            if elapsed > 0. {\n                return (diff as f64) / elapsed;\n            }\n        }\n\n        let value = self.get_profile_events_multi(events);\n        return value as f64 / self.elapsed;\n    }\n}\n\nimpl fmt::Display for Query {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        let formatter = SizeFormatter::new()\n            .with_base(Base::Base10)\n            .with_style(Style::Abbreviated);\n\n        let memory_str = formatter.format(self.memory);\n        let status = if self.running { \"Running\" } else { \"Finished\" };\n\n        writeln!(f, \"Query ID:         {}\", self.query_id)?;\n        writeln!(f, \"Initial Query ID: {}\", self.initial_query_id)?;\n        writeln!(f, \"Status:           {}\", status)?;\n        writeln!(f, \"Is Initial:       {}\", self.is_initial_query)?;\n        writeln!(f, \"Is Cancelled:     {}\", self.is_cancelled)?;\n        writeln!(f, \"Subqueries:       {}\", self.subqueries)?;\n        writeln!(f, \"Host:             {}\", self.host_name)?;\n        writeln!(f, \"User:             {}\", self.user)?;\n        writeln!(f, \"Database:         {}\", self.current_database)?;\n        
writeln!(f, \"Threads:          {}\", self.threads)?;\n        writeln!(f, \"Memory:           {}\", memory_str)?;\n        writeln!(f, \"Elapsed:          {:.2}s\", self.elapsed)?;\n        writeln!(f, \"CPU:              {:.1}%\", self.cpu())?;\n        writeln!(f, \"IO Wait:          {:.1}%\", self.io_wait())?;\n        writeln!(f, \"CPU Wait:         {:.1}%\", self.cpu_wait())?;\n        writeln!(\n            f,\n            \"Start Time:       {}\",\n            self.query_start_time_microseconds\n                .format(\"%Y-%m-%d %H:%M:%S\")\n        )?;\n        writeln!(\n            f,\n            \"End Time:         {}\",\n            self.query_end_time_microseconds.format(\"%Y-%m-%d %H:%M:%S\")\n        )?;\n        writeln!(f, \"Query:\")?;\n        write!(f, \"{}\", self.original_query)\n    }\n}\n"
  },
  {
    "path": "src/interpreter/worker.rs",
    "content": "use crate::{\n    common::{RelativeDateTime, Stopwatch},\n    interpreter::{\n        ContextArc, Query,\n        clickhouse::{ClickHouse, TextLogArguments, TraceType},\n        flamegraph,\n        perfetto::PerfettoTraceBuilder,\n    },\n    pastila,\n    utils::{highlight_sql, share_graph},\n    view::{self, Navigation},\n};\nuse anyhow::{Result, anyhow};\nuse chrono::{DateTime, Local};\n// FIXME: \"leaky abstractions\"\nuse clickhouse_rs::errors::Error as ClickHouseError;\nuse cursive::traits::*;\nuse cursive::views;\nuse futures::channel::mpsc;\nuse std::collections::{HashMap, hash_map::Entry};\nuse std::sync::{Arc, Mutex};\nuse std::thread;\nuse std::time::{Duration, Instant};\n\n#[derive(Debug, Clone)]\npub enum Event {\n    // [filter, limit]\n    ProcessList(String, u64),\n    // [filter, start, end, limit]\n    SlowQueryLog(String, RelativeDateTime, RelativeDateTime, u64),\n    // [filter, start, end, limit]\n    LastQueryLog(String, RelativeDateTime, RelativeDateTime, u64),\n    // (view_name, args)\n    TextLog(&'static str, TextLogArguments),\n    // [bool (true - show in TUI, false - share via pastila), type, start, end]\n    ServerFlameGraph(bool, TraceType, DateTime<Local>, DateTime<Local>),\n    // [bool (true - show in TUI, false - share via pastila)]\n    JemallocFlameGraph(bool),\n    // (type, bool (true - show in TUI, false - open in browser), start time, end time, [query_ids])\n    QueryFlameGraph(\n        TraceType,\n        bool,\n        DateTime<Local>,\n        Option<DateTime<Local>>,\n        Vec<String>,\n    ),\n    // (type, start time, end time, [query_ids_a = before], [query_ids_b = after]).\n    // Diff mode is TUI-only (color-coded via flamelens), no share path.\n    QueryFlameGraphDiff(\n        TraceType,\n        DateTime<Local>,\n        Option<DateTime<Local>>,\n        Vec<String>,\n        Vec<String>,\n    ),\n    // [bool (true - show in TUI, false - open in browser), query_ids]\n    
LiveQueryFlameGraph(bool, Option<Vec<String>>),\n    Summary,\n    // query_id\n    KillQuery(String),\n    // (database, query)\n    ExecuteQuery(String, String),\n    // (database, query)\n    ExplainSyntax(String, String, HashMap<String, String>),\n    // (database, query)\n    ExplainPlan(String, String),\n    // (database, query)\n    ExplainPipeline(String, String),\n    // (database, query)\n    ExplainPipelineShareGraph(String, String),\n    // (database, query)\n    ExplainPlanIndexes(String, String),\n    // (database, table)\n    ShowCreateTable(String, String),\n    // (view_name, query)\n    SQLQuery(&'static str, String),\n    // (log_name, database, table, start, end)\n    BackgroundSchedulePoolLogs(\n        Option<String>,\n        String,\n        String,\n        RelativeDateTime,\n        RelativeDateTime,\n    ),\n    // (database, table)\n    TableParts(String, String),\n    // (database, table)\n    AsynchronousInserts(String, String),\n    // (content to share via pastila)\n    ShareLogs(String),\n    // (queries, query_ids, start, end)\n    PerfettoExport(\n        Vec<Query>,\n        Vec<String>,\n        DateTime<Local>,\n        Option<DateTime<Local>>,\n    ),\n    // (start, end)\n    ServerPerfettoExport(DateTime<Local>, DateTime<Local>),\n}\n\nimpl Event {\n    fn enum_key(&self) -> String {\n        match self {\n            Event::ProcessList(..) => \"ProcessList\".to_string(),\n            Event::SlowQueryLog(..) => \"SlowQueryLog\".to_string(),\n            Event::LastQueryLog(..) => \"LastQueryLog\".to_string(),\n            Event::TextLog(..) => \"TextLog\".to_string(),\n            Event::ServerFlameGraph(..) => \"ServerFlameGraph\".to_string(),\n            Event::JemallocFlameGraph(..) => \"JemallocFlameGraph\".to_string(),\n            Event::QueryFlameGraph(..) => \"QueryFlameGraph\".to_string(),\n            Event::QueryFlameGraphDiff(..) => \"QueryFlameGraphDiff\".to_string(),\n            Event::LiveQueryFlameGraph(..) 
=> \"LiveQueryFlameGraph\".to_string(),\n            Event::Summary => \"Summary\".to_string(),\n            Event::KillQuery(..) => \"KillQuery\".to_string(),\n            Event::ExecuteQuery(..) => \"ExecuteQuery\".to_string(),\n            Event::ExplainSyntax(..) => \"ExplainSyntax\".to_string(),\n            Event::ExplainPlan(..) => \"ExplainPlan\".to_string(),\n            Event::ExplainPipeline(..) => \"ExplainPipeline\".to_string(),\n            Event::ExplainPipelineShareGraph(..) => \"ExplainPipelineShareGraph\".to_string(),\n            Event::ExplainPlanIndexes(..) => \"ExplainPlanIndexes\".to_string(),\n            Event::ShowCreateTable(..) => \"ShowCreateTable\".to_string(),\n            Event::SQLQuery(view_name, _query) => format!(\"SQLQuery({})\", view_name),\n            Event::BackgroundSchedulePoolLogs(..) => \"BackgroundSchedulePoolLogs\".to_string(),\n            Event::TableParts(..) => \"TableParts\".to_string(),\n            Event::AsynchronousInserts(..) => \"AsynchronousInserts\".to_string(),\n            Event::ShareLogs(..) => \"ShareLogs\".to_string(),\n            Event::PerfettoExport(..) => \"PerfettoExport\".to_string(),\n            Event::ServerPerfettoExport(..) => \"ServerPerfettoExport\".to_string(),\n        }\n    }\n}\n\ntype ReceiverArc = Arc<Mutex<mpsc::Receiver<Event>>>;\ntype Sender = mpsc::Sender<Event>;\n\npub struct Worker {\n    sender: Sender,\n    sender_by_event: HashMap<String, Sender>,\n    receiver: ReceiverArc,\n    thread: Option<thread::JoinHandle<()>>,\n    paused: bool,\n}\n\n// TODO: can we simplify things with callbacks? 
(EnumValue(Type))\nimpl Worker {\n    pub fn new() -> Self {\n        // Here the futures::channel::mpsc::channel is used over standard std::sync::mpsc::channel,\n        // since standard does not allow to configure backlog (queue max size), while we uses\n        // channel per distinct event (to avoid running multiple queries for the same view, since\n        // it does not make sense), i.e. separate channel for Summary, separate for\n        // UpdateProcessList and so on.\n        //\n        // Note, by default channel reserves slot for each sender [1].\n        //\n        //   [1]: https://github.com/rust-lang/futures-rs/issues/403\n        let (sender, receiver) = mpsc::channel::<Event>(1);\n        let receiver = Arc::new(Mutex::new(receiver));\n\n        return Worker {\n            sender,\n            sender_by_event: HashMap::new(),\n            receiver,\n            thread: None,\n            paused: false,\n        };\n    }\n\n    pub fn start(&mut self, context: ContextArc) {\n        let receiver = self.receiver.clone();\n        let context = context.clone();\n        self.thread = Some(std::thread::spawn(move || {\n            start_tokio(context, receiver);\n        }));\n    }\n\n    pub fn toggle_pause(&mut self) {\n        self.paused = !self.paused;\n        log::trace!(\n            \"Toggle pause ({})\",\n            if self.paused { \"paused\" } else { \"unpaused\" }\n        );\n    }\n\n    pub fn is_paused(&self) -> bool {\n        return self.paused;\n    }\n\n    // @force - ignore pause\n    pub fn send(&mut self, force: bool, event: Event) {\n        if !force && self.paused {\n            return;\n        }\n\n        let entry = self.sender_by_event.entry(event.enum_key());\n        let channel_created = matches!(&entry, Entry::Vacant(_));\n        let sender = entry.or_insert(self.sender.clone());\n\n        log::trace!(\n            \"Sending event: {:?} (channel created: {})\",\n            &event,\n            
channel_created\n        );\n\n        // Simply ignore errors (queue is full, likely update interval is too short).\n        sender.try_send(event.clone()).unwrap_or_else(|e| {\n            log::error!(\n                \"Cannot send event {:?}: {} (too low --delay-interval?)\",\n                event,\n                e\n            )\n        });\n    }\n}\n\n#[tokio::main(flavor = \"current_thread\")]\nasync fn start_tokio(context: ContextArc, receiver: ReceiverArc) {\n    log::info!(\"Event worker started\");\n\n    loop {\n        let event = match receiver.lock().unwrap().try_recv() {\n            Ok(event) => event,\n            // Channel closed.\n            Err(mpsc::TryRecvError::Closed) => break,\n            // No message available.\n            Err(mpsc::TryRecvError::Empty) => {\n                // Same as INPUT_POLL_DELAY_MS, but I hate such implementations, both should be fixed.\n                thread::sleep(Duration::from_millis(30));\n                continue;\n            }\n        };\n        log::trace!(\"Got event: {:?}\", event);\n\n        let mut need_clear = false;\n        let cb_sink = context.lock().unwrap().cb_sink.clone();\n        let options = context.lock().unwrap().options.clone();\n\n        let update_status = |message: &str| {\n            let content = message.to_string();\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.set_statusbar_content(content);\n                }))\n                // Ignore errors on exit\n                .unwrap_or_default();\n        };\n\n        update_status(&format!(\"Processing {}...\", event.enum_key()));\n\n        let debug_metrics = context.lock().unwrap().debug_metrics.clone();\n        // RAII: decrements on scope exit, including panic or early return paths.\n        let _in_flight = debug_metrics.track_in_flight();\n        let stopwatch = Stopwatch::start_new();\n        if let Err(err) = process_event(context.clone(), 
/// Entry point of the worker thread: a single-threaded tokio runtime that
/// drains the event channel and dispatches each event to `process_event`,
/// reporting progress and errors back to the cursive UI via `cb_sink`.
#[tokio::main(flavor = "current_thread")]
async fn start_tokio(context: ContextArc, receiver: ReceiverArc) {
    log::info!("Event worker started");

    loop {
        // Non-blocking poll of the channel; sleeps between polls (see below).
        let event = match receiver.lock().unwrap().try_recv() {
            Ok(event) => event,
            // Channel closed.
            Err(mpsc::TryRecvError::Closed) => break,
            // No message available.
            Err(mpsc::TryRecvError::Empty) => {
                // Same as INPUT_POLL_DELAY_MS, but I hate such implementations, both should be fixed.
                thread::sleep(Duration::from_millis(30));
                continue;
            }
        };
        log::trace!("Got event: {:?}", event);

        // Set to true by process_event when the whole screen must be redrawn.
        let mut need_clear = false;
        let cb_sink = context.lock().unwrap().cb_sink.clone();
        let options = context.lock().unwrap().options.clone();

        // Helper: push a message into the UI statusbar from this thread.
        let update_status = |message: &str| {
            let content = message.to_string();
            cb_sink
                .send(Box::new(move |siv: &mut cursive::Cursive| {
                    siv.set_statusbar_content(content);
                }))
                // Ignore errors on exit
                .unwrap_or_default();
        };

        update_status(&format!("Processing {}...", event.enum_key()));

        let debug_metrics = context.lock().unwrap().debug_metrics.clone();
        // RAII: decrements on scope exit, including panic or early return paths.
        let _in_flight = debug_metrics.track_in_flight();
        let stopwatch = Stopwatch::start_new();
        if let Err(err) = process_event(context.clone(), event.clone(), &mut need_clear).await {
            cb_sink
                .send(Box::new(move |siv: &mut cursive::Cursive| {
                    // Pause further updates so the error dialog is not
                    // immediately buried by the next refresh.
                    let is_paused = siv
                        .user_data::<ContextArc>()
                        .unwrap()
                        .lock()
                        .unwrap()
                        .worker
                        .is_paused();
                    if !is_paused {
                        siv.toggle_pause_updates(Some("due previous errors"));
                    }

                    // Special-case "all connection tries failed" on clustered
                    // setups to suggest a workaround in the dialog.
                    const CLICKHOUSE_ERROR_CODE_ALL_CONNECTION_TRIES_FAILED: u32 = 279;
                    let has_cluster = siv
                        .user_data::<ContextArc>()
                        .unwrap()
                        .lock()
                        .unwrap()
                        .options
                        .clickhouse
                        .cluster
                        .as_ref()
                        .is_some_and(|v| !v.is_empty());
                    if has_cluster
                        && let Some(ClickHouseError::Server(server_error)) =
                            &err.downcast_ref::<ClickHouseError>()
                        && server_error.code == CLICKHOUSE_ERROR_CODE_ALL_CONNECTION_TRIES_FAILED
                    {
                        siv.add_layer(views::Dialog::info(format!(
                            "{}\n(consider adding skip_unavailable_shards=1 to the connection URL)",
                            err
                        )));
                        return;
                    }

                    siv.add_layer(views::Dialog::info(err.to_string()));
                }))
                // Ignore errors on exit
                .unwrap_or_default();
        }
        let elapsed = stopwatch.elapsed();
        debug_metrics.record_event(elapsed);
        let mut completion_status = format!(
            "Processing {} took {} ms.",
            event.enum_key(),
            elapsed.as_millis()
        );

        // It should not be reset, since delay_interval should be set to the maximum service
        // query duration time.
        if stopwatch.elapsed() > options.view.delay_interval {
            completion_status.push_str(" (consider increasing --delay_interval)");
        }

        update_status(&completion_status);

        // Trigger a redraw (and optionally a full clear) on the UI thread.
        cb_sink
            .send(Box::new(move |siv: &mut cursive::Cursive| {
                if need_clear {
                    siv.complete_clear();
                }
                siv.on_event(cursive::event::Event::Refresh);
            }))
            // Ignore errors on exit
            .unwrap_or_default();
    }

    log::info!("Event worker finished");
}

/// Either render a flamegraph inside the TUI (`tui == true`), or upload the
/// folded data to pastila and open the resulting (encrypted) URL in the
/// user's browser.
async fn render_or_share_flamegraph(
    tui: bool,
    cb_sink: cursive::CbSink,
    title: &'static str,
    data: String,
    pastila_clickhouse_host: String,
    pastila_url: String,
) -> Result<()> {
    if tui {
        cb_sink
            .send(Box::new(move |siv: &mut cursive::Cursive| {
                // Rendering errors are shown as a dialog instead of panicking.
                flamegraph::show(title, data)
                    .or_else(|e| {
                        siv.add_layer(views::Dialog::info(e.to_string()));
                        return anyhow::Ok(());
                    })
                    .unwrap();
            }))
            .map_err(|_| anyhow!("Cannot send message to UI"))?;
    } else {
        let url = flamegraph::share(data, &pastila_clickhouse_host, &pastila_url).await?;

        let url_clone = url.clone();
        cb_sink
            .send(Box::new(move |siv: &mut cursive::Cursive| {
                siv.add_layer(
                    views::Dialog::text(format!("Flamegraph shared (encrypted):\n\n{}", url))
                        .title("Share Complete")
                        .button("Close", |siv| {
                            siv.pop_layer();
                        }),
                );
            }))
            .map_err(|_| anyhow!("Cannot send message to UI"))?;

        // Best effort: open the shared URL in the default browser.
        crate::utils::open_url_command(&url_clone).status()?;
    }
    return Ok(());
}
 );\n            }))\n            .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n\n        crate::utils::open_url_command(&url_clone).status()?;\n    }\n    return Ok(());\n}\n\nuse crate::interpreter::options::ChDigPerfettoConfig;\n\nasync fn fetch_and_populate_perfetto_trace(\n    clickhouse: &Arc<ClickHouse>,\n    builder: &mut PerfettoTraceBuilder,\n    cfg: &ChDigPerfettoConfig,\n    query_ids: Option<&[String]>,\n    start: DateTime<Local>,\n    end_time: DateTime<Local>,\n) {\n    let (otel, trace_log, metrics, parts, threads, stack_traces, text_logs) = tokio::join!(\n        async {\n            if cfg.opentelemetry_span_log {\n                Some(\n                    clickhouse\n                        .get_otel_spans_for_perfetto(query_ids, start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.trace_log {\n                Some(\n                    clickhouse\n                        .get_trace_log_counters_for_perfetto(query_ids, start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.query_metric_log {\n                Some(\n                    clickhouse\n                        .get_query_metrics_for_perfetto(query_ids, start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.part_log {\n                Some(\n                    clickhouse\n                        .get_part_log_for_perfetto(query_ids, start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.query_thread_log {\n                Some(\n                    clickhouse\n                        
.get_query_thread_log_for_perfetto(query_ids, start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.trace_log {\n                Some(\n                    clickhouse\n                        .get_stack_traces_for_perfetto(query_ids, start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.text_log {\n                Some(\n                    clickhouse\n                        .get_text_log_for_perfetto(query_ids, start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n    );\n\n    match otel {\n        Some(Ok(block)) => builder.add_otel_spans(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch opentelemetry_span_log: {}\", e),\n        None => {}\n    }\n    match trace_log {\n        Some(Ok(block)) => builder.add_trace_log_counters(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch trace_log counters: {}\", e),\n        None => {}\n    }\n    match metrics {\n        Some(Ok(rows)) => builder.add_query_metrics(&rows),\n        Some(Err(e)) => log::warn!(\"Failed to fetch query_metric_log: {}\", e),\n        None => {}\n    }\n    match parts {\n        Some(Ok(block)) => builder.add_part_log(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch part_log: {}\", e),\n        None => {}\n    }\n    match threads {\n        Some(Ok(block)) => builder.add_query_thread_log(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch query_thread_log: {}\", e),\n        None => {}\n    }\n    match stack_traces {\n        Some(Ok(block)) => builder.add_stack_traces(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch trace_log stack traces: {}\", e),\n        None => {}\n    }\n    match text_logs 
{\n        Some(Ok(block)) => builder.add_text_logs(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch text_log: {}\", e),\n        None => {}\n    }\n}\n\nasync fn fetch_server_perfetto_sources(\n    clickhouse: &Arc<ClickHouse>,\n    builder: &mut PerfettoTraceBuilder,\n    cfg: &ChDigPerfettoConfig,\n    start: DateTime<Local>,\n    end_time: DateTime<Local>,\n) {\n    let (\n        metric_log,\n        async_metric_log,\n        async_insert_log,\n        error_log,\n        s3_queue_log,\n        azure_queue_log,\n        blob_storage_log,\n        bg_pool_log,\n        session_log,\n        zk_log,\n    ) = tokio::join!(\n        async {\n            if cfg.metric_log {\n                Some(\n                    clickhouse\n                        .get_metric_log_for_perfetto(start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.asynchronous_metric_log {\n                Some(\n                    clickhouse\n                        .get_asynchronous_metric_log_for_perfetto(start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.asynchronous_insert_log {\n                Some(\n                    clickhouse\n                        .get_asynchronous_insert_log_for_perfetto(start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.error_log {\n                Some(clickhouse.get_error_log_for_perfetto(start, end_time).await)\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.s3_queue_log {\n                Some(\n                    clickhouse\n                        .get_s3_queue_log_for_perfetto(start, end_time)\n                  
      .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.azure_queue_log {\n                Some(\n                    clickhouse\n                        .get_azure_queue_log_for_perfetto(start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.blob_storage_log {\n                Some(\n                    clickhouse\n                        .get_blob_storage_log_for_perfetto(start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.background_schedule_pool_log {\n                Some(\n                    clickhouse\n                        .get_background_schedule_pool_log_for_perfetto(start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.session_log {\n                Some(\n                    clickhouse\n                        .get_session_log_for_perfetto(start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n        async {\n            if cfg.aggregated_zookeeper_log {\n                Some(\n                    clickhouse\n                        .get_aggregated_zookeeper_log_for_perfetto(start, end_time)\n                        .await,\n                )\n            } else {\n                None\n            }\n        },\n    );\n\n    match metric_log {\n        Some(Ok(rows)) => builder.add_metric_log(&rows),\n        Some(Err(e)) => log::warn!(\"Failed to fetch metric_log: {}\", e),\n        None => {}\n    }\n    match async_metric_log {\n        Some(Ok(block)) => builder.add_asynchronous_metric_log(&block),\n        
Some(Err(e)) => log::warn!(\"Failed to fetch asynchronous_metric_log: {}\", e),\n        None => {}\n    }\n    match async_insert_log {\n        Some(Ok(block)) => builder.add_asynchronous_insert_log(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch asynchronous_insert_log: {}\", e),\n        None => {}\n    }\n    match error_log {\n        Some(Ok(block)) => builder.add_error_log(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch error_log: {}\", e),\n        None => {}\n    }\n    match s3_queue_log {\n        Some(Ok(block)) => builder.add_s3_queue_log(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch s3queue_log: {}\", e),\n        None => {}\n    }\n    match azure_queue_log {\n        Some(Ok(block)) => builder.add_azure_queue_log(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch azure_queue_log: {}\", e),\n        None => {}\n    }\n    match blob_storage_log {\n        Some(Ok(block)) => builder.add_blob_storage_log(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch blob_storage_log: {}\", e),\n        None => {}\n    }\n    match bg_pool_log {\n        Some(Ok(block)) => builder.add_background_pool_log(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch background_schedule_pool_log: {}\", e),\n        None => {}\n    }\n    match session_log {\n        Some(Ok(block)) => builder.add_session_log(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch session_log: {}\", e),\n        None => {}\n    }\n    match zk_log {\n        Some(Ok(block)) => builder.add_aggregated_zookeeper_log(&block),\n        Some(Err(e)) => log::warn!(\"Failed to fetch aggregated_zookeeper_log: {}\", e),\n        None => {}\n    }\n}\n\nfn serve_perfetto_trace(\n    context: ContextArc,\n    cb_sink: cursive::CbSink,\n    builder: PerfettoTraceBuilder,\n) -> Result<()> {\n    let data = builder.build();\n    let data_len = data.len();\n    if let Err(e) = 
std::fs::write(\"/tmp/chdig_perfetto.pftrace\", &data) {\n        log::warn!(\"Failed to save debug trace: {}\", e);\n    } else {\n        log::info!(\n            \"Saved debug trace to /tmp/chdig_perfetto.pftrace ({} bytes)\",\n            data_len\n        );\n    }\n\n    let server = context.lock().unwrap().get_or_start_perfetto_server();\n    server.set_trace(data);\n    let url = server.get_perfetto_url();\n\n    let url_clone = url.clone();\n    cb_sink\n        .send(Box::new(move |siv: &mut cursive::Cursive| {\n            siv.add_layer(\n                views::Dialog::text(format!(\n                    \"Perfetto trace exported ({} bytes)\\n\\nOpening: {}\",\n                    data_len, url\n                ))\n                .title(\"Perfetto Export\")\n                .button(\"Close\", |siv| {\n                    siv.pop_layer();\n                }),\n            );\n        }))\n        .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n\n    crate::utils::open_url_command(&url_clone).status()?;\n    Ok(())\n}\n\nasync fn process_event(context: ContextArc, event: Event, need_clear: &mut bool) -> Result<()> {\n    let cb_sink = context.lock().unwrap().cb_sink.clone();\n    let clickhouse = context.lock().unwrap().clickhouse.clone();\n    let pastila_clickhouse_host = context\n        .lock()\n        .unwrap()\n        .options\n        .service\n        .pastila_clickhouse_host\n        .clone();\n    let pastila_url = context.lock().unwrap().options.service.pastila_url.clone();\n    let selected_host = context.lock().unwrap().selected_host.clone();\n\n    match event {\n        Event::ProcessList(filter, limit) => {\n            let block = clickhouse\n                .get_processlist(filter, limit, selected_host.as_ref())\n                .await?;\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.call_on_name_or_render_error(\n                        \"processes\",\n          
              move |view: &mut views::OnEventView<view::QueriesView>| {\n                            return view.get_inner_mut().update(block);\n                        },\n                    );\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::SlowQueryLog(filter, start, end, limit) => {\n            let block = clickhouse\n                .get_slow_query_log(&filter, start, end, limit, selected_host.as_ref())\n                .await?;\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.call_on_name_or_render_error(\n                        \"slow_query_log\",\n                        move |view: &mut views::OnEventView<view::QueriesView>| {\n                            return view.get_inner_mut().update(block);\n                        },\n                    );\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::LastQueryLog(filter, start, end, limit) => {\n            let block = clickhouse\n                .get_last_query_log(&filter, start, end, limit, selected_host.as_ref())\n                .await?;\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.call_on_name_or_render_error(\n                        \"last_query_log\",\n                        move |view: &mut views::OnEventView<view::QueriesView>| {\n                            return view.get_inner_mut().update(block);\n                        },\n                    );\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::TextLog(view_name, args) => {\n            let block = clickhouse.get_query_logs(&args).await?;\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.call_on_name_or_render_error(\n                   
     view_name,\n                        move |view: &mut view::TextLogView| {\n                            return view.update(block);\n                        },\n                    );\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::ServerFlameGraph(tui, trace_type, start, end) => {\n            let flamegraph_block = clickhouse\n                .get_flamegraph(\n                    trace_type,\n                    None,\n                    Some(start),\n                    Some(end),\n                    selected_host.as_ref(),\n                )\n                .await?;\n            render_or_share_flamegraph(\n                tui,\n                cb_sink,\n                \"Server\",\n                flamegraph::block_to_folded(&flamegraph_block),\n                pastila_clickhouse_host,\n                pastila_url,\n            )\n            .await?;\n            *need_clear = true;\n        }\n        Event::JemallocFlameGraph(tui) => {\n            let flamegraph_block = clickhouse\n                .get_jemalloc_flamegraph(selected_host.as_ref())\n                .await?;\n            render_or_share_flamegraph(\n                tui,\n                cb_sink,\n                \"jemalloc\",\n                flamegraph::block_to_folded(&flamegraph_block),\n                pastila_clickhouse_host,\n                pastila_url,\n            )\n            .await?;\n            *need_clear = true;\n        }\n        Event::QueryFlameGraph(trace_type, tui, start, end, query_ids) => {\n            let flamegraph_block = clickhouse\n                .get_flamegraph(\n                    trace_type,\n                    Some(&query_ids),\n                    Some(start),\n                    end,\n                    selected_host.as_ref(),\n                )\n                .await?;\n            render_or_share_flamegraph(\n                tui,\n                cb_sink,\n                
\"Query\",\n                flamegraph::block_to_folded(&flamegraph_block),\n                pastila_clickhouse_host,\n                pastila_url,\n            )\n            .await?;\n            *need_clear = true;\n        }\n        Event::QueryFlameGraphDiff(trace_type, start, end, query_ids_a, query_ids_b) => {\n            let (block_a, block_b) = tokio::try_join!(\n                clickhouse.get_flamegraph(\n                    trace_type.clone(),\n                    Some(&query_ids_a),\n                    Some(start),\n                    end,\n                    selected_host.as_ref(),\n                ),\n                clickhouse.get_flamegraph(\n                    trace_type,\n                    Some(&query_ids_b),\n                    Some(start),\n                    end,\n                    selected_host.as_ref(),\n                ),\n            )?;\n            let before = flamegraph::block_to_folded(&block_a);\n            let after = flamegraph::block_to_folded(&block_b);\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    flamegraph::show_diff(\"Query diff\", before, after)\n                        .or_else(|e| {\n                            siv.add_layer(views::Dialog::info(e.to_string()));\n                            return anyhow::Ok(());\n                        })\n                        .unwrap();\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n            *need_clear = true;\n        }\n        Event::LiveQueryFlameGraph(tui, query_ids) => {\n            let flamegraph_block = clickhouse\n                .get_live_query_flamegraph(&query_ids, selected_host.as_ref())\n                .await?;\n            render_or_share_flamegraph(\n                tui,\n                cb_sink,\n                \"Query (live)\",\n                flamegraph::block_to_folded(&flamegraph_block),\n                pastila_clickhouse_host,\n      
          pastila_url,\n            )\n            .await?;\n            *need_clear = true;\n        }\n        Event::ExplainPlanIndexes(database, query) => {\n            let plan = clickhouse\n                .explain_plan_indexes(database.as_str(), query.as_str())\n                .await?\n                .join(\"\\n\");\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.add_layer(\n                        views::Dialog::around(\n                            views::LinearLayout::vertical()\n                                .child(views::TextView::new(\"EXPLAIN PLAN indexes=1\").center())\n                                .child(views::DummyView.fixed_height(1))\n                                .child(views::TextView::new(plan)),\n                        )\n                        .scrollable(),\n                    );\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::ExecuteQuery(database, query) => {\n            let stopwatch = Stopwatch::start_new();\n            clickhouse\n                .execute_query(database.as_str(), query.as_str())\n                .await?;\n            // TODO: print results?\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.add_layer(views::Dialog::info(format!(\n                        \"Query executed ({} ms). 
Look results in 'Last queries'\",\n                        stopwatch.elapsed_ms(),\n                    )));\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::ExplainSyntax(database, query, settings) => {\n            let query = clickhouse\n                .explain_syntax(database.as_str(), query.as_str(), &settings)\n                .await?\n                .join(\"\\n\");\n            let query = highlight_sql(&query)?;\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.add_layer(\n                        views::Dialog::around(\n                            views::LinearLayout::vertical()\n                                .child(views::TextView::new(\"EXPLAIN SYNTAX\").center())\n                                .child(views::DummyView.fixed_height(1))\n                                .child(views::TextView::new(query)),\n                        )\n                        .scrollable(),\n                    );\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::ExplainPlan(database, query) => {\n            let plan = clickhouse\n                .explain_plan(database.as_str(), query.as_str())\n                .await?\n                .join(\"\\n\");\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.add_layer(\n                        views::Dialog::around(\n                            views::LinearLayout::vertical()\n                                .child(views::TextView::new(\"EXPLAIN PLAN\").center())\n                                .child(views::DummyView.fixed_height(1))\n                                .child(views::TextView::new(plan)),\n                        )\n                        .scrollable(),\n                    );\n                }))\n                .map_err(|_| anyhow!(\"Cannot send 
message to UI\"))?;\n        }\n        Event::ExplainPipeline(database, query) => {\n            let pipeline = clickhouse\n                .explain_pipeline(database.as_str(), query.as_str())\n                .await?\n                .join(\"\\n\");\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.add_layer(\n                        views::Dialog::around(\n                            views::LinearLayout::vertical()\n                                .child(views::TextView::new(\"EXPLAIN PIPELINE\").center())\n                                .child(views::DummyView.fixed_height(1))\n                                .child(views::TextView::new(pipeline)),\n                        )\n                        .scrollable(),\n                    );\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::ExplainPipelineShareGraph(database, query) => {\n            let pipeline = clickhouse\n                .explain_pipeline_graph(database.as_str(), query.as_str())\n                .await?\n                .join(\"\\n\");\n\n            // Upload graph to pastila and open in browser\n            match share_graph(pipeline, &pastila_clickhouse_host, &pastila_url).await {\n                Ok(_) => {}\n                Err(err) => {\n                    let error_msg = err.to_string();\n                    cb_sink\n                        .send(Box::new(move |siv: &mut cursive::Cursive| {\n                            siv.add_layer(views::Dialog::info(error_msg));\n                        }))\n                        .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n                }\n            }\n        }\n        Event::ShowCreateTable(database, table) => {\n            let create_statement = clickhouse\n                .show_create_table(database.as_str(), table.as_str())\n                .await?;\n            let create_statement = 
highlight_sql(&create_statement)?;\n            let title = format!(\"CREATE TABLE {}.{}\", database, table);\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.add_layer(\n                        views::Dialog::around(views::TextView::new(create_statement).scrollable())\n                            .title(title),\n                    );\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::KillQuery(query_id) => {\n            let start = Instant::now();\n            let ret = clickhouse.kill_query(query_id.as_str()).await;\n            let elapsed = start.elapsed();\n            // NOTE: should we do this via cursive, to block the UI?\n            let message;\n            if let Err(err) = ret {\n                message = format!(\"{} (elapsed: {:?})\", err, elapsed);\n            } else {\n                message = format!(\"Query {} killed (elapsed: {:?})\", query_id, elapsed);\n            }\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.add_layer(views::Dialog::info(message));\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::Summary => {\n            let block = clickhouse.get_summary(selected_host.as_ref()).await;\n            match block {\n                Err(err) => {\n                    let message = err.to_string();\n                    cb_sink\n                        .send(Box::new(move |siv: &mut cursive::Cursive| {\n                            siv.add_layer(views::Dialog::info(message));\n                        }))\n                        .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n                }\n                Ok(summary) => {\n                    cb_sink\n                        .send(Box::new(move |siv: &mut cursive::Cursive| {\n                            
siv.call_on_name(\"summary\", move |view: &mut view::SummaryView| {\n                                view.update(summary);\n                            });\n                        }))\n                        .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n                }\n            }\n        }\n        Event::SQLQuery(view_name, query) => {\n            let block = clickhouse.execute(query.as_str()).await?;\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    log::trace!(\n                        \"Updating {} (with block of {} rows)\",\n                        view_name,\n                        block.row_count()\n                    );\n                    // TODO: update specific view (can we accept type somehow in the enum?)\n                    siv.call_on_name_or_render_error(\n                        view_name,\n                        move |view: &mut views::OnEventView<view::SQLQueryView>| {\n                            return view.get_inner_mut().update(block);\n                        },\n                    );\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::BackgroundSchedulePoolLogs(log_name, database, table, start, end) => {\n            let query_ids = clickhouse\n                .get_background_schedule_pool_query_ids(\n                    log_name.clone(),\n                    database.clone(),\n                    table.clone(),\n                    start.clone(),\n                    end.clone(),\n                    selected_host.as_ref(),\n                )\n                .await?;\n\n            if query_ids.is_empty() {\n                let error_msg = if let Some(log_name) = log_name {\n                    format!(\n                        \"No entries for {} jobs (database: {}, table: {}, start: {}, end: {})\",\n                        log_name, database, table, start, end\n                    )\n  
              } else {\n                    format!(\n                        \"No entries for {}.{} (start: {}, end: {})\",\n                        database, table, start, end\n                    )\n                };\n                return Err(anyhow!(error_msg));\n            }\n\n            let title = if let Some(ref log_name) = log_name {\n                format!(\"Logs for task: {}\", log_name)\n            } else {\n                format!(\"Logs for tasks of {}.{}\", database, table)\n            };\n\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    use cursive::view::Resizable;\n                    let context = siv.user_data::<ContextArc>().unwrap().clone();\n                    siv.add_layer(views::Dialog::around(\n                        views::LinearLayout::vertical()\n                            .child(views::TextView::new(title).center())\n                            .child(views::DummyView.fixed_height(1))\n                            .child(views::NamedView::new(\n                                \"background_schedule_pool_logs\",\n                                view::TextLogView::new(\n                                    \"background_schedule_pool_logs\",\n                                    context,\n                                    TextLogArguments {\n                                        query_ids: Some(query_ids),\n                                        logger_names: None,\n                                        hostname: None,\n                                        message_filter: None,\n                                        max_level: None,\n                                        start: start.into(),\n                                        end,\n                                    },\n                                ),\n                            )),\n                    ));\n                    siv.focus_name(\"background_schedule_pool_logs\").unwrap();\n        
        }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::TableParts(database, table) => {\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    let context = siv.user_data::<ContextArc>().unwrap().clone();\n                    crate::view::providers::table_parts::show_table_parts_dialog(\n                        siv,\n                        context,\n                        Some(database),\n                        Some(table),\n                    );\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::AsynchronousInserts(database, table) => {\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    let context = siv.user_data::<ContextArc>().unwrap().clone();\n                    crate::view::providers::asynchronous_inserts::show_asynchronous_inserts_dialog(\n                        siv,\n                        context,\n                        Some(database),\n                        Some(table),\n                    );\n                }))\n                .map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n        }\n        Event::ShareLogs(content) => {\n            let url =\n                pastila::upload_encrypted(&content, &pastila_clickhouse_host, &pastila_url).await?;\n\n            let url_clone = url.clone();\n            cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.pop_layer();\n                    siv.add_layer(\n                        views::Dialog::text(format!(\"Logs shared (encrypted):\\n\\n{}\", url))\n                            .title(\"Share Complete\")\n                            .button(\"Close\", |siv| {\n                                siv.pop_layer();\n                            }),\n                    );\n                }))\n                
.map_err(|_| anyhow!(\"Cannot send message to UI\"))?;\n\n            crate::utils::open_url_command(&url_clone).status()?;\n        }\n        Event::PerfettoExport(queries, query_ids, start, end) => {\n            let perfetto_cfg = context.lock().unwrap().options.perfetto.clone();\n            let end_time = end.unwrap_or_else(Local::now) + chrono::TimeDelta::seconds(1);\n            let mut builder =\n                PerfettoTraceBuilder::new(perfetto_cfg.per_server, perfetto_cfg.text_log_android);\n\n            for q in &queries {\n                log::info!(\n                    \"Perfetto query: id={} start_ns={} end_ns={} elapsed={}\",\n                    q.query_id,\n                    q.query_start_time_microseconds\n                        .timestamp_nanos_opt()\n                        .unwrap_or(0),\n                    q.query_end_time_microseconds\n                        .timestamp_nanos_opt()\n                        .unwrap_or(0),\n                    q.elapsed,\n                );\n            }\n            builder.add_queries(&queries);\n            fetch_and_populate_perfetto_trace(\n                &clickhouse,\n                &mut builder,\n                &perfetto_cfg,\n                Some(&query_ids),\n                start,\n                end_time,\n            )\n            .await;\n            serve_perfetto_trace(context.clone(), cb_sink, builder)?;\n        }\n        Event::ServerPerfettoExport(start, end) => {\n            let perfetto_cfg = context.lock().unwrap().options.perfetto.clone();\n            let query_block = clickhouse.get_queries_for_perfetto(start, end).await?;\n            let mut queries = Vec::new();\n            for i in 0..query_block.row_count() {\n                match Query::from_clickhouse_block(&query_block, i, false) {\n                    Ok(q) => queries.push(q),\n                    Err(e) => log::warn!(\"Perfetto: failed to parse query row {}: {}\", i, e),\n                }\n            }\n    
        let end_time = end + chrono::TimeDelta::seconds(1);\n            let mut builder =\n                PerfettoTraceBuilder::new(perfetto_cfg.per_server, perfetto_cfg.text_log_android);\n            builder.add_queries(&queries);\n            fetch_and_populate_perfetto_trace(\n                &clickhouse,\n                &mut builder,\n                &perfetto_cfg,\n                None,\n                start,\n                end_time,\n            )\n            .await;\n            fetch_server_perfetto_sources(\n                &clickhouse,\n                &mut builder,\n                &perfetto_cfg,\n                start,\n                end_time,\n            )\n            .await;\n            serve_perfetto_trace(context.clone(), cb_sink, builder)?;\n        }\n    }\n\n    return Ok(());\n}\n"
  },
  {
    "path": "src/lib.rs",
    "content": "mod actions;\nmod common;\nmod interpreter;\nmod pastila;\nmod utils;\nmod view;\n\nmod bin;\npub use bin::chdig_main;\npub use bin::chdig_main_async;\n"
  },
  {
    "path": "src/main.rs",
    "content": "use anyhow::Result;\nuse chdig::chdig_main_async;\nuse std::env::args_os;\n\n#[tokio::main(flavor = \"current_thread\")]\nasync fn main() -> Result<()> {\n    #[cfg(feature = \"tokio-console\")]\n    console_subscriber::init();\n\n    return chdig_main_async(args_os()).await;\n}\n"
  },
  {
    "path": "src/pastila.rs",
    "content": "use aes_gcm::{\n    Aes128Gcm, KeyInit, Nonce,\n    aead::{Aead, generic_array::GenericArray},\n};\nuse anyhow::{Result, anyhow};\nuse base64::{Engine as _, engine::general_purpose::STANDARD as BASE64};\nuse clickhouse_rs::{Block, Options, Pool};\nuse rand::RngCore;\nuse regex::Regex;\nuse std::str::FromStr;\nuse url::Url;\n\n/// ClickHouse's SipHash-2-4 implementation (128-bit version)\n/// See https://github.com/ClickHouse/ClickHouse/pull/46065 for details\npub struct ClickHouseSipHash {\n    v0: u64,\n    v1: u64,\n    v2: u64,\n    v3: u64,\n    cnt: u64,\n    current_word: u64,\n    current_bytes_len: usize,\n}\n\nimpl ClickHouseSipHash {\n    pub fn new() -> Self {\n        Self {\n            v0: 0x736f6d6570736575u64,\n            v1: 0x646f72616e646f6du64,\n            v2: 0x6c7967656e657261u64,\n            v3: 0x7465646279746573u64,\n            cnt: 0,\n            current_word: 0,\n            current_bytes_len: 0,\n        }\n    }\n\n    #[inline]\n    fn sipround(&mut self) {\n        self.v0 = self.v0.wrapping_add(self.v1);\n        self.v1 = self.v1.rotate_left(13);\n        self.v1 ^= self.v0;\n        self.v0 = self.v0.rotate_left(32);\n\n        self.v2 = self.v2.wrapping_add(self.v3);\n        self.v3 = self.v3.rotate_left(16);\n        self.v3 ^= self.v2;\n\n        self.v0 = self.v0.wrapping_add(self.v3);\n        self.v3 = self.v3.rotate_left(21);\n        self.v3 ^= self.v0;\n\n        self.v2 = self.v2.wrapping_add(self.v1);\n        self.v1 = self.v1.rotate_left(17);\n        self.v1 ^= self.v2;\n        self.v2 = self.v2.rotate_left(32);\n    }\n\n    pub fn write(&mut self, data: &[u8]) {\n        for &byte in data {\n            let byte_idx = self.current_bytes_len;\n            self.current_word |= (byte as u64) << (byte_idx * 8);\n            self.current_bytes_len += 1;\n            self.cnt += 1;\n\n            if self.current_bytes_len == 8 {\n                self.v3 ^= self.current_word;\n                
self.sipround();\n                self.sipround();\n                self.v0 ^= self.current_word;\n\n                self.current_word = 0;\n                self.current_bytes_len = 0;\n            }\n        }\n    }\n\n    pub fn finish128(mut self) -> u128 {\n        let cnt_byte = (self.cnt % 256) as u8;\n        self.current_word |= (cnt_byte as u64) << 56;\n\n        self.v3 ^= self.current_word;\n        self.sipround();\n        self.sipround();\n        self.v0 ^= self.current_word;\n\n        self.v2 ^= 0xff;\n        self.sipround();\n        self.sipround();\n        self.sipround();\n        self.sipround();\n\n        let low = self.v0 ^ self.v1;\n        let high = self.v2 ^ self.v3;\n\n        ((high as u128) << 64) | (low as u128)\n    }\n}\n\npub fn calculate_hash(text: &str) -> String {\n    let mut hasher = ClickHouseSipHash::new();\n    hasher.write(text.as_bytes());\n    let hash = hasher.finish128();\n    format!(\"{:032x}\", hash.swap_bytes())\n}\n\npub fn get_fingerprint(text: &str) -> String {\n    let re = Regex::new(r\"\\b\\w{4,100}\\b\").unwrap();\n    let words: Vec<&str> = re.find_iter(text).map(|m| m.as_str()).collect();\n\n    if words.len() < 3 {\n        return \"ffffffff\".to_string();\n    }\n\n    let mut min_hash: Option<u128> = None;\n\n    for i in 0..words.len().saturating_sub(2) {\n        let triplet = format!(\"{} {} {}\", words[i], words[i + 1], words[i + 2]);\n        let mut hasher = ClickHouseSipHash::new();\n        hasher.write(triplet.as_bytes());\n        let hash_value = hasher.finish128();\n\n        min_hash = Some(min_hash.map_or(hash_value, |current| current.min(hash_value)));\n    }\n\n    let full_hash = match min_hash {\n        Some(hash) => format!(\"{:032x}\", hash.swap_bytes()),\n        None => \"ffffffffffffffffffffffffffffffff\".to_string(),\n    };\n    full_hash[..8].to_string()\n}\n\nfn encrypt_content(content: &str, key: &[u8; 16]) -> Result<String> {\n    let cipher = 
Aes128Gcm::new(GenericArray::from_slice(key));\n    let nonce = Nonce::from_slice(&key[..12]);\n\n    let ciphertext = cipher\n        .encrypt(nonce, content.as_bytes())\n        .map_err(|e| anyhow!(\"Encryption failed: {}\", e))?;\n\n    Ok(BASE64.encode(&ciphertext))\n}\n\nasync fn get_pastila_client(pastila_clickhouse_host: &str) -> Result<clickhouse_rs::ClientHandle> {\n    let url = {\n        let http_url = Url::parse(pastila_clickhouse_host)?;\n        let host = http_url\n            .host_str()\n            .ok_or_else(|| anyhow!(\"No host in pastila_clickhouse_host\"))?;\n\n        let user = if !http_url.username().is_empty() {\n            http_url.username().to_string()\n        } else {\n            http_url\n                .query_pairs()\n                .find(|(k, _)| k == \"user\")\n                .map(|(_, v)| v.to_string())\n                .unwrap_or_else(|| \"default\".to_string())\n        };\n\n        let secure = http_url.scheme() == \"https\";\n        let port = if secure { 9440 } else { 9000 };\n\n        format!(\n            \"tcp://{}@{}:{}/?secure={}&connection_timeout=5s\",\n            user, host, port, secure\n        )\n    };\n    let options = Options::from_str(&url)?;\n    let pool = Pool::new(options);\n    let client = pool.get_handle().await?;\n    Ok(client)\n}\n\npub async fn upload_encrypted(\n    content: &str,\n    pastila_clickhouse_host: &str,\n    pastila_url: &str,\n) -> Result<String> {\n    let mut key = [0u8; 16];\n    rand::thread_rng().fill_bytes(&mut key);\n    let encrypted = encrypt_content(content, &key)?;\n\n    let fingerprint_hex = get_fingerprint(&encrypted);\n    let hash_hex = calculate_hash(&encrypted);\n\n    log::info!(\n        \"Uploading {} bytes ({} bytes encrypted) to {}\",\n        content.len(),\n        encrypted.len(),\n        pastila_clickhouse_host\n    );\n\n    {\n        let mut client = get_pastila_client(pastila_clickhouse_host).await?;\n        let block = Block::new()\n      
      .column(\"fingerprint_hex\", vec![fingerprint_hex.as_str()])\n            .column(\"hash_hex\", vec![hash_hex.as_str()])\n            .column(\"content\", vec![encrypted.as_str()])\n            .column(\"is_encrypted\", vec![1_u8]);\n        client.insert(\"paste.data\", block).await?;\n    }\n\n    let pastila_url = pastila_url.trim_end_matches('/');\n    let key_fragment = format!(\"#{}\", BASE64.encode(key));\n    let pastila_page_url = format!(\n        \"{}/?{}/{}{}GCM\",\n        pastila_url, fingerprint_hex, hash_hex, key_fragment\n    );\n\n    Ok(pastila_page_url)\n}\n"
  },
  {
    "path": "src/utils.rs",
    "content": "use crate::actions::ActionDescription;\nuse crate::pastila;\nuse crate::view::Navigation;\nuse anyhow::{Context, Error, Result};\nuse cursive::Cursive;\nuse cursive::align::HAlign;\nuse cursive::event;\nuse cursive::utils::markup::StyledString;\nuse cursive::view::Nameable;\nuse cursive::views::{EditView, LinearLayout, OnEventView, Panel, SelectView};\nuse fuzzy_matcher::FuzzyMatcher;\nuse fuzzy_matcher::skim::SkimMatcherV2;\nuse std::collections::HashMap;\nuse std::env;\nuse std::fs;\nuse std::io::Write;\nuse std::process::{Command, Stdio};\nuse syntect::{highlighting::ThemeSet, parsing::SyntaxSet};\nuse tempfile::Builder;\n\n/// RAII guard that leaves cursive's terminal state (raw mode, alternate screen,\n/// mouse capture, hidden cursor) and restores it on drop.\n///\n/// Uses cursive's re-exported crossterm to ensure we operate on the same global\n/// raw mode state that the cursive backend uses.\npub struct TerminalRawModeGuard {\n    restored: bool,\n}\n\nuse cursive::backends::crossterm::crossterm as ct;\n\nimpl TerminalRawModeGuard {\n    pub fn leave() -> Self {\n        ct::terminal::disable_raw_mode().unwrap();\n        ct::execute!(\n            std::io::stdout(),\n            ct::event::DisableMouseCapture,\n            ct::style::ResetColor,\n            ct::style::SetAttribute(ct::style::Attribute::Reset),\n            ct::cursor::Show,\n            ct::terminal::LeaveAlternateScreen,\n        )\n        .unwrap();\n        Self { restored: false }\n    }\n\n    fn do_restore() -> std::io::Result<()> {\n        ct::terminal::enable_raw_mode()?;\n        ct::execute!(\n            std::io::stdout(),\n            ct::terminal::EnterAlternateScreen,\n            ct::event::EnableMouseCapture,\n            ct::cursor::Hide,\n        )\n    }\n\n    pub fn restore(&mut self) -> std::io::Result<()> {\n        self.restored = true;\n        Self::do_restore()\n    }\n}\n\nimpl Drop for TerminalRawModeGuard {\n    fn drop(&mut self) {\n       
 if !self.restored {\n            let _ = Self::do_restore();\n        }\n    }\n}\n\npub fn fuzzy_actions<F>(siv: &mut Cursive, actions: Vec<ActionDescription>, on_select: F)\nwhere\n    F: Fn(&mut Cursive, String) + 'static + Send + Sync,\n{\n    let items: Vec<(String, String)> = actions\n        .iter()\n        .map(|a| {\n            let text = a.text.to_string();\n            (text.clone(), text)\n        })\n        .collect();\n    fuzzy_select_strings(siv, \"Fuzzy search\", items, on_select);\n}\n\npub fn fuzzy_select_strings<F>(\n    siv: &mut Cursive,\n    title: &str,\n    items: Vec<(String, String)>,\n    on_select: F,\n) where\n    F: Fn(&mut Cursive, String) + 'static + Send + Sync,\n{\n    if siv.has_view(\"fuzzy_search\") {\n        return;\n    }\n\n    let mut select = SelectView::<String>::new().h_align(HAlign::Left).autojump();\n    for (label, value) in &items {\n        select.add_item(label.clone(), value.clone());\n    }\n\n    select.set_on_submit(move |siv, item: &String| {\n        let selected = item.clone();\n        siv.pop_layer();\n        on_select(siv, selected);\n    });\n\n    let search = EditView::new()\n        .on_edit(move |siv, query, _| {\n            siv.call_on_name(\"fuzzy_select\", |view: &mut SelectView<String>| {\n                view.clear();\n\n                let matcher = SkimMatcherV2::default();\n                let query_words: Vec<&str> = query.split_whitespace().collect();\n\n                let mut matches: Vec<(i64, String, String)> = items\n                    .iter()\n                    .filter_map(|(label, value)| {\n                        if query_words.is_empty() {\n                            return Some((0, label.clone(), value.clone()));\n                        }\n\n                        let mut total_score = 0i64;\n                        for word in &query_words {\n                            match matcher.fuzzy_match(label, word) {\n                                Some(score) => 
total_score += score,\n                                None => return None,\n                            }\n                        }\n\n                        Some((total_score, label.clone(), value.clone()))\n                    })\n                    .collect();\n\n                matches.sort_by(|a, b| b.0.cmp(&a.0));\n\n                for (_, label, value) in matches {\n                    view.add_item(label, value);\n                }\n            });\n        })\n        .on_submit(|siv, _| {\n            siv.call_on_name(\"fuzzy_select\", |view: &mut SelectView<String>| {\n                view.set_selection(0);\n            });\n            siv.focus_name(\"fuzzy_select\").ok();\n            siv.on_event(event::Event::Key(cursive::event::Key::Enter));\n        })\n        .with_name(\"fuzzy_search\");\n\n    let layout = LinearLayout::vertical()\n        .child(search)\n        .child(select.with_name(\"fuzzy_select\"));\n\n    let dialog = OnEventView::new(Panel::new(layout).title(title.to_string()))\n        .on_pre_event(event::Event::CtrlChar('k'), |s| {\n            s.call_on_name(\"fuzzy_select\", |view: &mut SelectView<String>| {\n                view.select_up(1);\n            });\n        })\n        .on_pre_event(event::Event::CtrlChar('j'), |s| {\n            s.call_on_name(\"fuzzy_select\", |view: &mut SelectView<String>| {\n                view.select_down(1);\n            });\n        })\n        .on_pre_event(event::Event::CtrlChar('w'), |s| {\n            let callback = s.call_on_name(\"fuzzy_search\", |view: &mut EditView| {\n                let content = view.get_content();\n                let cursor = view.get_cursor();\n\n                let before_cursor = &content[..cursor];\n                let trimmed = before_cursor.trim_end();\n                if trimmed.is_empty() {\n                    let cb = view.set_content(\"\");\n                    view.set_cursor(0);\n                    return Some(cb);\n                }\n\n        
        let new_pos = trimmed\n                    .rfind(|c: char| c.is_whitespace())\n                    .map(|pos| pos + 1)\n                    .unwrap_or(0);\n\n                let new_content = format!(\"{}{}\", &content[..new_pos], &content[cursor..]);\n                let cb = view.set_content(new_content);\n                view.set_cursor(new_pos);\n                Some(cb)\n            });\n\n            if let Some(Some(cb)) = callback {\n                cb(s);\n            }\n        })\n        .on_event(event::Key::Backspace, |_| {})\n        .on_event(event::Event::CtrlChar('p'), |s| {\n            s.pop_layer();\n        })\n        .on_event(event::Key::Esc, |s| {\n            s.pop_layer();\n        });\n\n    siv.add_layer(dialog);\n    siv.focus_name(\"fuzzy_search\").ok();\n}\n\npub fn highlight_sql(text: &str) -> Result<StyledString> {\n    let syntax_set = SyntaxSet::load_defaults_newlines();\n    let ts = ThemeSet::load_defaults();\n    let mut highlighter = syntect::easy::HighlightLines::new(\n        syntax_set\n            .find_syntax_by_token(\"sql\")\n            .context(\"Cannot load SQL syntax\")?,\n        &ts.themes[\"base16-ocean.dark\"],\n    );\n    // NOTE: parse() does not interpret syntect::highlighting::Color::a (alpha/transparency)\n    return cursive_syntect::parse(text, &mut highlighter, &syntax_set)\n        .context(\"Cannot highlight query\");\n}\n\npub fn get_query(query: &str, settings: &HashMap<String, String>) -> String {\n    // NOTE: LinesIterator (that is used by TextView for wrapping) cannot handle \"\\t\",\n    // it renders as a replacement glyph at the start of each wrapped/continuation line.\n    let mut ret = query.replace('\\t', \"    \");\n    let settings_str = settings\n        .iter()\n        .enumerate()\n        .map(|(i, kv)| {\n            let is_last = i + 1 == settings.len();\n            // NOTE: LinesIterator (that is used by TextView for wrapping) cannot handle \"\\t\", hence 4 spaces\n    
        let prefix = \"    \";\n            format!(\n                \"{}{}='{}'{}\\n\",\n                prefix,\n                kv.0,\n                kv.1.replace('\\'', \"\\\\\\'\"),\n                if !is_last { \",\" } else { \"\" }\n            )\n        })\n        .collect::<Vec<String>>()\n        .join(\"\");\n    if !query.contains(\"SETTINGS\") {\n        ret.push_str(\"\\nSETTINGS\\n\");\n    } else {\n        ret.push_str(\",\\n\");\n    }\n    ret.push_str(&settings_str);\n    return ret;\n}\n\npub fn edit_query(query: &str, settings: &HashMap<String, String>) -> Result<String> {\n    let mut tmp_file = Builder::new()\n        .prefix(\"chdig-query-\")\n        .suffix(\".sql\")\n        .rand_bytes(5)\n        .tempfile()?;\n\n    let query = get_query(query, settings);\n    tmp_file.write_all(query.as_bytes())?;\n\n    let editor = env::var_os(\"EDITOR\").unwrap_or_else(|| \"vim\".into());\n    let tmp_file_path = tmp_file.path().to_str().unwrap();\n\n    let _guard = TerminalRawModeGuard::leave();\n\n    let result = Command::new(&editor)\n        .arg(tmp_file_path)\n        .spawn()\n        .map_err(|e| Error::msg(format!(\"Cannot execute editor {:?} ({})\", editor, e)))?\n        .wait()?;\n\n    if !result.success() {\n        return Err(Error::msg(format!(\n            \"Editor exited unsuccessfully {:?} ({})\",\n            editor, result\n        )));\n    }\n\n    let query = fs::read_to_string(tmp_file_path)?;\n    return Ok(query);\n}\n\npub fn open_url_command(url: &str) -> Command {\n    let mut cmd = if cfg!(target_os = \"windows\") {\n        let mut c = Command::new(\"cmd\");\n        c.args([\"/C\", \"start\", \"\", url]); // \"\" to avoid stealing the first quoted argument as window title\n        c\n    } else if cfg!(target_os = \"macos\") {\n        let mut c = Command::new(\"open\");\n        c.arg(url);\n        c\n    } else {\n        let mut c = Command::new(\"xdg-open\");\n        c.arg(url);\n        c\n    };\n\n  
  cmd.stderr(Stdio::null()).stdout(Stdio::null());\n    cmd\n}\n\npub async fn share_graph(\n    graph: String,\n    pastila_clickhouse_host: &str,\n    pastila_url: &str,\n) -> Result<()> {\n    if graph.is_empty() {\n        return Err(Error::msg(\"Graph is empty\"));\n    }\n\n    // Create a self-contained HTML file that renders the Graphviz graph\n    // Using viz.js from CDN for client-side rendering\n    let html = format!(\n        r#\"<!DOCTYPE html>\n<html>\n<head>\n    <meta charset=\"utf-8\">\n    <title>Graphviz Graph</title>\n    <style>\n        body {{ margin: 0; padding: 20px; font-family: sans-serif; }}\n        #graph {{ text-align: center; }}\n    </style>\n</head>\n<body>\n    <div id=\"graph\">Loading graph...</div>\n    <script src=\"https://cdn.jsdelivr.net/npm/@viz-js/viz@3.2.4/lib/viz-standalone.js\"></script>\n    <script>\n        const dot = {};\n        Viz.instance().then(viz => {{\n            const svg = viz.renderSVGElement(dot);\n            const container = document.getElementById('graph');\n            container.innerHTML = '';\n            container.appendChild(svg);\n        }}).catch(err => {{\n            document.getElementById('graph').textContent = 'Error rendering graph: ' + err;\n        }});\n    </script>\n</body>\n</html>\"#,\n        serde_json::to_string(&graph)?\n    );\n\n    // Upload HTML to pastila with end-to-end encryption\n    let mut url = pastila::upload_encrypted(&html, pastila_clickhouse_host, pastila_url).await?;\n\n    if let Some(anchor_pos) = url.find('#') {\n        url.insert_str(anchor_pos, \".html\");\n    }\n\n    // Open the URL in the browser\n    open_url_command(&url).status()?;\n\n    Ok(())\n}\n\npub fn find_common_hostname_prefix_and_suffix<'a, I>(hostnames: I) -> (String, String)\nwhere\n    I: Iterator<Item = &'a str>,\n{\n    let hostnames_vec: Vec<&str> = hostnames.collect();\n    if hostnames_vec.is_empty() {\n        return (String::new(), String::new());\n    }\n\n    let first = 
hostnames_vec[0];\n\n    // Find common prefix\n    let mut prefix_end = first.len();\n    for pos in (0..first.len()).rev() {\n        let candidate = &first[..=pos];\n        if hostnames_vec[1..].iter().all(|h| h.starts_with(candidate)) {\n            prefix_end = pos + 1;\n            break;\n        }\n    }\n\n    let common_prefix = &first[..prefix_end];\n    let prefix_delim_pos = common_prefix\n        .rfind('.')\n        .into_iter()\n        .chain(common_prefix.rfind('-'))\n        .max();\n\n    let prefix = if let Some(pos) = prefix_delim_pos {\n        common_prefix[..=pos].to_string()\n    } else {\n        String::new()\n    };\n\n    // Find common suffix\n    let mut suffix_start = 0;\n    for pos in 0..first.len() {\n        let candidate = &first[pos..];\n        if hostnames_vec[1..].iter().all(|h| h.ends_with(candidate)) {\n            suffix_start = pos;\n            break;\n        }\n    }\n\n    let common_suffix = &first[suffix_start..];\n    let suffix_delim_pos = common_suffix\n        .find('.')\n        .into_iter()\n        .chain(common_suffix.find('-'))\n        .min();\n\n    let suffix = if let Some(pos) = suffix_delim_pos {\n        common_suffix[pos..].to_string()\n    } else {\n        String::new()\n    };\n\n    (prefix, suffix)\n}\n"
  },
  {
    "path": "src/view/log_view.rs",
    "content": "use anyhow::{Error, Result};\nuse chrono::{DateTime, Datelike, Duration, Local, Timelike};\nuse cursive::{\n    Cursive, Printer, Vec2,\n    event::{Callback, Event, EventResult, Key},\n    theme::{Color, ColorStyle, Style},\n    utils::{lines::spans::LinesIterator, markup::StyledString},\n    view::{Nameable, Resizable, ScrollStrategy, View, ViewWrapper, scroll},\n    views::{Dialog, EditView, NamedView, OnEventView},\n    wrap_impl,\n};\nuse regex::Regex;\nuse std::collections::{HashMap, hash_map::DefaultHasher};\nuse std::fs;\nuse std::hash::{Hash, Hasher};\nuse std::io::Write;\nuse unicode_width::UnicodeWidthStr;\n\nuse crate::common::RelativeDateTime;\nuse crate::interpreter::{ContextArc, TextLogArguments};\nuse crate::utils::find_common_hostname_prefix_and_suffix;\nuse crate::view::{TextLogView, show_bottom_prompt};\n\n// Hash-based color function matching ClickHouse's setColor from terminalColors.cpp\n// Uses YCbCr color space with constant brightness (y=128) for better readability\nfn hash_to_color(hash: u64) -> Color {\n    let y = 128u8;\n    let cb = ((hash >> 8) & 0xFF) as u8;\n    let cr = (hash & 0xFF) as u8;\n\n    // YCbCr to RGB conversion (ITU-R BT.601)\n    // R = Y + 1.402 * (Cr - 128)\n    // G = Y - 0.344136 * (Cb - 128) - 0.714136 * (Cr - 128)\n    // B = Y + 1.772 * (Cb - 128)\n\n    let cb_offset = cb as i32 - 128;\n    let cr_offset = cr as i32 - 128;\n\n    let r = (y as i32 + (1402 * cr_offset) / 1000).clamp(0, 255) as u8;\n    let g = (y as i32 - (344 * cb_offset) / 1000 - (714 * cr_offset) / 1000).clamp(0, 255) as u8;\n    let b = (y as i32 + (1772 * cb_offset) / 1000).clamp(0, 255) as u8;\n\n    Color::Rgb(r, g, b)\n}\n\n// Color for log priority level matching ClickHouse's setColorForLogPriority from terminalColors.cpp\nfn get_level_color(level: &str) -> Color {\n    match level {\n        // Fatal: \\033[1;41m (bold + red background) - using bright red\n        \"Fatal\" => Color::Rgb(255, 85, 85),\n        // 
Critical: \\033[7;31m (reverse video + red) - using bright red\n        \"Critical\" => Color::Rgb(255, 85, 85),\n        // Error: \\033[1;31m (bold red) - bright red\n        \"Error\" => Color::Rgb(255, 85, 85),\n        // Warning: \\033[0;31m (red) - normal red\n        \"Warning\" => Color::Rgb(255, 0, 0),\n        // Notice: \\033[0;33m (yellow) - normal yellow\n        \"Notice\" => Color::Rgb(255, 255, 0),\n        // Information: \\033[1m (bold) - using default terminal color (light gray)\n        \"Information\" => Color::Rgb(192, 192, 192),\n        // Debug: no color - default terminal color\n        \"Debug\" => Color::TerminalDefault,\n        // Trace: \\033[2m (dim) - dark gray\n        \"Trace\" => Color::Rgb(128, 128, 128),\n        // Test: no specific color in ClickHouse\n        \"Test\" => Color::TerminalDefault,\n        _ => Color::TerminalDefault,\n    }\n}\n\n// Hash function similar to ClickHouse's intHash64\nfn int_hash_64(value: u64) -> u64 {\n    let mut hasher = DefaultHasher::new();\n    value.hash(&mut hasher);\n    hasher.finish()\n}\n\nfn string_hash(s: &str) -> u64 {\n    let mut hasher = DefaultHasher::new();\n    s.hash(&mut hasher);\n    hasher.finish()\n}\n\n#[derive(Clone)]\npub struct LogEntry {\n    pub host_name: String,\n    pub display_host_name: Option<String>,\n    pub event_time_microseconds: DateTime<Local>,\n    pub thread_id: u64,\n    pub level: String,\n    pub message: String,\n    pub query_id: Option<String>,\n    pub logger_name: Option<String>,\n}\n\nstruct IdentifierMaps {\n    query_id_map: HashMap<String, String>,\n    logger_name_map: HashMap<String, String>,\n    level_map: HashMap<String, String>,\n    host_name_map: HashMap<String, String>,\n}\n\nimpl LogEntry {\n    fn to_styled_string(&self, cluster: bool) -> StyledString {\n        self.to_styled_string_with_identifiers(cluster, None)\n    }\n\n    fn to_styled_string_with_identifiers(\n        &self,\n        cluster: bool,\n        
identifier_maps: Option<&IdentifierMaps>,\n    ) -> StyledString {\n        let mut line = StyledString::new();\n\n        if cluster {\n            line.append_plain(\"[\");\n            let host_hash = string_hash(&self.host_name);\n            let host_color = hash_to_color(host_hash);\n            let display_name = self.display_host_name.as_ref().unwrap_or(&self.host_name);\n            line.append_styled(display_name, host_color);\n\n            if let Some(maps) = identifier_maps\n                && let Some(id) = maps.host_name_map.get(&self.host_name)\n            {\n                line.append_styled(format!(\"[{}]\", id), Color::Rgb(255, 255, 0));\n            }\n            line.append_plain(\"] \");\n        }\n\n        // Format timestamp with microseconds matching ClickHouse format: YYYY.MM.DD HH:MM:SS.microseconds\n        let dt = self.event_time_microseconds;\n        let microseconds = dt.timestamp_subsec_micros();\n        let timestamp = format!(\n            \"{:04}.{:02}.{:02} {:02}:{:02}:{:02}.{:06}\",\n            dt.year(),\n            dt.month(),\n            dt.day(),\n            dt.hour(),\n            dt.minute(),\n            dt.second(),\n            microseconds\n        );\n        line.append_plain(format!(\"{} \", timestamp));\n\n        // Thread ID with hash-based coloring: [ thread_id ]\n        line.append_plain(\"[ \");\n        let thread_hash = int_hash_64(self.thread_id);\n        let thread_color = hash_to_color(thread_hash);\n        line.append_styled(format!(\"{}\", self.thread_id), thread_color);\n        line.append_plain(\" ] \");\n\n        // Query ID with hash-based coloring: {query_id}\n        // ClickHouse writes query_id even if empty for log parser convenience\n        line.append_plain(\"{\");\n        let query_id_str = self.query_id.as_deref().unwrap_or(\"\");\n        if !query_id_str.is_empty() {\n            let query_hash = string_hash(query_id_str);\n            let query_color = 
hash_to_color(query_hash);\n            line.append_styled(query_id_str, query_color);\n\n            if let Some(maps) = identifier_maps\n                && let Some(id) = maps.query_id_map.get(query_id_str)\n            {\n                line.append_styled(format!(\"[{}]\", id), Color::Rgb(255, 255, 0));\n            }\n        }\n        line.append_plain(\"} \");\n\n        // Priority level with color: <level>\n        line.append_plain(\"<\");\n        let level_color = get_level_color(self.level.as_str());\n        line.append_styled(self.level.as_str(), level_color);\n        if let Some(maps) = identifier_maps\n            && let Some(id) = maps.level_map.get(&self.level)\n        {\n            line.append_styled(format!(\"[{}]\", id), Color::Rgb(255, 255, 0));\n        }\n        line.append_plain(\"> \");\n\n        // Logger name (source) with hash-based coloring: source:\n        if let Some(logger_name) = &self.logger_name {\n            let logger_hash = string_hash(logger_name);\n            let logger_color = hash_to_color(logger_hash);\n            line.append_styled(logger_name, logger_color);\n\n            if let Some(maps) = identifier_maps\n                && let Some(id) = maps.logger_name_map.get(logger_name)\n            {\n                line.append_styled(format!(\"[{}]\", id), Color::Rgb(255, 255, 0));\n            }\n            line.append_plain(\": \");\n        }\n\n        // Message\n        line.append_plain(self.message.as_str());\n        return line;\n    }\n}\n\n#[derive(Clone)]\nenum FilterType {\n    QueryId(String),\n    LoggerName(String),\n    Level(String),\n    HostName(String),\n}\n\npub struct LogViewBase {\n    max_width: usize,\n\n    content_size_with_wrap: Vec2,\n    // Size without respecting wrap, since with wrap width is equal to the longest line\n    screen_size_without_wrap: Vec2,\n\n    needs_relayout: bool,\n    update_content: bool,\n    scroll_core: scroll::Core,\n\n    search_direction_forward: 
bool,\n    search_regex: Option<Regex>,\n    matched_row: Option<usize>,\n    matched_col: Option<usize>,\n    matched_len: usize,\n    cluster: bool,\n    wrap: bool,\n    no_strip_hostname_suffix: bool,\n    descending: bool,\n\n    // Filter mode state\n    filter_mode: bool,\n    filter_identifiers: HashMap<String, FilterType>,\n    active_filter: Option<FilterType>,\n\n    logs: Vec<LogEntry>,\n\n    // When filtering is active, stores indices into self.logs for visible entries\n    // Empty when no filter is active (all logs visible)\n    filtered_log_indices: Vec<usize>,\n\n    // Cumulative row counts: log_cumulative_rows[i] = total rows in logs 0..i\n    // This allows O(log n) binary search to map display_row -> log_index\n    log_cumulative_rows: Vec<usize>,\n    last_computed_width: usize,\n}\n\nimpl Default for LogViewBase {\n    fn default() -> Self {\n        Self {\n            max_width: 0,\n            content_size_with_wrap: Vec2::zero(),\n            screen_size_without_wrap: Vec2::zero(),\n            needs_relayout: false,\n            update_content: false,\n            scroll_core: scroll::Core::default(),\n            search_direction_forward: false,\n            search_regex: None,\n            matched_row: None,\n            matched_col: None,\n            matched_len: 0,\n            cluster: false,\n            wrap: false,\n            no_strip_hostname_suffix: false,\n            descending: false,\n            filter_mode: false,\n            filter_identifiers: HashMap::new(),\n            active_filter: None,\n            logs: Vec::new(),\n            filtered_log_indices: Vec::new(),\n            log_cumulative_rows: Vec::new(),\n            last_computed_width: usize::MAX,\n        }\n    }\n}\n\ncursive::impl_scroller!(LogViewBase::scroll_core);\n\nimpl LogViewBase {\n    // Get the log at the given visible index\n    // If filtering is active, maps through filtered_log_indices\n    fn get_visible_log(&self, visible_idx: usize) 
-> Option<&LogEntry> {\n        if self.filtered_log_indices.is_empty() {\n            self.logs.get(visible_idx)\n        } else {\n            self.filtered_log_indices\n                .get(visible_idx)\n                .and_then(|&idx| self.logs.get(idx))\n        }\n    }\n\n    // Get count of visible logs\n    fn visible_log_count(&self) -> usize {\n        if self.filtered_log_indices.is_empty() {\n            self.logs.len()\n        } else {\n            self.filtered_log_indices.len()\n        }\n    }\n\n    // Get identifier maps for rendering with highlights\n    fn get_identifier_maps(&self) -> Option<IdentifierMaps> {\n        if !self.filter_mode {\n            return None;\n        }\n\n        let mut identifier_maps = IdentifierMaps {\n            query_id_map: HashMap::new(),\n            logger_name_map: HashMap::new(),\n            level_map: HashMap::new(),\n            host_name_map: HashMap::new(),\n        };\n\n        for (id, filter_type) in &self.filter_identifiers {\n            match filter_type {\n                FilterType::QueryId(val) => {\n                    identifier_maps.query_id_map.insert(val.clone(), id.clone());\n                }\n                FilterType::LoggerName(val) => {\n                    identifier_maps\n                        .logger_name_map\n                        .insert(val.clone(), id.clone());\n                }\n                FilterType::Level(val) => {\n                    identifier_maps.level_map.insert(val.clone(), id.clone());\n                }\n                FilterType::HostName(val) => {\n                    identifier_maps\n                        .host_name_map\n                        .insert(val.clone(), id.clone());\n                }\n            }\n        }\n\n        Some(identifier_maps)\n    }\n\n    // Binary search to find which log a display row belongs to\n    // Returns (log_index, row_within_log)\n    fn display_row_to_log(&self, display_row: usize) -> Option<(usize, 
usize)> {\n        if self.log_cumulative_rows.is_empty() {\n            return None;\n        }\n\n        // Use proper binary search: find first cumulative > display_row\n        // cumulative_rows[i] = total rows in logs 0..=i\n        let log_idx = match self.log_cumulative_rows.binary_search(&(display_row + 1)) {\n            Ok(idx) => idx,  // Found exact match for display_row + 1\n            Err(idx) => idx, // Would insert at idx, so first element > display_row is at idx\n        };\n\n        if log_idx >= self.log_cumulative_rows.len() {\n            return None;\n        }\n\n        let row_start = if log_idx == 0 {\n            0\n        } else {\n            self.log_cumulative_rows[log_idx - 1]\n        };\n        let row_within_log = display_row - row_start;\n\n        Some((log_idx, row_within_log))\n    }\n\n    // Map log_index to its starting display row\n    fn log_to_display_row(&self, log_idx: usize) -> usize {\n        if log_idx == 0 {\n            0\n        } else {\n            self.log_cumulative_rows\n                .get(log_idx - 1)\n                .copied()\n                .unwrap_or(0)\n        }\n    }\n\n    fn extract_identifiers(&mut self) {\n        let mut query_ids: HashMap<String, usize> = HashMap::new();\n        let mut logger_names: HashMap<String, usize> = HashMap::new();\n        let mut levels: HashMap<String, usize> = HashMap::new();\n        let mut host_names: HashMap<String, usize> = HashMap::new();\n\n        for log in &self.logs {\n            if let Some(ref query_id) = log.query_id\n                && !query_id.is_empty()\n            {\n                query_ids.entry(query_id.clone()).or_insert(0);\n            }\n            if let Some(ref logger_name) = log.logger_name {\n                logger_names.entry(logger_name.clone()).or_insert(0);\n            }\n            levels.entry(log.level.clone()).or_insert(0);\n            host_names.entry(log.host_name.clone()).or_insert(0);\n        }\n\n     
   self.filter_identifiers.clear();\n        let mut counter = 1;\n\n        for query_id in query_ids.keys() {\n            let id = format!(\"q{}\", counter);\n            self.filter_identifiers\n                .insert(id, FilterType::QueryId(query_id.clone()));\n            counter += 1;\n        }\n\n        counter = 1;\n        for logger_name in logger_names.keys() {\n            let id = format!(\"l{}\", counter);\n            self.filter_identifiers\n                .insert(id, FilterType::LoggerName(logger_name.clone()));\n            counter += 1;\n        }\n\n        counter = 1;\n        for level in levels.keys() {\n            let id = format!(\"v{}\", counter);\n            self.filter_identifiers\n                .insert(id, FilterType::Level(level.clone()));\n            counter += 1;\n        }\n\n        counter = 1;\n        for host_name in host_names.keys() {\n            let id = format!(\"h{}\", counter);\n            self.filter_identifiers\n                .insert(id, FilterType::HostName(host_name.clone()));\n            counter += 1;\n        }\n    }\n\n    fn rebuild_content_with_highlights(&mut self) {\n        self.filtered_log_indices.clear();\n        self.needs_relayout = true;\n        self.compute_rows();\n    }\n\n    fn rebuild_content_normal(&mut self) {\n        self.filtered_log_indices.clear();\n        self.needs_relayout = true;\n        self.compute_rows();\n    }\n\n    fn apply_filter(&mut self) {\n        self.filtered_log_indices.clear();\n\n        if let Some(ref filter) = self.active_filter {\n            for (idx, log) in self.logs.iter().enumerate() {\n                let matches = match filter {\n                    FilterType::QueryId(val) => log.query_id.as_ref() == Some(val),\n                    FilterType::LoggerName(val) => log.logger_name.as_ref() == Some(val),\n                    FilterType::Level(val) => &log.level == val,\n                    FilterType::HostName(val) => &log.host_name == val,\n 
               };\n                if matches {\n                    self.filtered_log_indices.push(idx);\n                }\n            }\n        }\n\n        self.needs_relayout = true;\n        self.compute_rows();\n    }\n\n    fn search_in_direction(&mut self, forward: bool) -> bool {\n        if self.search_regex.is_none() {\n            return false;\n        }\n\n        let start_row = self\n            .matched_row\n            .unwrap_or_else(|| self.scroll_core.content_viewport().top());\n        let start_log_idx = self\n            .display_row_to_log(start_row)\n            .map(|(idx, _)| idx)\n            .unwrap_or(0);\n\n        let total_logs = self.visible_log_count();\n        let identifier_maps = self.get_identifier_maps();\n\n        if forward {\n            for log_idx in (start_log_idx..total_logs).chain(0..start_log_idx) {\n                if self.search_log(log_idx, start_log_idx, &identifier_maps, forward) {\n                    return true;\n                }\n            }\n        } else {\n            for log_idx in (0..=start_log_idx)\n                .rev()\n                .chain((start_log_idx + 1..total_logs).rev())\n            {\n                if self.search_log(log_idx, start_log_idx, &identifier_maps, forward) {\n                    return true;\n                }\n            }\n        }\n\n        false\n    }\n\n    fn search_log(\n        &mut self,\n        log_idx: usize,\n        start_log_idx: usize,\n        identifier_maps: &Option<IdentifierMaps>,\n        forward: bool,\n    ) -> bool {\n        if let Some(log) = self.get_visible_log(log_idx) {\n            let mut styled = if let Some(maps) = identifier_maps {\n                log.to_styled_string_with_identifiers(self.cluster, Some(maps))\n            } else {\n                log.to_styled_string(self.cluster)\n            };\n            styled.append(\"\\n\");\n\n            let display_row_start = self.log_to_display_row(log_idx);\n\n            if 
forward {\n                let mut current_row = display_row_start;\n                for row in LinesIterator::new(&styled, self.last_computed_width) {\n                    if log_idx == start_log_idx && Some(current_row) <= self.matched_row {\n                        current_row += 1;\n                        continue;\n                    }\n\n                    if self.search_row(&styled, &row, current_row, forward) {\n                        return true;\n                    }\n                    current_row += 1;\n                }\n            } else {\n                let rows: Vec<_> = LinesIterator::new(&styled, self.last_computed_width).collect();\n                for (row_within_log, row) in rows.iter().enumerate().rev() {\n                    let current_row = display_row_start + row_within_log;\n\n                    if log_idx == start_log_idx && Some(current_row) >= self.matched_row {\n                        continue;\n                    }\n\n                    if self.search_row(&styled, row, current_row, forward) {\n                        return true;\n                    }\n                }\n            }\n        }\n        false\n    }\n\n    fn search_row(\n        &mut self,\n        styled: &StyledString,\n        row: &cursive::utils::lines::spans::Row,\n        current_row: usize,\n        forward: bool,\n    ) -> bool {\n        let re = match &self.search_regex {\n            Some(re) => re,\n            None => return false,\n        };\n        let mut x = 0;\n        for span in row.resolve_stream(styled) {\n            if let Some(m) = re.find(span.content) {\n                self.matched_row = Some(current_row);\n                self.matched_col = Some(x + span.content[..m.start()].width());\n                self.matched_len = m.as_str().width();\n                log::trace!(\n                    \"search regex matched_row: {:?} ({}-search)\",\n                    self.matched_row,\n                    if forward { \"forward\" 
} else { \"reverse\" }\n                );\n                return true;\n            }\n            x += span.content.width();\n        }\n        false\n    }\n\n    fn update_search_forward(&mut self) -> bool {\n        self.search_in_direction(true)\n    }\n\n    fn update_search_reverse(&mut self) -> bool {\n        self.search_in_direction(false)\n    }\n\n    fn update_search(&mut self) -> bool {\n        // In case of resize we can have less rows then before,\n        // so reset the matched_row for this scenario to avoid out-of-bound access.\n        let total_rows = self.log_cumulative_rows.last().copied().unwrap_or(0);\n        if total_rows < self.matched_row.unwrap_or_default() {\n            self.matched_row = None;\n        }\n        if self.search_direction_forward {\n            return self.update_search_forward();\n        } else {\n            return self.update_search_reverse();\n        }\n    }\n\n    fn set_options(&mut self, options: &str) -> Result<()> {\n        if options.is_empty() {\n        } else if options == \"S\" {\n            self.wrap = !self.wrap;\n            log::trace!(\"Toggle wrap mode, switched to {}\", self.wrap);\n        } else {\n            return Err(Error::msg(format!(\"Invalid options: {}\", options)));\n        }\n        return Ok(());\n    }\n\n    fn push_logs(&mut self, mut logs: Vec<LogEntry>) {\n        log::trace!(\"Add {} log entries\", logs.len());\n\n        if logs.is_empty() {\n            return;\n        }\n\n        // In descending mode the \"head\" is the top of the viewport, otherwise it's the bottom.\n        let old_total_rows = self.log_cumulative_rows.last().copied().unwrap_or(0);\n        let viewport = self.scroll_core.content_viewport();\n        let at_head = if self.descending {\n            viewport.top() == 0\n        } else {\n            old_total_rows == 0 || viewport.bottom() + 1 >= old_total_rows\n        };\n        // If the user scrolled away from the head, pin the viewport 
so incoming rows do\n        // not yank them around (for DESC we still need to shift below, since prepending\n        // rotates every row index).\n        if !at_head {\n            self.scroll_core\n                .set_scroll_strategy(ScrollStrategy::KeepRow);\n        }\n\n        // Strip common hostname prefix and suffix from first 1000 newly added items\n        if !self.no_strip_hostname_suffix && logs.len() > 1 {\n            let sample_size = logs.len().min(1000);\n            let (common_prefix, common_suffix) = find_common_hostname_prefix_and_suffix(\n                logs.iter().take(sample_size).map(|l| l.host_name.as_str()),\n            );\n\n            if !common_prefix.is_empty() || !common_suffix.is_empty() {\n                for log in logs.iter_mut() {\n                    let mut hostname = log.host_name.as_str();\n\n                    if !common_prefix.is_empty()\n                        && let Some(stripped) = hostname.strip_prefix(&common_prefix)\n                    {\n                        hostname = stripped;\n                    }\n\n                    if !common_suffix.is_empty()\n                        && let Some(stripped) = hostname.strip_suffix(&common_suffix)\n                    {\n                        hostname = stripped;\n                    }\n\n                    log.display_host_name = Some(hostname.to_string());\n                }\n            }\n        }\n\n        if self.descending {\n            // Prepend: the batch already arrives newest-first (ORDER BY ... 
DESC),\n            // so splicing at the front keeps the global ordering newest -> oldest.\n            self.logs.splice(0..0, logs);\n            // Indices of existing logs shifted, so incremental compute_rows() is unsafe.\n            self.log_cumulative_rows.clear();\n        } else {\n            self.logs.extend(logs);\n        }\n\n        if self.filter_mode {\n            self.extract_identifiers();\n            self.rebuild_content_with_highlights();\n        } else if self.active_filter.is_some() {\n            self.apply_filter();\n        } else {\n            self.needs_relayout = true;\n            self.compute_rows();\n        }\n\n        // After prepending, shift the viewport down by the number of rows added so the\n        // user keeps looking at the same logical entry they were reading before.\n        if self.descending && !at_head {\n            let new_total_rows = self.log_cumulative_rows.last().copied().unwrap_or(0);\n            let delta = new_total_rows.saturating_sub(old_total_rows);\n            if delta > 0 {\n                let vp = self.scroll_core.content_viewport();\n                self.scroll_core\n                    .set_offset(Vec2::new(vp.left(), vp.top() + delta));\n            }\n        }\n    }\n\n    fn compute_rows(&mut self) {\n        let width = if self.wrap {\n            // For scrolling we need to subtract some padding\n            self.screen_size_without_wrap.x.saturating_sub(2)\n        } else {\n            usize::MAX\n        };\n\n        // On resize/wrap change row indices shift, so the old matched_row is invalid\n        if self.matched_row.is_some() && self.last_computed_width != width {\n            self.matched_row = None;\n        }\n\n        let visible_count = self.visible_log_count();\n\n        // Check if we can do incremental computation:\n        // - Width hasn't changed (no wrap mode change or resize affecting width)\n        // - No filtering is active (filtered_log_indices is empty, 
NOTE: we can optimize this case as well)\n        // - We have previous computed data\n        // - We're only adding logs (visible_count >= previous count)\n        let can_do_incremental = self.last_computed_width == width\n            && self.filtered_log_indices.is_empty()\n            && !self.log_cumulative_rows.is_empty()\n            && visible_count >= self.log_cumulative_rows.len();\n\n        let start_idx = if can_do_incremental {\n            self.log_cumulative_rows.len()\n        } else {\n            self.log_cumulative_rows.clear();\n            0\n        };\n\n        let mut max_width = if can_do_incremental {\n            self.max_width\n        } else {\n            0\n        };\n        let mut cumulative = if can_do_incremental {\n            *self.log_cumulative_rows.last().unwrap()\n        } else {\n            0\n        };\n\n        let identifier_maps = self.get_identifier_maps();\n\n        // Build cumulative row counts by computing styled strings on-demand\n        // We compute them here just to count rows, then discard them (saves memory)\n        for i in start_idx..visible_count {\n            if let Some(log) = self.get_visible_log(i) {\n                let mut styled = if let Some(ref maps) = identifier_maps {\n                    log.to_styled_string_with_identifiers(self.cluster, Some(maps))\n                } else {\n                    log.to_styled_string(self.cluster)\n                };\n                styled.append(\"\\n\");\n\n                let mut row_count = 0;\n                for row in LinesIterator::new(&styled, width) {\n                    max_width = usize::max(max_width, row.width);\n                    row_count += 1;\n                }\n                cumulative += row_count;\n                self.log_cumulative_rows.push(cumulative);\n            }\n        }\n\n        self.max_width = max_width;\n        self.last_computed_width = width;\n\n        log::trace!(\n            \"Updating rows cache 
(width: {:?}, wrap: {}, max width: {}, rows: {}, visible_logs: {}/{}, incremental: {}/{}, inner size: {:?}, last size: {:?})\",\n            width,\n            self.wrap,\n            max_width,\n            cumulative,\n            visible_count,\n            self.logs.len(),\n            can_do_incremental,\n            start_idx,\n            self.scroll_core.inner_size(),\n            self.scroll_core.last_available_size()\n        );\n\n        // Show the horizontal scrolling\n        self.needs_relayout = true;\n    }\n\n    fn rows_are_valid(&mut self, size: Vec2) -> bool {\n        if self.update_content || self.needs_relayout {\n            return false;\n        }\n        if self.wrap && self.content_size_with_wrap != size {\n            return false;\n        }\n        return true;\n    }\n\n    fn layout_content(&mut self, size: Vec2) {\n        if !self.rows_are_valid(size) {\n            log::trace!(\n                \"Size changed: content_size={:?}, screen_size={:?}, size={:?}\",\n                self.content_size_with_wrap,\n                self.screen_size_without_wrap,\n                size\n            );\n            self.content_size_with_wrap = size;\n            self.compute_rows();\n\n            self.scroll_core.set_scroll_x(!self.wrap);\n        }\n        self.needs_relayout = false;\n        self.update_content = false;\n    }\n\n    fn inner_required_size(&mut self, mut req: Vec2) -> Vec2 {\n        self.screen_size_without_wrap = req;\n\n        let total_rows = self.log_cumulative_rows.last().copied().unwrap_or(0);\n        req.y = total_rows;\n        req.x = usize::max(req.x, self.max_width);\n        return req;\n    }\n\n    fn draw_content(&self, printer: &Printer<'_, '_>) {\n        let start_row = printer.content_offset.y;\n        let end_row = start_row + printer.output_size.y;\n        let total_rows = self.log_cumulative_rows.last().copied().unwrap_or(0);\n\n        let identifier_maps = self.get_identifier_maps();\n\n 
       for display_row in start_row..end_row.min(total_rows) {\n            // Binary search to find which log this display row belongs to\n            if let Some((log_idx, row_within_log)) = self.display_row_to_log(display_row)\n                && let Some(log) = self.get_visible_log(log_idx)\n            {\n                let mut styled = if let Some(ref maps) = identifier_maps {\n                    log.to_styled_string_with_identifiers(self.cluster, Some(maps))\n                } else {\n                    log.to_styled_string(self.cluster)\n                };\n                styled.append(\"\\n\");\n                if let Some(row) =\n                    LinesIterator::new(&styled, self.last_computed_width).nth(row_within_log)\n                {\n                    let y = display_row;\n                    let mut x = 0;\n\n                    for span in row.resolve_stream(&styled) {\n                        if let Some(ref re) = self.search_regex {\n                            let content = span.content;\n                            let mut last_pos = 0;\n                            let mut has_match = false;\n\n                            for m in re.find_iter(content) {\n                                has_match = true;\n                                if m.start() > last_pos {\n                                    let before = &content[last_pos..m.start()];\n                                    printer.with_style(*span.attr, |printer| {\n                                        printer.print((x, y), before);\n                                    });\n                                    x += before.width();\n                                }\n\n                                let matched = m.as_str();\n                                // Use the same highlight theme as less(1):\n                                // - Always use black as text color\n                                // - Use original text color as background\n                                // 
- For no-style use white as background\n                                let bg_color = if *span.attr == Style::default() {\n                                    Color::Rgb(255, 255, 255).into()\n                                } else {\n                                    span.attr.color.front\n                                };\n                                let inverted_style = ColorStyle::new(Color::Rgb(0, 0, 0), bg_color);\n                                printer.with_style(inverted_style, |printer| {\n                                    printer.print((x, y), matched);\n                                });\n                                x += matched.width();\n\n                                last_pos = m.end();\n                            }\n\n                            if has_match {\n                                if last_pos < content.len() {\n                                    let after = &content[last_pos..];\n                                    printer.with_style(*span.attr, |printer| {\n                                        printer.print((x, y), after);\n                                    });\n                                    x += after.width();\n                                }\n                            } else {\n                                printer.with_style(*span.attr, |printer| {\n                                    printer.print((x, y), span.content);\n                                });\n                                x += span.content.width();\n                            }\n                        } else {\n                            // No match in this span or row, print normally\n                            printer.with_style(*span.attr, |printer| {\n                                printer.print((x, y), span.content);\n                                x += span.content.width();\n                            });\n                        }\n                    }\n                }\n            }\n        }\n    }\n\n    // 
Write plain text content from the styled string directly to a writer\n    fn write_plain_text<W: Write>(&self, writer: &mut W) -> Result<()> {\n        let visible_count = self.visible_log_count();\n\n        for i in 0..visible_count {\n            if let Some(log) = self.get_visible_log(i) {\n                let mut styled = log.to_styled_string(self.cluster);\n                styled.append(\"\\n\");\n\n                for row in LinesIterator::new(&styled, self.last_computed_width) {\n                    for span in row.resolve_stream(&styled) {\n                        writer.write_all(span.content.as_bytes())?;\n                    }\n                    writer.write_all(b\"\\n\")?;\n                }\n            }\n        }\n        Ok(())\n    }\n}\n\nfn show_filtered_logs_popup(siv: &mut Cursive) {\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n\n    // Ensure filter mode is active and identifiers are extracted\n    siv.call_on_name(\"logs\", |base: &mut LogViewBase| {\n        if !base.filter_mode {\n            base.filter_mode = true;\n            base.extract_identifiers();\n            base.rebuild_content_with_highlights();\n        }\n    });\n\n    // Get current log entry's timestamp for time range calculation\n    let log_time = siv.call_on_name(\"logs\", |base: &mut LogViewBase| {\n        let viewport = base.scroll_core.content_viewport();\n        let top_row = viewport.top();\n\n        if let Some((log_idx, _)) = base.display_row_to_log(top_row)\n            && let Some(log) = base.get_visible_log(log_idx)\n        {\n            return Some(log.event_time_microseconds);\n        }\n        None\n    });\n\n    let Some(Some(event_time)) = log_time else {\n        siv.add_layer(Dialog::info(\"No log entry at current position\"));\n        return;\n    };\n\n    // Calculate time range: ±1 minute from the log entry\n    let start = event_time - Duration::try_minutes(1).unwrap();\n    let end = event_time + 
Duration::try_minutes(1).unwrap();\n\n    let apply_adjacent_filter = move |siv: &mut Cursive, text: &str| {\n        let identifier = text.trim().to_string();\n\n        if identifier.is_empty() {\n            return;\n        }\n\n        // Get the filter type for this identifier\n        let filter_info = siv.call_on_name(\"logs\", |base: &mut LogViewBase| {\n            base.filter_mode = false;\n            base.filter_identifiers.get(&identifier).cloned()\n        });\n\n        let Some(Some(filter_type)) = filter_info else {\n            siv.add_layer(Dialog::info(format!(\"Unknown identifier: {}\", identifier)));\n            return;\n        };\n\n        // Build TextLogArguments based on filter type\n        let (title, args) = match filter_type {\n            FilterType::HostName(hostname) => (\n                format!(\"Logs for host: {}\", hostname),\n                TextLogArguments {\n                    query_ids: None,\n                    logger_names: None,\n                    hostname: Some(hostname),\n                    message_filter: None,\n                    max_level: None,\n                    start,\n                    end: RelativeDateTime::from(end),\n                },\n            ),\n            FilterType::QueryId(query_id) => (\n                format!(\"Logs for query: {}\", query_id),\n                TextLogArguments {\n                    query_ids: Some(vec![query_id]),\n                    logger_names: None,\n                    hostname: None,\n                    message_filter: None,\n                    max_level: None,\n                    start,\n                    end: RelativeDateTime::from(end),\n                },\n            ),\n            FilterType::LoggerName(logger_name) => (\n                format!(\"Logs for logger: {}\", logger_name),\n                TextLogArguments {\n                    query_ids: None,\n                    logger_names: Some(vec![logger_name]),\n                    hostname: 
None,\n                    message_filter: None,\n                    max_level: None,\n                    start,\n                    end: RelativeDateTime::from(end),\n                },\n            ),\n            FilterType::Level(level) => (\n                format!(\"Logs with level <= {}\", level),\n                TextLogArguments {\n                    query_ids: None,\n                    logger_names: None,\n                    hostname: None,\n                    message_filter: None,\n                    max_level: Some(level),\n                    start,\n                    end: RelativeDateTime::from(end),\n                },\n            ),\n        };\n\n        siv.pop_layer();\n\n        siv.add_layer(\n            Dialog::around(\n                TextLogView::new(\"filtered_logs\", context.clone(), args)\n                    .with_name(\"filtered_logs\")\n                    .full_screen(),\n            )\n            .title(title),\n        );\n    };\n\n    show_bottom_prompt(siv, \"(popup) identifier:\", apply_adjacent_filter);\n}\n\npub struct LogView {\n    inner_view: OnEventView<NamedView<LogViewBase>>,\n}\n\nimpl LogView {\n    pub fn new(\n        cluster: bool,\n        wrap: bool,\n        no_strip_hostname_suffix: bool,\n        descending: bool,\n    ) -> Self {\n        let mut v = LogViewBase {\n            needs_relayout: true,\n            cluster,\n            wrap,\n            no_strip_hostname_suffix,\n            descending,\n            ..Default::default()\n        };\n        // In descending mode the newest log goes on top, so pin the viewport there and\n        // let incremental updates keep pushing old content down.\n        v.scroll_core.set_scroll_strategy(if descending {\n            ScrollStrategy::StickToTop\n        } else {\n            ScrollStrategy::StickToBottom\n        });\n        v.scroll_core.set_scroll_x(!wrap);\n        v.scroll_core.set_scroll_y(true);\n        // NOTE: we cannot pass mutable 
ref to view in search_prompt callback, sigh.\n        let v = v.with_name(\"logs\");\n\n        let scroll = move |v: &mut NamedView<LogViewBase>, e: &Event| -> Option<EventResult> {\n            v.get_mut().matched_row = None;\n            return Some(scroll::on_event(\n                &mut *v.get_mut(),\n                e.clone(),\n                |s: &mut LogViewBase, e| s.on_event(e),\n                |s, si| s.important_area(si),\n            ));\n        };\n\n        let show_options = |siv: &mut Cursive| {\n            let options = move |siv: &mut Cursive, text: &str| {\n                let status = siv.call_on_name(\"logs\", |base: &mut LogViewBase| {\n                    let status = base.set_options(text);\n                    base.compute_rows();\n                    return status;\n                });\n                siv.pop_layer();\n                if let Some(Err(err)) = status {\n                    siv.add_layer(Dialog::info(err.to_string()));\n                }\n            };\n            show_bottom_prompt(siv, \"-\", options);\n        };\n\n        let search_prompt_impl = |siv: &mut Cursive, forward: bool| {\n            let find = move |siv: &mut Cursive, text: &str| {\n                let re = match Regex::new(text) {\n                    Ok(re) => re,\n                    Err(err) => {\n                        siv.pop_layer();\n                        siv.add_layer(Dialog::info(format!(\"Invalid regex: {err}\")));\n                        return;\n                    }\n                };\n                let found = siv.call_on_name(\"logs\", |base: &mut LogViewBase| {\n                    base.search_regex = Some(re);\n                    base.matched_row = None;\n                    base.matched_col = None;\n                    base.matched_len = 0;\n                    base.search_direction_forward = forward;\n                    base.update_search()\n                });\n                siv.pop_layer();\n                if let 
Some(false) = found {\n                    siv.add_layer(Dialog::info(\"Pattern not found\"));\n                }\n            };\n            show_bottom_prompt(siv, \"/\", find);\n        };\n        let search_prompt_forward = move |siv: &mut Cursive| {\n            search_prompt_impl(siv, /* forward= */ true);\n        };\n        let search_prompt_reverse = move |siv: &mut Cursive| {\n            search_prompt_impl(siv, /* forward= */ false);\n        };\n\n        let show_save_prompt = |siv: &mut Cursive| {\n            let save_file_impl = |siv: &mut Cursive| {\n                let file_path = siv\n                    .call_on_name(\"save_file_path\", |view: &mut EditView| {\n                        view.get_content().to_string()\n                    })\n                    .unwrap();\n                siv.pop_layer();\n\n                if file_path.trim().is_empty() {\n                    siv.add_layer(Dialog::info(\"File path cannot be empty\"));\n                    return;\n                }\n\n                let result = siv.call_on_name(\"logs\", |base: &mut LogViewBase| -> Result<()> {\n                    let mut file = fs::File::create(&file_path)?;\n                    base.write_plain_text(&mut file)?;\n                    Ok(())\n                });\n\n                match result {\n                    Some(Ok(_)) => {\n                        siv.add_layer(Dialog::info(format!(\"Logs saved to: {}\", file_path)));\n                    }\n                    Some(Err(err)) => {\n                        siv.add_layer(Dialog::info(format!(\"Error saving file: {}\", err)));\n                    }\n                    None => {\n                        siv.add_layer(Dialog::info(\"Error: Could not access log content\"));\n                    }\n                }\n            };\n\n            let save_file_for_submit = {\n                move |siv: &mut Cursive, _: &str| {\n                    save_file_impl(siv);\n                }\n            
};\n            let view = EditView::new()\n                .on_submit(save_file_for_submit)\n                .with_name(\"save_file_path\")\n                .min_width(40);\n            siv.add_layer(\n                Dialog::around(view)\n                    .title(\"Save logs to file\")\n                    .button(\"Save\", save_file_impl)\n                    .button(\"Cancel\", |siv: &mut Cursive| {\n                        siv.pop_layer();\n                    }),\n            );\n        };\n\n        let show_share_prompt = |siv: &mut Cursive| {\n            let context = siv.user_data::<ContextArc>().unwrap().clone();\n\n            let dialog = Dialog::text(format!(\n                \"Share logs to {} with end-to-end encryption?\",\n                context.clone().lock().unwrap().options.service.pastila_url\n            ))\n            .title(\"Share Logs\")\n            .button(\"Share (encrypted)\", move |siv: &mut Cursive| {\n                let context = context.clone();\n                siv.pop_layer();\n\n                let content =\n                    siv.call_on_name(\"logs\", |base: &mut LogViewBase| -> Result<String> {\n                        let mut buffer = Vec::new();\n                        base.write_plain_text(&mut buffer)?;\n                        Ok(String::from_utf8(buffer)?)\n                    });\n\n                let content = match content {\n                    Some(Ok(c)) => c,\n                    Some(Err(e)) => {\n                        siv.add_layer(Dialog::info(format!(\"Error reading logs: {}\", e)));\n                        return;\n                    }\n                    None => {\n                        siv.add_layer(Dialog::info(\"Error: Could not access log content\"));\n                        return;\n                    }\n                };\n\n                if content.trim().is_empty() {\n                    siv.add_layer(Dialog::info(\"No logs to share\"));\n                    return;\n           
     }\n\n                siv.add_layer(Dialog::text(\"Uploading logs...\").title(\"Please wait\"));\n\n                context\n                    .lock()\n                    .unwrap()\n                    .worker\n                    .send(true, crate::interpreter::WorkerEvent::ShareLogs(content));\n            })\n            .button(\"Cancel\", |siv: &mut Cursive| {\n                siv.pop_layer();\n            });\n\n            siv.add_layer(dialog);\n        };\n\n        let toggle_filter_mode_and_prompt = |siv: &mut Cursive| {\n            siv.call_on_name(\"logs\", |base: &mut LogViewBase| {\n                if base.filter_mode {\n                    base.filter_mode = false;\n                    base.active_filter = None;\n                    base.rebuild_content_normal();\n                } else {\n                    base.filter_mode = true;\n                    base.extract_identifiers();\n                    base.rebuild_content_with_highlights();\n                }\n            });\n\n            let should_show_prompt = siv\n                .call_on_name(\"logs\", |base: &mut LogViewBase| base.filter_mode)\n                .unwrap_or(false);\n\n            if should_show_prompt {\n                let apply_filter = move |siv: &mut Cursive, text: &str| {\n                    let identifier = text.trim().to_string();\n                    siv.pop_layer();\n\n                    if identifier.is_empty() {\n                        siv.call_on_name(\"logs\", |base: &mut LogViewBase| {\n                            base.filter_mode = false;\n                            base.active_filter = None;\n                            base.rebuild_content_normal();\n                        });\n                        return;\n                    }\n\n                    let filter_result = siv.call_on_name(\"logs\", |base: &mut LogViewBase| {\n                        if let Some(filter_type) = base.filter_identifiers.get(&identifier) {\n                           
 base.filter_mode = false;\n                            base.active_filter = Some(filter_type.clone());\n                            base.apply_filter();\n                            Ok(())\n                        } else {\n                            Err(format!(\"Unknown identifier: {}\", identifier))\n                        }\n                    });\n\n                    if let Some(Err(msg)) = filter_result {\n                        siv.add_layer(Dialog::info(msg));\n                    }\n                };\n                show_bottom_prompt(siv, \"identifier:\", apply_filter);\n            }\n        };\n\n        let v = OnEventView::new(v)\n            .on_pre_event_inner(Key::PageUp, scroll)\n            .on_pre_event_inner(Key::PageDown, scroll)\n            .on_pre_event_inner(Key::Left, scroll)\n            .on_pre_event_inner(Key::Right, scroll)\n            .on_pre_event_inner(Key::Up, scroll)\n            .on_pre_event_inner(Key::Down, scroll)\n            .on_pre_event_inner('j', move |v, _| scroll(v, &Event::Key(Key::Down)))\n            .on_pre_event_inner('k', move |v, _| scroll(v, &Event::Key(Key::Up)))\n            .on_pre_event_inner('g', move |v, _| scroll(v, &Event::Key(Key::Home)))\n            .on_pre_event_inner(Key::End, move |v, _| {\n                let mut base = v.get_mut();\n                base.matched_row = None;\n                base.scroll_core\n                    .set_scroll_strategy(ScrollStrategy::StickToBottom);\n                Some(EventResult::consumed())\n            })\n            .on_pre_event_inner('G', move |v, _| {\n                let mut base = v.get_mut();\n                base.matched_row = None;\n                base.scroll_core\n                    .set_scroll_strategy(ScrollStrategy::StickToBottom);\n                Some(EventResult::consumed())\n            })\n            .on_event_inner('-', move |_, _| {\n                return Some(EventResult::Consumed(Some(Callback::from_fn(show_options))));\n  
          })\n            .on_event_inner('/', move |_, _| {\n                return Some(EventResult::Consumed(Some(Callback::from_fn(\n                    search_prompt_forward,\n                ))));\n            })\n            .on_event_inner('?', move |_, _| {\n                return Some(EventResult::Consumed(Some(Callback::from_fn(\n                    search_prompt_reverse,\n                ))));\n            })\n            .on_event_inner('n', move |v, _| {\n                let mut base = v.get_mut();\n                base.search_direction_forward = true;\n                if base.update_search_forward() {\n                    return Some(EventResult::consumed());\n                } else {\n                    return Some(EventResult::Consumed(Some(Callback::from_fn(|siv| {\n                        siv.add_layer(Dialog::info(\"Pattern not found\"));\n                    }))));\n                }\n            })\n            .on_event_inner('N', move |v, _| {\n                let mut base = v.get_mut();\n                base.search_direction_forward = false;\n                if base.update_search_reverse() {\n                    return Some(EventResult::consumed());\n                } else {\n                    return Some(EventResult::Consumed(Some(Callback::from_fn(|siv| {\n                        siv.add_layer(Dialog::info(\"Pattern not found\"));\n                    }))));\n                }\n            })\n            .on_event_inner('s', move |_, _| {\n                return Some(EventResult::Consumed(Some(Callback::from_fn(\n                    show_save_prompt,\n                ))));\n            })\n            .on_event_inner('S', move |_, _| {\n                return Some(EventResult::Consumed(Some(Callback::from_fn(\n                    show_share_prompt,\n                ))));\n            })\n            .on_event_inner(Event::CtrlChar('f'), move |_, _| {\n                return Some(EventResult::Consumed(Some(Callback::from_fn(\n          
          toggle_filter_mode_and_prompt,\n                ))));\n            })\n            .on_event_inner(Event::CtrlChar('s'), move |_, _| {\n                return Some(EventResult::Consumed(Some(Callback::from_fn(\n                    show_filtered_logs_popup,\n                ))));\n            });\n\n        let log_view = LogView { inner_view: v };\n        return log_view;\n    }\n\n    pub fn push_logs(&mut self, logs: Vec<LogEntry>) {\n        self.inner_view.get_inner_mut().get_mut().push_logs(logs);\n    }\n}\n\nimpl View for LogViewBase {\n    fn draw(&self, printer: &Printer<'_, '_>) {\n        scroll::draw(self, printer, Self::draw_content);\n    }\n\n    fn layout(&mut self, size: Vec2) {\n        scroll::layout(\n            self,\n            size.saturating_sub((0, 0)),\n            self.needs_relayout,\n            Self::layout_content,\n            Self::inner_required_size,\n        );\n\n        if let Some(matched_row) = self.matched_row {\n            let match_start = self.matched_col.unwrap_or(0);\n            let match_end = match_start + self.matched_len;\n            let viewport_width = self.scroll_core.last_available_size().x;\n            let current_offset = self.scroll_core.content_viewport().left();\n\n            // Only adjust horizontal scroll if the match is not fully visible\n            let x_offset = if match_end > current_offset + viewport_width {\n                // Match extends beyond right edge - scroll to show the end with max context on left\n                match_end.saturating_sub(viewport_width)\n            } else if match_start < current_offset {\n                // Match starts before left edge - scroll to show start with some context\n                match_start\n            } else {\n                // Match is already visible - keep current position\n                current_offset\n            };\n\n            self.scroll_core.set_offset((x_offset, matched_row));\n        }\n    }\n}\n\nimpl ViewWrapper 
for LogView {\n    wrap_impl!(self.inner_view: OnEventView<NamedView<LogViewBase>>);\n\n    fn wrap_required_size(&mut self, mut req: Vec2) -> Vec2 {\n        req = self\n            .inner_view\n            .get_inner_mut()\n            .get_mut()\n            .inner_required_size(req);\n        // For scrollbars\n        req.x += 1;\n        req.y += 1;\n        return req;\n    }\n}\n"
  },
  {
    "path": "src/view/mod.rs",
    "content": "mod log_view;\nmod navigation;\nmod provider;\npub mod providers;\nmod queries_view;\nmod query_view;\nmod registry;\npub mod search_history;\nmod settings_view;\nmod sql_query_view;\nmod summary_view;\npub mod table_view;\nmod text_log_view;\nmod utils;\n\npub use navigation::Navigation;\npub use provider::ViewProvider;\npub use queries_view::QueriesView;\npub use queries_view::Type as ProcessesType;\npub use query_view::QueryView;\npub use registry::ViewRegistry;\npub use sql_query_view::Row as QueryResultRow;\npub use sql_query_view::SQLQueryView;\npub use summary_view::SummaryView;\n\npub use table_view::TableViewItem;\n\npub use log_view::LogEntry;\npub use log_view::LogView;\npub use text_log_view::TextLogView;\n\npub use utils::show_bottom_prompt;\n"
  },
  {
    "path": "src/view/navigation.rs",
    "content": "use crate::utils::{fuzzy_actions, fuzzy_select_strings};\nuse crate::{\n    common::parse_datetime_or_date,\n    interpreter::{ContextArc, WorkerEvent, clickhouse::TraceType, options::ChDigViews},\n    view::{self, settings_view},\n};\nuse anyhow::Result;\nuse chrono::{DateTime, Local};\nuse cursive::{\n    Cursive,\n    event::{Event, EventResult, Key},\n    theme::{BaseColor, Color, ColorStyle, Effect, PaletteColor, Style, Theme},\n    utils::{markup::StyledString, span::SpannedString},\n    view::{IntoBoxedView, Nameable, Resizable, View},\n    views::{Dialog, DummyView, EditView, LinearLayout, OnEventView, SelectView, TextView},\n};\nuse cursive_flexi_logger_view::toggle_flexi_logger_debug_console;\n\nfn toggle_debug_metrics(siv: &mut Cursive) {\n    let ctx = siv.user_data::<ContextArc>().unwrap().clone();\n    let metrics = ctx.lock().unwrap().debug_metrics.clone();\n    let shown = metrics.toggle_shown();\n    // Paint immediately on both transitions so the user sees the toggle take effect\n    // without waiting for the next refresh tick (and so stale numbers don't linger on hide).\n    if shown {\n        siv.set_statusbar_debug(metrics.snapshot().to_string());\n    } else {\n        siv.set_statusbar_debug(\"\");\n    }\n}\n\nfn make_menu_text() -> StyledString {\n    let mut text = StyledString::new();\n\n    // F1\n    text.append_plain(\"F1\");\n    text.append_styled(\"Help\", ColorStyle::highlight());\n    // F2\n    text.append_plain(\"F2\");\n    text.append_styled(\"Views\", ColorStyle::highlight());\n    // F3\n    text.append_plain(\"F3\");\n    text.append_styled(\"Settings\", ColorStyle::highlight());\n    // F8\n    text.append_plain(\"F8\");\n    text.append_styled(\"Actions\", ColorStyle::highlight());\n\n    return text;\n}\n\npub trait Navigation {\n    fn has_view(&mut self, name: &str) -> bool;\n\n    fn make_theme_from_therminal(&mut self) -> Theme;\n    fn pop_ui(&mut self, exit: bool);\n    fn 
toggle_pause_updates(&mut self, reason: Option<&str>);\n    fn refresh_view(&mut self);\n    fn seek_time_frame(&mut self, is_sub: bool);\n    fn select_time_frame(&mut self);\n\n    fn initialize_global_shortcuts(&mut self, context: ContextArc);\n    fn initialize_views_menu(&mut self, context: ContextArc);\n    fn chdig(&mut self, context: ContextArc);\n\n    fn show_help_dialog(&mut self);\n    fn show_settings_dialog(&mut self);\n    fn show_views(&mut self);\n    fn show_actions(&mut self);\n    fn show_fuzzy_actions(&mut self);\n    fn show_server_flamegraph(&mut self, tui: bool, trace_type: Option<TraceType>);\n    fn show_jemalloc_flamegraph(&mut self, tui: bool);\n    fn show_server_perfetto(&mut self);\n    fn show_connection_dialog(&mut self);\n\n    fn drop_main_view(&mut self);\n    fn set_main_view<V: IntoBoxedView + 'static>(&mut self, view: V);\n\n    fn set_statusbar_version(&mut self, main_content: impl Into<SpannedString<Style>>);\n    fn set_statusbar_content(&mut self, content: impl Into<SpannedString<Style>>);\n    fn set_statusbar_connection(&mut self, content: impl Into<SpannedString<Style>>);\n    fn set_statusbar_debug(&mut self, content: impl Into<SpannedString<Style>>);\n\n    // TODO: move into separate trait\n    fn call_on_name_or_render_error<V, F>(&mut self, name: &str, callback: F)\n    where\n        V: View,\n        F: FnOnce(&mut V) -> Result<()>;\n}\n\nimpl Navigation for Cursive {\n    fn has_view(&mut self, name: &str) -> bool {\n        return self.focus_name(name).is_ok();\n    }\n\n    fn make_theme_from_therminal(&mut self) -> Theme {\n        let mut theme = self.current_theme().clone();\n        theme.palette[PaletteColor::Background] = Color::TerminalDefault;\n        theme.palette[PaletteColor::View] = Color::TerminalDefault;\n        theme.palette[PaletteColor::Primary] = Color::TerminalDefault;\n        theme.palette[PaletteColor::Highlight] = Color::Light(BaseColor::Cyan);\n        
theme.palette[PaletteColor::HighlightText] = Color::Dark(BaseColor::Black);\n        theme.shadow = false;\n        return theme;\n    }\n\n    fn pop_ui(&mut self, exit: bool) {\n        // Close left menu\n        let mut has_left_menu = false;\n        self.call_on_name(\"left_menu\", |left_menu_view: &mut LinearLayout| {\n            if !left_menu_view.is_empty() {\n                left_menu_view\n                    .remove_child(left_menu_view.len() - 1)\n                    .expect(\"No child view to remove\");\n                has_left_menu = true;\n            }\n        });\n        // Once at a time\n        if has_left_menu {\n            self.focus_name(\"main\").unwrap();\n            return;\n        }\n\n        if self.screen_mut().len() == 1 {\n            if exit {\n                self.quit();\n            }\n        } else {\n            self.pop_layer();\n        }\n    }\n\n    fn toggle_pause_updates(&mut self, reason: Option<&str>) {\n        let is_paused;\n        {\n            let mut context = self.user_data::<ContextArc>().unwrap().lock().unwrap();\n            // NOTE: though it will be better to stop sending any message completely, instead of\n            // simply ignoring them\n            context.worker.toggle_pause();\n            is_paused = context.worker.is_paused();\n        }\n\n        self.call_on_name(\"is_paused\", |v: &mut TextView| {\n            let mut text = StyledString::new();\n            if is_paused {\n                text.append_styled(\" PAUSED\", Effect::Bold);\n                if let Some(reason) = reason {\n                    text.append_styled(format!(\" ({})\", reason), Effect::Bold);\n                }\n                text.append_styled(\" press P to resume\", Effect::Italic);\n            }\n            v.set_content(text);\n        });\n    }\n\n    fn refresh_view(&mut self) {\n        let context = self.user_data::<ContextArc>().unwrap().lock().unwrap();\n        log::trace!(\"Toggle 
refresh\");\n        context.trigger_view_refresh();\n    }\n\n    fn seek_time_frame(&mut self, is_sub: bool) {\n        let mut context = self.user_data::<ContextArc>().unwrap().lock().unwrap();\n        context.shift_time_interval(is_sub, 10);\n        context.trigger_view_refresh();\n    }\n\n    fn select_time_frame(&mut self) {\n        let on_submit = move |siv: &mut Cursive| {\n            let start = siv\n                .call_on_name(\"start\", |view: &mut EditView| view.get_content())\n                .unwrap();\n            let end = siv\n                .call_on_name(\"end\", |view: &mut EditView| view.get_content())\n                .unwrap();\n\n            siv.pop_layer();\n\n            let new_begin = match parse_datetime_or_date(&start) {\n                Ok(new) => new,\n                Err(err) => {\n                    siv.add_layer(Dialog::info(err));\n                    return;\n                }\n            };\n            let new_end = match parse_datetime_or_date(&end) {\n                Ok(new) => new,\n                Err(err) => {\n                    siv.add_layer(Dialog::info(err));\n                    return;\n                }\n            };\n            log::debug!(\"Set time frame to ({}, {})\", new_begin, new_end);\n            let mut context = siv.user_data::<ContextArc>().unwrap().lock().unwrap();\n            context.options.view.start = new_begin.into();\n            context.options.view.end = new_end.into();\n            context.trigger_view_refresh();\n        };\n\n        let view = OnEventView::new(\n            Dialog::new()\n                .title(\"Set the time interval\")\n                .content(\n                    LinearLayout::vertical()\n                        .child(TextView::new(\n                            \"format: YYYY-MM-DDTHH:MM:SS[.ssssss][±hh:mm|Z]\",\n                        ))\n                        .child(DummyView)\n                        .child(TextView::new(\"start:\"))\n              
          .child(EditView::new().with_name(\"start\"))\n                        .child(DummyView)\n                        .child(TextView::new(\"end:\"))\n                        .child(EditView::new().with_name(\"end\")),\n                )\n                .button(\"Submit\", on_submit),\n        );\n        self.add_layer(view);\n    }\n\n    fn chdig(&mut self, context: ContextArc) {\n        self.set_user_data(context.clone());\n        self.initialize_global_shortcuts(context.clone());\n        self.initialize_views_menu(context.clone());\n\n        let theme = self.make_theme_from_therminal();\n        self.set_theme(theme);\n\n        self.add_fullscreen_layer(\n            LinearLayout::horizontal()\n                .child(LinearLayout::vertical().with_name(\"left_menu\"))\n                .child(\n                    LinearLayout::vertical()\n                        .child(\n                            LinearLayout::horizontal()\n                                .child(TextView::new(make_menu_text()))\n                                .child(TextView::new(\"\").with_name(\"is_paused\"))\n                                // Align status to the right\n                                .child(DummyView.full_width())\n                                // Empty until `!` toggles it — no visual cost when hidden.\n                                .child(TextView::new(\"\").with_name(\"debug_status\"))\n                                .child(TextView::new(\"\").with_name(\"status\"))\n                                .child(DummyView.fixed_width(1))\n                                .child(TextView::new(\"\").with_name(\"connection\"))\n                                .child(DummyView.fixed_width(1))\n                                .child(TextView::new(\"\").with_name(\"version\")),\n                        )\n                        .child(view::SummaryView::new(context.clone()).with_name(\"summary\"))\n                        .with_name(\"main\"),\n                ),\n 
       );\n\n        {\n            let ctx = context.lock().unwrap();\n            self.set_statusbar_version(ctx.server_version.clone());\n            self.set_statusbar_connection(ctx.options.clickhouse.connection_info());\n        }\n\n        let start_view = context\n            .lock()\n            .unwrap()\n            .options\n            .start_view\n            .unwrap_or(ChDigViews::Queries);\n\n        let provider = context\n            .lock()\n            .unwrap()\n            .view_registry\n            .get_by_view_type(start_view);\n        provider.show(self, context.clone());\n    }\n\n    /// Ignore rustfmt max_width, otherwise callback actions looks ugly\n    #[rustfmt::skip]\n    fn initialize_global_shortcuts(&mut self, context: ContextArc) {\n        let mut context = context.lock().unwrap();\n\n        context.add_global_action(self, \"Show help\", Key::F1, |siv| siv.show_help_dialog());\n        context.add_global_action(self, \"Settings\", Key::F3, |siv| siv.show_settings_dialog());\n\n        context.add_global_action(self, \"Views\", Key::F2, |siv| siv.show_views());\n        context.add_global_action(self, \"Show actions\", Key::F8, |siv| siv.show_actions());\n        context.add_global_action(self, \"Fuzzy actions\", Event::CtrlChar('p'), |siv| siv.show_fuzzy_actions());\n\n        if context.options.clickhouse.cluster.is_some() {\n            context.add_global_action(self, \"Filter by host\", Event::CtrlChar('h'), |siv| siv.show_connection_dialog());\n        }\n\n        context.add_global_action(self, \"Server CPU Flamegraph\", 'F', |siv| siv.show_server_flamegraph(true, Some(TraceType::CPU)));\n        context.add_global_action_without_shortcut(self, \"Server Real Flamegraph\", |siv| siv.show_server_flamegraph(true, Some(TraceType::Real)));\n        context.add_global_action_without_shortcut(self, \"Server Memory Flamegraph\", |siv| siv.show_server_flamegraph(true, Some(TraceType::Memory)));\n        
context.add_global_action_without_shortcut(self, \"Server Memory Sample Flamegraph\", |siv| siv.show_server_flamegraph(true, Some(TraceType::MemorySample)));\n        context.add_global_action_without_shortcut(self, \"Server Jemalloc Sample Flamegraph\", |siv| siv.show_server_flamegraph(true, Some(TraceType::JemallocSample)));\n        context.add_global_action_without_shortcut(self, \"Server MemoryAllocatedWithoutCheck Flamegraph\", |siv| siv.show_server_flamegraph(true, Some(TraceType::MemoryAllocatedWithoutCheck)));\n        context.add_global_action_without_shortcut(self, \"Server Events Flamegraph\", |siv| siv.show_server_flamegraph(true, Some(TraceType::ProfileEvent)));\n        context.add_global_action_without_shortcut(self, \"Server Live Flamegraph\", |siv| siv.show_server_flamegraph(true, None));\n        context.add_global_action_without_shortcut(self, \"Share Server CPU Flamegraph\", |siv| siv.show_server_flamegraph(false, Some(TraceType::CPU)));\n        context.add_global_action_without_shortcut(self, \"Share Server Real Flamegraph\", |siv| siv.show_server_flamegraph(false, Some(TraceType::Real)));\n        context.add_global_action_without_shortcut(self, \"Share Server Memory Flamegraph\", |siv| siv.show_server_flamegraph(false, Some(TraceType::Memory)));\n        context.add_global_action_without_shortcut(self, \"Share Server Memory Sample Flamegraph\", |siv| siv.show_server_flamegraph(false, Some(TraceType::MemorySample)));\n        context.add_global_action_without_shortcut(self, \"Share Server MemoryAllocatedWithoutCheck Flamegraph\", |siv| siv.show_server_flamegraph(false, Some(TraceType::MemoryAllocatedWithoutCheck)));\n        context.add_global_action_without_shortcut(self, \"Share Server Events Flamegraph\", |siv| siv.show_server_flamegraph(false, Some(TraceType::ProfileEvent)));\n        context.add_global_action_without_shortcut(self, \"Share Server Live Flamegraph\", |siv| siv.show_server_flamegraph(false, None));\n        
context.add_global_action_without_shortcut(self, \"Jemalloc\", |siv| siv.show_jemalloc_flamegraph(true));\n        context.add_global_action_without_shortcut(self, \"Share Jemalloc\", |siv| siv.show_jemalloc_flamegraph(false));\n        context.add_global_action_without_shortcut(self, \"Server Perfetto Export\", |siv| siv.show_server_perfetto());\n\n        // If logging is done to file, console is always empty\n        if context.options.service.log.is_none() {\n            context.add_global_action(\n                self,\n                \"chdig debug console\",\n                '~',\n                toggle_flexi_logger_debug_console,\n            );\n        }\n        context.add_global_action(self, \"Toggle debug metrics\", '!', toggle_debug_metrics);\n        context.add_global_action(self, \"Back/Quit\", Key::Esc, |siv| siv.pop_ui(false));\n        context.add_global_action(self, \"Back/Quit\", 'q', |siv| siv.pop_ui(true));\n        context.add_global_action(self, \"Quit forcefully\", 'Q', |siv| siv.quit());\n        context.add_global_action(self, \"Back\", Key::Backspace, |siv| siv.pop_ui(false));\n        context.add_global_action(self, \"Toggle pause\", 'p', |siv| siv.toggle_pause_updates(None));\n        context.add_global_action(self, \"Refresh\", 'r', |siv| siv.refresh_view());\n\n        // Bindings T/t inspiried by atop(1) (so as this functionality)\n        context.add_global_action(self, \"Seek 10 mins backward\", 'T', |siv| siv.seek_time_frame(true));\n        context.add_global_action(self, \"Seek 10 mins forward\", 't', |siv| siv.seek_time_frame(false));\n        context.add_global_action(self, \"Set time interval\", Event::AltChar('t'), |siv| siv.select_time_frame());\n    }\n\n    fn initialize_views_menu(&mut self, context: ContextArc) {\n        use crate::view::providers::*;\n        use std::sync::Arc;\n\n        let mut c = context.lock().unwrap();\n\n        c.register_provider(Arc::new(ProcessesViewProvider));\n        
c.register_provider(Arc::new(SlowQueryLogViewProvider));\n        c.register_provider(Arc::new(LastQueryLogViewProvider));\n        c.register_provider(Arc::new(MergesViewProvider));\n        c.register_provider(Arc::new(S3QueueViewProvider));\n        c.register_provider(Arc::new(AzureQueueViewProvider));\n        c.register_provider(Arc::new(MutationsViewProvider));\n        c.register_provider(Arc::new(ReplicatedFetchesViewProvider));\n        c.register_provider(Arc::new(ReplicationQueueViewProvider));\n        c.register_provider(Arc::new(ReplicasViewProvider));\n        c.register_provider(Arc::new(TablesViewProvider));\n        c.register_provider(Arc::new(BackgroundSchedulePoolViewProvider));\n        c.register_provider(Arc::new(BackgroundSchedulePoolLogViewProvider));\n        c.register_provider(Arc::new(TablePartsViewProvider));\n        c.register_provider(Arc::new(AsynchronousInsertsViewProvider));\n        c.register_provider(Arc::new(PartLogViewProvider));\n        c.register_provider(Arc::new(BackupsViewProvider));\n        c.register_provider(Arc::new(DictionariesViewProvider));\n        c.register_provider(Arc::new(ServerLogsViewProvider));\n        c.register_provider(Arc::new(LoggerNamesViewProvider));\n        c.register_provider(Arc::new(ErrorsViewProvider));\n        c.register_provider(Arc::new(ClientViewProvider));\n    }\n\n    fn show_help_dialog(&mut self) {\n        if self.has_view(\"help\") {\n            self.pop_layer();\n            return;\n        }\n\n        let mut text = StyledString::default();\n\n        text.append_styled(\n            format!(\"chdig v{version}\\n\", version = env!(\"CARGO_PKG_VERSION\")),\n            Effect::Bold,\n        );\n\n        {\n            let context = self.user_data::<ContextArc>().unwrap().lock().unwrap();\n\n            text.append_styled(\"\\nGlobal shortcuts:\\n\\n\", Effect::Bold);\n            for shortcut in context.global_actions.iter() {\n                
text.append(shortcut.description.preview_styled());\n            }\n\n            text.append_styled(\"\\nActions:\\n\\n\", Effect::Bold);\n            for shortcut in context.view_actions.iter() {\n                text.append(shortcut.description.preview_styled());\n            }\n        }\n\n        text.append_styled(\"\\nExtended navigation:\\n\\n\", Effect::Bold);\n        text.append_styled(\n            format!(\"{:>10} - reset selection/follow item in table\\n\", \"Home\"),\n            Effect::Bold,\n        );\n\n        text.append_plain(format!(\n            \"\\nIssues and suggestions: {homepage}/issues\",\n            homepage = env!(\"CARGO_PKG_HOMEPAGE\")\n        ));\n\n        self.add_layer(Dialog::info(text).with_name(\"help\"));\n    }\n\n    fn show_settings_dialog(&mut self) {\n        settings_view::show_settings_dialog(self);\n    }\n\n    fn show_views(&mut self) {\n        let mut has_views = false;\n        let context = self.user_data::<ContextArc>().unwrap().clone();\n        self.call_on_name(\"left_menu\", |left_menu_view: &mut LinearLayout| {\n            if !left_menu_view.is_empty() {\n                left_menu_view\n                    .remove_child(left_menu_view.len() - 1)\n                    .expect(\"No child view to remove\");\n            } else {\n                let mut select = SelectView::new().autojump();\n                {\n                    let context = context.clone();\n                    select.set_on_submit(move |siv, selected_action: &str| {\n                        log::trace!(\"Switching to {:?}\", selected_action);\n\n                        siv.focus_name(\"main\").unwrap();\n                        {\n                            let action_callback = context\n                                .lock()\n                                .unwrap()\n                                .views_menu_actions\n                                .iter()\n                                .find(|x| x.description.text == 
selected_action)\n                                .unwrap()\n                                .callback\n                                .clone();\n                            action_callback.as_ref()(siv);\n                        };\n\n                        siv.call_on_name(\"left_menu\", |left_menu_view: &mut LinearLayout| {\n                            left_menu_view\n                                .remove_child(left_menu_view.len() - 1)\n                                .expect(\"No child view to remove\");\n                        });\n                    });\n                }\n\n                {\n                    let context = context.clone();\n                    let context = context.lock().unwrap();\n                    for action in context.views_menu_actions.iter() {\n                        select.add_item_str(action.description.text);\n                    }\n                }\n\n                let select = OnEventView::new(select)\n                    .on_pre_event_inner('k', |s, _| {\n                        let cb = s.select_up(1);\n                        Some(EventResult::Consumed(Some(cb)))\n                    })\n                    .on_pre_event_inner('j', |s, _| {\n                        let cb = s.select_down(1);\n                        Some(EventResult::Consumed(Some(cb)))\n                    })\n                    .with_name(\"actions_select\");\n\n                left_menu_view.add_child(select);\n\n                has_views = true;\n            }\n        });\n\n        if has_views {\n            self.focus_name(\"left_menu\").unwrap();\n        } else {\n            self.focus_name(\"main\").unwrap();\n        }\n    }\n\n    fn show_actions(&mut self) {\n        let mut has_actions = false;\n        let context = self.user_data::<ContextArc>().unwrap().clone();\n        self.call_on_name(\"left_menu\", |left_menu_view: &mut LinearLayout| {\n            if !left_menu_view.is_empty() {\n                left_menu_view\n        
            .remove_child(left_menu_view.len() - 1)\n                    .expect(\"No child view to remove\");\n            } else {\n                let mut select = SelectView::new().autojump();\n                {\n                    let context = context.clone();\n                    select.set_on_submit(move |siv, selected_action: &str| {\n                        log::trace!(\"Triggering {:?} (from actions)\", selected_action);\n\n                        siv.focus_name(\"main\").unwrap();\n                        {\n                            let mut context = context.lock().unwrap();\n                            let action_callback = context\n                                .view_actions\n                                .iter()\n                                .find(|x| x.description.text == selected_action)\n                                .unwrap()\n                                .callback\n                                .clone();\n                            context.pending_view_callback = Some(action_callback);\n                        };\n                        siv.on_event(Event::Refresh);\n\n                        siv.call_on_name(\"left_menu\", |left_menu_view: &mut LinearLayout| {\n                            left_menu_view\n                                .remove_child(left_menu_view.len() - 1)\n                                .expect(\"No child view to remove\");\n                        });\n                    });\n                }\n\n                {\n                    let context = context.clone();\n                    let context = context.lock().unwrap();\n                    for action in context.view_actions.iter() {\n                        select.add_item_str(action.description.text);\n                    }\n                    if context.view_actions.is_empty() {\n                        return;\n                    }\n                }\n\n                let select = OnEventView::new(select)\n                    
.on_pre_event_inner('k', |s, _| {\n                        let cb = s.select_up(1);\n                        Some(EventResult::Consumed(Some(cb)))\n                    })\n                    .on_pre_event_inner('j', |s, _| {\n                        let cb = s.select_down(1);\n                        Some(EventResult::Consumed(Some(cb)))\n                    })\n                    .with_name(\"actions_select\");\n\n                left_menu_view.add_child(select);\n\n                has_actions = true;\n            }\n        });\n\n        if has_actions {\n            self.focus_name(\"left_menu\").unwrap();\n        } else {\n            self.focus_name(\"main\").unwrap();\n        }\n    }\n\n    fn show_fuzzy_actions(&mut self) {\n        let context = self.user_data::<ContextArc>().unwrap().clone();\n        let all_actions = {\n            let context = context.lock().unwrap();\n            context\n                .global_actions\n                .iter()\n                .map(|x| &x.description)\n                .chain(context.view_actions.iter().map(|x| &x.description))\n                .chain(context.views_menu_actions.iter().map(|x| &x.description))\n                .cloned()\n                .collect()\n        };\n\n        fuzzy_actions(self, all_actions, move |siv, action_text| {\n            log::trace!(\"Triggering {:?} (from fuzzy search)\", action_text);\n\n            // Global callbacks\n            {\n                let action_callback = context\n                    .lock()\n                    .unwrap()\n                    .global_actions\n                    .iter()\n                    .find(|x| x.description.text == action_text)\n                    .map(|a| a.callback.clone());\n                if let Some(action_callback) = action_callback {\n                    action_callback.as_ref()(siv);\n                }\n            }\n\n            // View callbacks\n            {\n                let mut context = context.lock().unwrap();\n 
               if let Some(action) = context\n                    .view_actions\n                    .iter()\n                    .find(|x| x.description.text == action_text)\n                {\n                    context.pending_view_callback = Some(action.callback.clone());\n                    // The pending_view_callback handling is binded to Event::Refresh event, but it\n                    // cannot be called with the context locked, so it will be called\n                    // asynchronously after Event::Refresh below\n                    //\n                    // But, we also need it to cleanup the screen (to avoid any leftovers), so, it\n                    // will be called always.\n                }\n            }\n\n            // View menus\n            {\n                let action_callback = context\n                    .lock()\n                    .unwrap()\n                    .views_menu_actions\n                    .iter()\n                    .find(|x| x.description.text == action_text)\n                    .map(|a| a.callback.clone());\n                if let Some(action_callback) = action_callback {\n                    action_callback.as_ref()(siv);\n                }\n            }\n\n            siv.on_event(Event::Refresh);\n        });\n    }\n\n    fn show_server_flamegraph(&mut self, tui: bool, trace_type: Option<TraceType>) {\n        let mut context = self.user_data::<ContextArc>().unwrap().lock().unwrap();\n        let start: DateTime<Local> = context.options.view.start.clone().into();\n        let end: DateTime<Local> = context.options.view.end.clone().into();\n        if let Some(trace_type) = trace_type {\n            context.worker.send(\n                true,\n                WorkerEvent::ServerFlameGraph(tui, trace_type, start, end),\n            );\n        } else {\n            context\n                .worker\n                .send(true, WorkerEvent::LiveQueryFlameGraph(tui, None));\n        }\n    }\n\n    fn 
show_jemalloc_flamegraph(&mut self, tui: bool) {\n        let mut context = self.user_data::<ContextArc>().unwrap().lock().unwrap();\n        context\n            .worker\n            .send(true, WorkerEvent::JemallocFlameGraph(tui));\n    }\n\n    fn show_server_perfetto(&mut self) {\n        let context = self.user_data::<ContextArc>().unwrap().clone();\n        let (start_str, end_str) = {\n            let ctx = context.lock().unwrap();\n            (\n                ctx.options.view.start.to_editable_string(),\n                ctx.options.view.end.to_editable_string(),\n            )\n        };\n\n        let on_submit = move |siv: &mut Cursive| {\n            let start_str = siv\n                .call_on_name(\"perfetto_start\", |view: &mut EditView| view.get_content())\n                .unwrap();\n            let end_str = siv\n                .call_on_name(\"perfetto_end\", |view: &mut EditView| view.get_content())\n                .unwrap();\n\n            let start = match start_str.parse::<crate::common::RelativeDateTime>() {\n                Ok(v) => v,\n                Err(err) => {\n                    siv.add_layer(Dialog::info(format!(\"Invalid start: {}\", err)));\n                    return;\n                }\n            };\n            let end = match end_str.parse::<crate::common::RelativeDateTime>() {\n                Ok(v) => v,\n                Err(err) => {\n                    siv.add_layer(Dialog::info(format!(\"Invalid end: {}\", err)));\n                    return;\n                }\n            };\n\n            siv.pop_layer();\n\n            let start_dt: DateTime<Local> = start.into();\n            let end_dt: DateTime<Local> = end.into();\n            let mut ctx = siv.user_data::<ContextArc>().unwrap().lock().unwrap();\n            ctx.worker\n                .send(true, WorkerEvent::ServerPerfettoExport(start_dt, end_dt));\n        };\n\n        let dialog = Dialog::new()\n            .title(\"Server Perfetto Export\")\n       
     .content(\n                LinearLayout::vertical()\n                    .child(TextView::new(\n                        \"Warning: server-wide export is heavy (~1.5 GiB/server\\nfor 2 min). Consider reducing the time range.\",\n                    ))\n                    .child(DummyView)\n                    .child(TextView::new(\"start:\"))\n                    .child(\n                        EditView::new()\n                            .content(start_str)\n                            .with_name(\"perfetto_start\")\n                            .fixed_width(30),\n                    )\n                    .child(DummyView)\n                    .child(TextView::new(\"end:\"))\n                    .child(\n                        EditView::new()\n                            .content(end_str)\n                            .with_name(\"perfetto_end\")\n                            .fixed_width(30),\n                    ),\n            )\n            .button(\"Export\", on_submit)\n            .button(\"Cancel\", |siv| {\n                siv.pop_layer();\n            });\n        self.add_layer(dialog);\n    }\n\n    fn show_connection_dialog(&mut self) {\n        let context_arc = self.user_data::<ContextArc>().unwrap().clone();\n        let context = context_arc.lock().unwrap();\n\n        let cluster = context.options.clickhouse.cluster.clone();\n        if cluster.is_none() {\n            drop(context);\n            self.add_layer(Dialog::info(\n                \"Cluster mode is not enabled. 
Use --cluster option.\",\n            ));\n            return;\n        }\n\n        let clickhouse = context.clickhouse.clone();\n        let cb_sink = context.cb_sink.clone();\n        drop(context);\n\n        std::thread::spawn(move || {\n            let runtime = tokio::runtime::Runtime::new().unwrap();\n            let hosts = runtime.block_on(async { clickhouse.get_cluster_hosts().await });\n\n            cb_sink\n                .send(Box::new(move |siv: &mut Cursive| match hosts {\n                    Ok(hosts) if !hosts.is_empty() => {\n                        let context_arc = siv.user_data::<ContextArc>().unwrap().clone();\n                        let mut items: Vec<(String, String)> = Vec::with_capacity(hosts.len() + 1);\n                        items.push((\"<All hosts (reset filter)>\".to_string(), String::new()));\n                        for host in hosts {\n                            items.push((host.clone(), host));\n                        }\n\n                        fuzzy_select_strings(\n                            siv,\n                            \"Filter by host\",\n                            items,\n                            move |siv, selected_host| {\n                                let current_view = {\n                                    let mut context = context_arc.lock().unwrap();\n\n                                    let url_safe = context.options.clickhouse.url_safe.clone();\n                                    if selected_host.is_empty() {\n                                        context.selected_host = None;\n                                        log::info!(\"Reset host filter\");\n                                        siv.set_statusbar_connection(url_safe);\n                                    } else {\n                                        context.selected_host = Some(selected_host.clone());\n                                        log::info!(\"Set host filter to: {}\", selected_host);\n                             
           siv.set_statusbar_connection(format!(\n                                            \"{url_safe} (host: {selected_host})\"\n                                        ));\n                                    }\n\n                                    context\n                                        .current_view\n                                        .or(context.options.start_view)\n                                        .unwrap_or(ChDigViews::Queries)\n                                };\n\n                                log::info!(\"Reopen {:?} view\", current_view);\n\n                                let provider = context_arc\n                                    .lock()\n                                    .unwrap()\n                                    .view_registry\n                                    .get_by_view_type(current_view);\n\n                                siv.drop_main_view();\n                                provider.show(siv, context_arc.clone());\n\n                                context_arc.lock().unwrap().trigger_view_refresh();\n                            },\n                        );\n                    }\n                    Ok(_) => {\n                        siv.add_layer(Dialog::info(\"No hosts found in cluster\"));\n                    }\n                    Err(err) => {\n                        siv.add_layer(Dialog::info(format!(\n                            \"Failed to fetch cluster hosts: {}\",\n                            err\n                        )));\n                    }\n                }))\n                .unwrap();\n        });\n    }\n\n    fn drop_main_view(&mut self) {\n        while self.screen_mut().len() > 1 {\n            self.pop_layer();\n        }\n\n        self.call_on_name(\"main\", |main_view: &mut LinearLayout| {\n            // Views that should not be touched:\n            // - top bar (menu text + is_paused + status)\n            // - summary\n            if main_view.len() > 2 {\n         
       main_view\n                    .remove_child(main_view.len() - 1)\n                    .expect(\"No child view to remove\");\n            }\n        });\n    }\n\n    fn set_main_view<V: IntoBoxedView + 'static>(&mut self, view: V) {\n        self.call_on_name(\"main\", |main_view: &mut LinearLayout| {\n            main_view.add_child(view);\n        });\n    }\n\n    fn set_statusbar_version(&mut self, main_content: impl Into<SpannedString<Style>>) {\n        self.call_on_name(\"version\", |text_view: &mut TextView| {\n            let content: SpannedString<Style> = main_content.into();\n            let mut styled = StyledString::new();\n            // NOTE: may not work in some terminals\n            styled.append_styled(content.source(), Effect::Dim);\n            text_view.set_content(styled);\n        })\n        .expect(\"version\");\n    }\n\n    fn set_statusbar_content(&mut self, content: impl Into<SpannedString<Style>>) {\n        self.call_on_name(\"status\", |text_view: &mut TextView| {\n            text_view.set_content(content);\n        })\n        .expect(\"set_status\")\n    }\n\n    fn set_statusbar_connection(&mut self, content: impl Into<SpannedString<Style>>) {\n        self.call_on_name(\"connection\", |text_view: &mut TextView| {\n            text_view.set_content(content);\n        })\n        .expect(\"connection\");\n    }\n\n    fn set_statusbar_debug(&mut self, content: impl Into<SpannedString<Style>>) {\n        self.call_on_name(\"debug_status\", |text_view: &mut TextView| {\n            let spanned: SpannedString<Style> = content.into();\n            let src = spanned.source();\n            if src.is_empty() {\n                text_view.set_content(\"\");\n                return;\n            }\n            // Trailing space keeps the debug text from butting against the next\n            // status-bar element; gray makes it visually distinct from the main\n            // \"status\" message (which is full-intensity white).\n     
       let mut styled = StyledString::new();\n            styled.append_styled(format!(\"{} \", src), Color::Light(BaseColor::Black));\n            text_view.set_content(styled);\n        });\n    }\n\n    fn call_on_name_or_render_error<V, F>(&mut self, name: &str, callback: F)\n    where\n        V: View,\n        F: FnOnce(&mut V) -> Result<()>,\n    {\n        let ret = self.call_on_name(name, callback);\n        if let Some(Err(err)) = ret {\n            self.add_layer(Dialog::info(err.to_string()));\n        }\n    }\n}\n"
  },
  {
    "path": "src/view/provider.rs",
    "content": "use crate::interpreter::{ContextArc, options::ChDigViews};\nuse cursive::Cursive;\n\n/// Trait for providing views in the application.\n/// Each provider is responsible for showing a specific view type.\npub trait ViewProvider: Send + Sync {\n    /// Returns the unique name of this view provider\n    fn name(&self) -> &'static str;\n\n    /// Returns the view type enum value for this provider\n    fn view_type(&self) -> ChDigViews;\n\n    /// Shows the view in the given Cursive instance\n    fn show(&self, siv: &mut Cursive, context: ContextArc);\n}\n"
  },
  {
    "path": "src/view/providers/asynchronous_inserts.rs",
    "content": "use crate::{\n    interpreter::{ContextArc, options::ChDigViews},\n    view::{self, Navigation, ViewProvider},\n};\nuse cursive::{\n    Cursive,\n    view::{Nameable, Resizable},\n    views::Dialog,\n};\nuse std::collections::HashMap;\n\npub struct AsynchronousInsertsViewProvider;\n\nimpl ViewProvider for AsynchronousInsertsViewProvider {\n    fn name(&self) -> &'static str {\n        \"Asynchronous Inserts\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::AsynchronousInserts\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        show_asynchronous_inserts(siv, context, None, None);\n    }\n}\n\nfn build_query(\n    context: &ContextArc,\n    filters: &super::TableFilterParams,\n    is_dialog: bool,\n) -> String {\n    let (limit, dbtable, clickhouse, selected_host) = {\n        let ctx = context.lock().unwrap();\n        (\n            ctx.options.clickhouse.limit,\n            ctx.clickhouse\n                .get_table_name(\"system\", \"asynchronous_inserts\"),\n            ctx.clickhouse.clone(),\n            ctx.selected_host.clone(),\n        )\n    };\n\n    let mut where_clauses = filters.build_where_clauses();\n\n    let host_filter = clickhouse.get_host_filter_clause(selected_host.as_ref());\n    if !host_filter.is_empty() {\n        where_clauses.push(format!(\"1 {}\", host_filter));\n    }\n\n    let where_clause = if where_clauses.is_empty() {\n        String::new()\n    } else {\n        format!(\"WHERE {}\", where_clauses.join(\" AND \"))\n    };\n\n    let select_clause = if is_dialog {\n        r#\"query,\n            total_bytes,\n            format,\n            first_update::DateTime first_update\"#\n    } else {\n        r#\"database,\n            table,\n            query,\n            total_bytes,\n            format,\n            first_update::DateTime first_update\"#\n    };\n\n    format!(\n        r#\"\n        SELECT\n            {select_clause}\n        FROM {dbtable}\n      
  {where_clause}\n        ORDER BY first_update DESC\n        LIMIT {limit}\n        \"#,\n        select_clause = select_clause,\n        dbtable = dbtable,\n        where_clause = where_clause,\n        limit = limit,\n    )\n}\n\nfn get_columns(is_dialog: bool) -> (Vec<&'static str>, Vec<&'static str>) {\n    let columns = if is_dialog {\n        vec![\"query\", \"total_bytes\", \"format\", \"first_update\"]\n    } else {\n        vec![\n            \"database\",\n            \"table\",\n            \"query\",\n            \"total_bytes\",\n            \"format\",\n            \"first_update\",\n        ]\n    };\n    let columns_to_compare = vec![\"first_update\"];\n    (columns, columns_to_compare)\n}\n\nfn show_insert_details(siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        map.insert(c.to_string(), value);\n    });\n\n    let width = columns.iter().map(|c| c.len()).max().unwrap_or_default();\n    let info = columns\n        .iter()\n        .filter_map(|c| map.get(*c).map(|v| (*c, v)))\n        .map(|(c, v)| format!(\"{:<width$}: {}\", c, v, width = width))\n        .collect::<Vec<_>>()\n        .join(\"\\n\");\n\n    siv.add_layer(Dialog::info(info).title(\"Asynchronous Insert Details\"));\n}\n\npub fn show_asynchronous_inserts(\n    siv: &mut Cursive,\n    context: ContextArc,\n    database: Option<String>,\n    table: Option<String>,\n) {\n    let view_name = \"asynchronous_inserts\";\n\n    if siv.has_view(view_name) {\n        return;\n    }\n\n    let filters = super::TableFilterParams::new(\n        database,\n        table,\n        \"asynchronous_inserts\",\n        \"Asynchronous Inserts\",\n    );\n\n    let query = build_query(&context, &filters, false);\n    let (columns, columns_to_compare) = get_columns(false);\n\n    let mut view = 
view::SQLQueryView::new(\n        context.clone(),\n        view_name,\n        \"first_update\",\n        columns,\n        columns_to_compare,\n        query,\n    )\n    .unwrap_or_else(|_| panic!(\"Cannot create {}\", view_name));\n\n    view.get_inner_mut().set_on_submit(show_insert_details);\n\n    view.get_inner_mut().set_title(filters.build_title(false));\n\n    siv.drop_main_view();\n    siv.set_main_view(view.with_name(view_name).full_screen());\n    siv.focus_name(view_name).unwrap();\n}\n\npub fn show_asynchronous_inserts_dialog(\n    siv: &mut Cursive,\n    context: ContextArc,\n    database: Option<String>,\n    table: Option<String>,\n) {\n    let filters = super::TableFilterParams::new(\n        database,\n        table,\n        \"asynchronous_inserts\",\n        \"Asynchronous Inserts\",\n    );\n\n    let view_name: &'static str = Box::leak(filters.generate_view_name().into_boxed_str());\n    let query = build_query(&context, &filters, true);\n    let (columns, columns_to_compare) = get_columns(true);\n\n    let mut sql_view = view::SQLQueryView::new(\n        context.clone(),\n        view_name,\n        \"first_update\",\n        columns,\n        columns_to_compare,\n        query,\n    )\n    .unwrap_or_else(|_| panic!(\"Cannot create {}\", view_name));\n\n    sql_view.get_inner_mut().set_on_submit(show_insert_details);\n    sql_view\n        .get_inner_mut()\n        .set_title(filters.build_title(true));\n\n    siv.add_layer(\n        Dialog::around(sql_view.with_name(view_name).min_size((140, 30)))\n            .title(\"Asynchronous Inserts\"),\n    );\n}\n"
  },
  {
    "path": "src/view/providers/background_schedule_pool.rs",
    "content": "use crate::{\n    actions::ActionDescription,\n    interpreter::{ContextArc, WorkerEvent, options::ChDigViews},\n    utils::fuzzy_actions,\n    view::{self, navigation::Navigation, provider::ViewProvider},\n};\nuse cursive::{\n    Cursive,\n    event::Event,\n    view::{Nameable, Resizable},\n    views::Dialog,\n};\nuse std::collections::HashMap;\n\npub struct BackgroundSchedulePoolViewProvider;\n\nimpl ViewProvider for BackgroundSchedulePoolViewProvider {\n    fn name(&self) -> &'static str {\n        \"Background Tasks\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::BackgroundSchedulePool\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        if siv.has_view(\"background_schedule_pool\") {\n            return;\n        }\n\n        let mut columns = vec![\n            \"pool\",\n            \"database\",\n            \"table\",\n            \"log_name\",\n            \"query_id\",\n            \"elapsed_ms\",\n            \"executing\",\n            \"scheduled\",\n            \"delayed\",\n        ];\n\n        let (cluster, dbtable, clickhouse, selected_host) = {\n            let ctx = context.lock().unwrap();\n            (\n                ctx.options.clickhouse.cluster.is_some(),\n                ctx.clickhouse\n                    .get_table_name_no_history(\"system\", \"background_schedule_pool\"),\n                ctx.clickhouse.clone(),\n                ctx.selected_host.clone(),\n            )\n        };\n\n        // Only show hostname column when in cluster mode AND no host filter is active\n        let columns_to_compare = if cluster && selected_host.is_none() {\n            columns.insert(0, \"hostName() host\");\n            vec![\"host\", \"pool\", \"database\", \"table\", \"log_name\"]\n        } else {\n            vec![\"pool\", \"database\", \"table\", \"log_name\"]\n        };\n\n        let host_filter = clickhouse.get_host_filter_clause(selected_host.as_ref());\n        let 
where_clause = if host_filter.is_empty() {\n            String::new()\n        } else {\n            format!(\"WHERE 1 {}\", host_filter)\n        };\n\n        let query = format!(\n            \"SELECT {} FROM {} {} ORDER BY pool, database, table, log_name\",\n            columns.join(\", \"),\n            dbtable,\n            where_clause,\n        );\n\n        siv.drop_main_view();\n\n        let mut view = view::SQLQueryView::new(\n            context.clone(),\n            \"background_schedule_pool\",\n            \"elapsed_ms\",\n            columns.clone(),\n            columns_to_compare,\n            query,\n        )\n        .unwrap_or_else(|_| panic!(\"Cannot get background_schedule_pool\"));\n\n        let background_schedule_pool_action_callback =\n            move |siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow| {\n                show_background_schedule_pool_actions(siv, columns, row);\n            };\n        view.get_inner_mut()\n            .set_on_submit(background_schedule_pool_action_callback);\n        view.get_inner_mut().set_title(\"Background Schedule Pool\");\n\n        siv.set_main_view(view.with_name(\"background_schedule_pool\").full_screen());\n        siv.focus_name(\"background_schedule_pool\").unwrap();\n    }\n}\n\nfn show_background_schedule_pool_actions(\n    siv: &mut Cursive,\n    columns: Vec<&'static str>,\n    row: view::QueryResultRow,\n) {\n    let actions = vec![\n        ActionDescription {\n            text: \"Show tasks logs\",\n            event: Event::Unknown(vec![]),\n        },\n        ActionDescription {\n            text: \"Show tasks\",\n            event: Event::Unknown(vec![]),\n        },\n    ];\n\n    let columns_clone = columns.clone();\n    let row_clone = row.clone();\n\n    fuzzy_actions(siv, actions, move |siv, selected| match selected.as_str() {\n        \"Show tasks logs\" => {\n            show_tasks_logs(siv, columns_clone.clone(), row_clone.clone());\n        }\n   
     \"Show tasks\" => {\n            show_tasks_summary(siv, columns_clone.clone(), row_clone.clone());\n        }\n        _ => {}\n    });\n}\n\nfn show_tasks_logs(siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        map.insert(c.to_string(), value);\n    });\n\n    let log_name = map\n        .get(\"log_name\")\n        .map(|s| s.to_owned())\n        .unwrap_or_default();\n    let database = map\n        .get(\"database\")\n        .map(|s| s.to_owned())\n        .unwrap_or_default();\n    let table = map.get(\"table\").map(|s| s.to_owned()).unwrap_or_default();\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n    let view_options = context.clone().lock().unwrap().options.view.clone();\n\n    context.lock().unwrap().worker.send(\n        true,\n        WorkerEvent::BackgroundSchedulePoolLogs(\n            Some(log_name),\n            database,\n            table,\n            view_options.start,\n            view_options.end,\n        ),\n    );\n}\n\nfn show_tasks_summary(siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        map.insert(c.to_string(), value);\n    });\n\n    let log_name = map.get(\"log_name\").map(|s| s.to_owned());\n    let database = map.get(\"database\").map(|s| s.to_owned());\n    let table = map.get(\"table\").map(|s| s.to_owned());\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n\n    super::background_schedule_pool_log::show_background_schedule_pool_log_dialog(\n        siv, context, log_name, database, table,\n    );\n}\n\npub fn show_background_schedule_pool_dialog(\n    siv: &mut 
Cursive,\n    context: ContextArc,\n    database: Option<String>,\n    table: Option<String>,\n) {\n    let columns = vec![\n        \"pool\",\n        \"database\",\n        \"table\",\n        \"log_name\",\n        \"query_id\",\n        \"elapsed_ms\",\n        \"executing\",\n        \"scheduled\",\n        \"delayed\",\n    ];\n    let columns_to_compare = vec![\"pool\", \"database\", \"table\", \"log_name\"];\n\n    let (dbtable, clickhouse, selected_host) = {\n        let ctx = context.lock().unwrap();\n        (\n            ctx.clickhouse\n                .get_table_name_no_history(\"system\", \"background_schedule_pool\"),\n            ctx.clickhouse.clone(),\n            ctx.selected_host.clone(),\n        )\n    };\n\n    let mut where_clauses: Vec<String> = Vec::new();\n\n    if let Some(ref db) = database {\n        where_clauses.push(format!(\"database = '{}'\", db.replace('\\'', \"''\")));\n    }\n    if let Some(ref tbl) = table {\n        where_clauses.push(format!(\"table = '{}'\", tbl.replace('\\'', \"''\")));\n    }\n\n    let host_filter = clickhouse.get_host_filter_clause(selected_host.as_ref());\n    if !host_filter.is_empty() {\n        where_clauses.push(format!(\"1 {}\", host_filter));\n    }\n\n    let where_clause = if where_clauses.is_empty() {\n        String::new()\n    } else {\n        format!(\"WHERE {}\", where_clauses.join(\" AND \"))\n    };\n\n    let query = format!(\n        \"SELECT {} FROM {} {} ORDER BY pool, database, table, log_name\",\n        columns.join(\", \"),\n        dbtable,\n        where_clause,\n    );\n\n    let title = match (&database, &table) {\n        (Some(db), Some(tbl)) => format!(\"Running tasks: {}.{}\", db, tbl),\n        (Some(db), None) => format!(\"Running tasks: {}\", db),\n        (None, Some(tbl)) => format!(\"Running tasks: table {}\", tbl),\n        (None, None) => \"Running tasks\".to_string(),\n    };\n\n    let view_name: &'static str = Box::leak(\n        format!(\n            
\"background_schedule_pool_{}_{}\",\n            database.as_deref().unwrap_or(\"any\"),\n            table.as_deref().unwrap_or(\"any\")\n        )\n        .into_boxed_str(),\n    );\n\n    let mut sql_view = view::SQLQueryView::new(\n        context.clone(),\n        view_name,\n        \"elapsed_ms\",\n        columns,\n        columns_to_compare,\n        query,\n    )\n    .unwrap_or_else(|_| panic!(\"Cannot create {}\", view_name));\n\n    let action_callback =\n        move |siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow| {\n            show_background_schedule_pool_actions(siv, columns, row);\n        };\n    sql_view.get_inner_mut().set_on_submit(action_callback);\n    sql_view.get_inner_mut().set_title(&title);\n\n    siv.add_layer(Dialog::around(sql_view.with_name(view_name).min_size((140, 30))).title(title));\n}\n"
  },
  {
    "path": "src/view/providers/background_schedule_pool_log.rs",
    "content": "use crate::{\n    interpreter::{ContextArc, clickhouse::TextLogArguments, options::ChDigViews},\n    view::{self, Navigation, TextLogView, ViewProvider},\n};\nuse cursive::{\n    Cursive,\n    view::{Nameable, Resizable},\n    views::{Dialog, DummyView, LinearLayout, NamedView, TextView},\n};\nuse std::collections::HashMap;\n\npub struct BackgroundSchedulePoolLogViewProvider;\n\nimpl ViewProvider for BackgroundSchedulePoolLogViewProvider {\n    fn name(&self) -> &'static str {\n        \"Background Tasks History\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::BackgroundSchedulePoolLog\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        show_background_schedule_pool_log(siv, context, None, None, None);\n    }\n}\n\nstruct FilterParams {\n    log_name: Option<String>,\n    database: Option<String>,\n    table: Option<String>,\n}\n\nimpl FilterParams {\n    fn build_where_clauses(&self) -> Vec<String> {\n        let mut clauses = vec![\n            \"event_date BETWEEN toDate(start_) AND toDate(end_)\".to_string(),\n            \"event_time BETWEEN toDateTime(start_) AND toDateTime(end_)\".to_string(),\n        ];\n\n        if let Some(ref log_name) = self.log_name {\n            clauses.push(format!(\"log_name = '{}'\", log_name.replace('\\'', \"''\")));\n        }\n        if let Some(ref database) = self.database {\n            clauses.push(format!(\"database = '{}'\", database.replace('\\'', \"''\")));\n        }\n        if let Some(ref table) = self.table {\n            clauses.push(format!(\"table = '{}'\", table.replace('\\'', \"''\")));\n        }\n\n        clauses\n    }\n\n    fn build_title(&self, for_dialog: bool) -> String {\n        match (&self.log_name, &self.database, &self.table) {\n            (Some(ln), _, _) => {\n                if for_dialog {\n                    format!(\"Task summary: {}\", ln)\n                } else {\n                    format!(\"Background Tasks 
Logs: {}\", ln)\n                }\n            }\n            (None, Some(db), Some(tbl)) => {\n                if for_dialog {\n                    format!(\"Tasks for: {}.{}\", db, tbl)\n                } else {\n                    format!(\"Background Tasks Logs: {}.{}\", db, tbl)\n                }\n            }\n            (None, Some(db), None) => {\n                if for_dialog {\n                    format!(\"Tasks for: {}\", db)\n                } else {\n                    format!(\"Background Tasks Logs: {}\", db)\n                }\n            }\n            (None, None, Some(tbl)) => {\n                if for_dialog {\n                    format!(\"Tasks for table: {}\", tbl)\n                } else {\n                    format!(\"Background Tasks Logs: table {}\", tbl)\n                }\n            }\n            (None, None, None) => \"Background Tasks Logs\".to_string(),\n        }\n    }\n\n    fn generate_view_name(&self) -> String {\n        format!(\n            \"background_schedule_pool_log_{}_{}_{}\",\n            self.log_name.as_deref().unwrap_or(\"any\"),\n            self.database.as_deref().unwrap_or(\"any\"),\n            self.table.as_deref().unwrap_or(\"any\")\n        )\n    }\n}\n\nfn build_query(context: &ContextArc, filters: &FilterParams) -> String {\n    let (view_options, limit, dbtable, clickhouse, selected_host) = {\n        let ctx = context.lock().unwrap();\n        (\n            ctx.options.view.clone(),\n            ctx.options.clickhouse.limit,\n            ctx.clickhouse\n                .get_log_table_name(\"system\", \"background_schedule_pool_log\"),\n            ctx.clickhouse.clone(),\n            ctx.selected_host.clone(),\n        )\n    };\n\n    let start_sql = view_options\n        .start\n        .to_sql_datetime_64()\n        .unwrap_or_else(|| \"now() - INTERVAL 1 HOUR\".to_string());\n    let end_sql = view_options\n        .end\n        .to_sql_datetime_64()\n        .unwrap_or_else(|| 
\"now()\".to_string());\n\n    let mut where_clauses = filters.build_where_clauses();\n\n    let host_filter = clickhouse.get_log_host_filter_clause(selected_host.as_ref());\n    if !host_filter.is_empty() {\n        where_clauses.push(format!(\"1 {}\", host_filter));\n    }\n\n    format!(\n        r#\"\n        WITH {start} AS start_, {end} AS end_\n        SELECT event_time, log_name, database, table, query_id, duration_ms, error, exception\n        FROM {dbtable}\n        WHERE\n            {where_clause}\n        ORDER BY event_time DESC\n        LIMIT {limit}\n        \"#,\n        start = start_sql,\n        end = end_sql,\n        dbtable = dbtable,\n        where_clause = where_clauses.join(\" AND \"),\n        limit = limit,\n    )\n}\n\nfn get_columns() -> (Vec<&'static str>, Vec<&'static str>) {\n    let columns = vec![\n        \"event_time\",\n        \"log_name\",\n        \"database\",\n        \"table\",\n        \"query_id\",\n        \"duration_ms\",\n        \"error\",\n        \"exception\",\n    ];\n    let columns_to_compare = vec![\"event_time\", \"log_name\", \"database\", \"table\"];\n    (columns, columns_to_compare)\n}\n\nfn show_task_logs(siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        map.insert(c.to_string(), value);\n    });\n\n    let log_name = map\n        .get(\"log_name\")\n        .map(|s| s.to_owned())\n        .unwrap_or_default();\n    let query_id = map\n        .get(\"query_id\")\n        .map(|s| s.to_owned())\n        .unwrap_or_default();\n\n    if query_id.is_empty() {\n        return;\n    }\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n    let view_options = context.clone().lock().unwrap().options.view.clone();\n\n    siv.add_layer(Dialog::around(\n        LinearLayout::vertical()\n  
          .child(TextView::new(format!(\"Logs for {} ({})\", log_name, query_id)).center())\n            .child(DummyView.fixed_height(1))\n            .child(NamedView::new(\n                \"background_task_logs\",\n                TextLogView::new(\n                    \"background_task_logs\",\n                    context,\n                    TextLogArguments {\n                        query_ids: Some(vec![query_id]),\n                        logger_names: None,\n                        hostname: None,\n                        message_filter: None,\n                        max_level: None,\n                        start: view_options.start.into(),\n                        end: view_options.end,\n                    },\n                ),\n            )),\n    ));\n    siv.focus_name(\"background_task_logs\").ok();\n}\n\npub fn show_background_schedule_pool_log(\n    siv: &mut Cursive,\n    context: ContextArc,\n    log_name: Option<String>,\n    database: Option<String>,\n    table: Option<String>,\n) {\n    let view_name = \"background_schedule_pool_log\";\n\n    if siv.has_view(view_name) {\n        return;\n    }\n\n    let filters = FilterParams {\n        log_name,\n        database,\n        table,\n    };\n\n    let query = build_query(&context, &filters);\n    let (columns, columns_to_compare) = get_columns();\n\n    let mut view = view::SQLQueryView::new(\n        context.clone(),\n        view_name,\n        \"event_time\",\n        columns,\n        columns_to_compare,\n        query,\n    )\n    .unwrap_or_else(|_| panic!(\"Cannot create {}\", view_name));\n\n    view.get_inner_mut().set_on_submit(show_task_logs);\n\n    view.get_inner_mut().set_title(filters.build_title(false));\n\n    siv.drop_main_view();\n    siv.set_main_view(view.with_name(view_name).full_screen());\n    siv.focus_name(view_name).unwrap();\n}\n\npub fn show_background_schedule_pool_log_dialog(\n    siv: &mut Cursive,\n    context: ContextArc,\n    log_name: Option<String>,\n 
   database: Option<String>,\n    table: Option<String>,\n) {\n    let filters = FilterParams {\n        log_name,\n        database,\n        table,\n    };\n\n    let view_name: &'static str = Box::leak(filters.generate_view_name().into_boxed_str());\n    let query = build_query(&context, &filters);\n    let (columns, columns_to_compare) = get_columns();\n\n    let mut sql_view = view::SQLQueryView::new(\n        context.clone(),\n        view_name,\n        \"event_time\",\n        columns,\n        columns_to_compare,\n        query,\n    )\n    .unwrap_or_else(|_| panic!(\"Cannot create {}\", view_name));\n\n    sql_view.get_inner_mut().set_on_submit(show_task_logs);\n    sql_view\n        .get_inner_mut()\n        .set_title(filters.build_title(true));\n\n    siv.add_layer(\n        Dialog::around(sql_view.with_name(view_name).min_size((140, 30)))\n            .title(\"Background Schedule Pool Logs\"),\n    );\n}\n"
  },
  {
    "path": "src/view/providers/backups.rs",
    "content": "use crate::{\n    common::RelativeDateTime,\n    interpreter::{ContextArc, options::ChDigViews},\n    view::{self, TextLogView, ViewProvider},\n};\nuse cursive::{\n    Cursive,\n    view::Resizable,\n    views::{Dialog, DummyView, LinearLayout, NamedView, TextView},\n};\nuse std::collections::HashMap;\n\npub struct BackupsViewProvider;\n\nimpl ViewProvider for BackupsViewProvider {\n    fn name(&self) -> &'static str {\n        \"Backups\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::Backups\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        let columns = vec![\n            \"name\",\n            \"status::String status\",\n            \"error\",\n            \"start_time\",\n            \"end_time\",\n            \"total_size\",\n            \"query_id _query_id\",\n        ];\n\n        let backups_logs_callback =\n            move |siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow| {\n                let mut map = HashMap::new();\n                columns.iter().zip(row.0.iter()).for_each(|(c, r)| {\n                    map.insert(c.to_string(), r);\n                });\n\n                let context = siv.user_data::<ContextArc>().unwrap().clone();\n                siv.add_layer(Dialog::around(\n                    LinearLayout::vertical()\n                        .child(TextView::new(\"Logs:\").center())\n                        .child(DummyView.fixed_height(1))\n                        .child(NamedView::new(\n                            \"backups_logs\",\n                            TextLogView::new(\n                                \"backups_logs\",\n                                context,\n                                crate::interpreter::TextLogArguments {\n                                    query_ids: Some(vec![map[\"_query_id\"].to_string()]),\n                                    logger_names: None,\n                                    hostname: None,\n       
                             message_filter: None,\n                                    max_level: None,\n                                    start: map[\"start_time\"].as_datetime().unwrap(),\n                                    end: RelativeDateTime::from(map[\"end_time\"].as_datetime()),\n                                },\n                            ),\n                        )),\n                ));\n                siv.focus_name(\"backups_logs\").unwrap();\n            };\n\n        // TODO:\n        // - order by elapsed time\n        super::render_from_clickhouse_query(\n            siv,\n            super::RenderFromClickHouseQueryArguments {\n                context,\n                table: &[\"backups\"],\n                join: None,\n                filter: None,\n                sort_by: \"total_size\",\n                columns,\n                columns_to_compare: vec![\"name\"],\n                on_submit: Some(backups_logs_callback),\n                settings: HashMap::<&str, i32>::new(),\n            },\n        );\n    }\n}\n"
  },
  {
    "path": "src/view/providers/client.rs",
    "content": "use crate::{\n    interpreter::{ContextArc, options::ChDigViews},\n    utils::TerminalRawModeGuard,\n    view::ViewProvider,\n};\nuse cursive::{Cursive, views::Dialog};\nuse percent_encoding::percent_decode;\nuse std::collections::HashMap;\n#[cfg(unix)]\nuse std::os::unix::process::CommandExt;\nuse std::process::Command;\n\n/// Parse a clickhouse-rs duration string (e.g. \"600s\", \"500ms\") into microseconds.\nfn parse_duration_us(s: &str) -> Option<u64> {\n    if let Some(ms) = s.strip_suffix(\"ms\") {\n        ms.parse::<u64>().ok().map(|v| v * 1_000)\n    } else if let Some(secs) = s.strip_suffix('s') {\n        secs.parse::<u64>().ok().map(|v| v * 1_000_000)\n    } else {\n        s.parse::<u64>().ok().map(|v| v * 1_000_000)\n    }\n}\n\npub struct ClientViewProvider;\n\nimpl ClientViewProvider {\n    #[cfg(unix)]\n    fn spawn_and_wait(cmd: &mut Command) -> std::io::Result<std::process::ExitStatus> {\n        // Ignore SIGINT/SIGTTOU: SIGINT because we're no longer the foreground\n        // group (child is), SIGTTOU because tcsetpgrp from a background group\n        // would otherwise stop us.\n        let prev_sigint = unsafe { libc::signal(libc::SIGINT, libc::SIG_IGN) };\n        let prev_sigttou = unsafe { libc::signal(libc::SIGTTOU, libc::SIG_IGN) };\n\n        let result = cmd.spawn().and_then(|mut child| {\n            let child_pid = child.id() as libc::pid_t;\n            unsafe { libc::tcsetpgrp(libc::STDIN_FILENO, child_pid) };\n            let status = child.wait();\n            unsafe { libc::tcsetpgrp(libc::STDIN_FILENO, libc::getpgrp()) };\n            status\n        });\n\n        unsafe { libc::signal(libc::SIGTTOU, prev_sigttou) };\n        unsafe { libc::signal(libc::SIGINT, prev_sigint) };\n\n        result\n    }\n\n    #[cfg(not(unix))]\n    fn spawn_and_wait(cmd: &mut Command) -> std::io::Result<std::process::ExitStatus> {\n        cmd.spawn().and_then(|mut child| child.wait())\n    }\n}\n\nimpl ViewProvider for 
ClientViewProvider {\n    fn name(&self) -> &'static str {\n        \"Client\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::Client\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        let options = context.lock().unwrap().options.clickhouse.clone();\n\n        let mut cmd = Command::new(\"clickhouse\");\n        cmd.arg(\"client\");\n\n        if let Some(config) = &options.config {\n            cmd.arg(\"--config\").arg(config);\n        }\n\n        if let Some(history_file) = &options.history_file {\n            // Some version does not expand HOME in the --history_file passed from command line argument\n            let expanded = if let Some(stripped) = history_file.strip_prefix(\"~/\") {\n                if let Ok(home) = std::env::var(\"HOME\") {\n                    format!(\"{}/{}\", home, stripped)\n                } else {\n                    history_file.clone()\n                }\n            } else {\n                history_file.clone()\n            };\n            cmd.arg(\"--history_file\").arg(expanded);\n        }\n\n        if let Some(url) = &options.url\n            && let Ok(url) = url::Url::parse(url)\n        {\n            if let Some(host) = &url.host() {\n                cmd.arg(\"--host\").arg(host.to_string());\n            }\n            if let Some(port) = &url.port() {\n                cmd.arg(\"--port\").arg(port.to_string());\n            }\n            if !url.username().is_empty() {\n                cmd.arg(\"--user\").arg(url.username());\n            }\n            if let Some(password) = &url.password() {\n                cmd.arg(\"--password\").arg(\n                    percent_decode(password.as_bytes())\n                        .decode_utf8_lossy()\n                        .to_string(),\n                );\n            }\n\n            let database = url.path().strip_prefix('/').unwrap_or_default();\n            if !database.is_empty() {\n                
cmd.arg(\"--database\").arg(database);\n            }\n\n            let pairs: HashMap<_, _> = url.query_pairs().into_owned().collect();\n            for (key, value) in &pairs {\n                match key.as_str() {\n                    // clickhouse-rs internal settings, not relevant for client\n                    \"compression\" | \"pool_min\" | \"pool_max\" | \"nodelay\" | \"keepalive\"\n                    | \"ping_before_query\" | \"send_retries\" | \"retry_timeout\" | \"ping_timeout\"\n                    | \"insert_timeout\" | \"execute_timeout\" | \"alt_hosts\" | \"client_name\" => {}\n                    // only via client config\n                    \"ca_certificate\" => {}\n                    \"client_certificate\" => {}\n                    \"client_private_key\" => {}\n                    // mapped to different client flag names\n                    \"skip_verify\" => {\n                        if value == \"true\" {\n                            cmd.arg(\"--accept-invalid-certificate\");\n                        }\n                    }\n                    \"secure\" => {\n                        if value == \"true\" {\n                            cmd.arg(\"--secure\");\n                        } else {\n                            cmd.arg(\"--no-secure\");\n                        }\n                    }\n                    \"connection_timeout\" => {\n                        if let Some(us) = parse_duration_us(value) {\n                            if !pairs.contains_key(\"connect_timeout\") {\n                                cmd.arg(format!(\"--connect_timeout={}\", us / 1_000_000));\n                            }\n                            if !pairs.contains_key(\"connect_timeout_with_failover_ms\") {\n                                cmd.arg(format!(\n                                    \"--connect_timeout_with_failover_ms={}\",\n                                    us / 1_000\n                                ));\n                            
}\n                            if !pairs.contains_key(\"connect_timeout_with_failover_secure_ms\") {\n                                cmd.arg(format!(\n                                    \"--connect_timeout_with_failover_secure_ms={}\",\n                                    us / 1_000\n                                ));\n                            }\n                        }\n                    }\n                    \"query_timeout\" => {\n                        if let Some(us) = parse_duration_us(value)\n                            && !pairs.contains_key(\"max_execution_time\")\n                        {\n                            cmd.arg(format!(\"--max_execution_time={}\", us / 1_000_000));\n                        }\n                    }\n                    // pass through as-is (query settings like skip_unavailable_shards, etc.)\n                    _ => {\n                        cmd.arg(format!(\"--{}={}\", key, value));\n                    }\n                }\n            }\n        }\n\n        let cb_sink = siv.cb_sink().clone();\n        let cmd_line = format!(\"{:?}\", cmd);\n        log::info!(\"Spawning client: {}\", cmd_line);\n\n        // Spawn the child in its own process group and give it the terminal\n        // foreground, like a shell does for foreground jobs. 
This way Ctrl-C is\n        // delivered only to the child's group and chdig's terminal state stays clean.\n        #[cfg(unix)]\n        cmd.process_group(0);\n\n        let mut guard = TerminalRawModeGuard::leave();\n        eprintln!(\"\\n--- chdig: launching clickhouse client ---\\n\");\n\n        let result = Self::spawn_and_wait(&mut cmd);\n\n        if let Err(e) = guard.restore() {\n            log::error!(\"Failed to restore terminal: {}\", e);\n            siv.quit();\n            return;\n        }\n\n        match result {\n            Ok(status) => {\n                cb_sink\n                    .send(Box::new(move |siv| {\n                        siv.complete_clear();\n                        if !status.success() {\n                            siv.add_layer(Dialog::info(format!(\n                                \"clickhouse client exited with status: {}\\n\\nCommand: {}\",\n                                status, cmd_line\n                            )));\n                        }\n                    }))\n                    .ok();\n            }\n            Err(err) => {\n                cb_sink.send(Box::new(move |siv| {\n                    siv.complete_clear();\n                    siv.add_layer(Dialog::info(format!(\n                        \"Failed to spawn clickhouse client: {}\\n\\nCommand: {}\\n\\nMake sure clickhouse is installed and in PATH\",\n                        err, cmd_line\n                    )));\n                })).ok();\n            }\n        }\n\n        siv.complete_clear();\n        log::info!(\"Client terminated.\");\n    }\n}\n"
  },
  {
    "path": "src/view/providers/dictionaries.rs",
    "content": "use crate::{\n    interpreter::{ContextArc, options::ChDigViews},\n    view::ViewProvider,\n};\nuse cursive::Cursive;\nuse std::collections::HashMap;\n\npub struct DictionariesViewProvider;\n\nimpl ViewProvider for DictionariesViewProvider {\n    fn name(&self) -> &'static str {\n        \"Dictionaries\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::Dictionaries\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        let columns = vec![\n            \"name\",\n            \"status::String status\",\n            \"source\",\n            \"bytes_allocated memory\",\n            \"query_count queries\",\n            \"found_rate\",\n            \"load_factor\",\n            \"last_successful_update_time last_update\",\n            \"loading_duration\",\n            \"last_exception\",\n            \"origin\",\n        ];\n\n        super::render_from_clickhouse_query(\n            siv,\n            super::RenderFromClickHouseQueryArguments {\n                context,\n                table: &[\"dictionaries\"],\n                join: None,\n                filter: None,\n                sort_by: \"memory\",\n                columns,\n                columns_to_compare: vec![\"name\"],\n                on_submit: Some(super::query_result_show_row),\n                settings: HashMap::<&str, i32>::new(),\n            },\n        );\n    }\n}\n"
  },
  {
    "path": "src/view/providers/errors.rs",
    "content": "use crate::{\n    common::RelativeDateTime,\n    interpreter::{ContextArc, options::ChDigViews},\n    view::{self, QueryResultRow, TextLogView, ViewProvider, navigation::Navigation},\n};\nuse chrono::{DateTime, Duration, Local};\nuse cursive::{\n    Cursive,\n    view::{Nameable, Resizable},\n    views::{Dialog, DummyView, LinearLayout, NamedView, TextView},\n};\nuse std::collections::HashMap;\n\npub struct ErrorsViewProvider;\n\nimpl ViewProvider for ErrorsViewProvider {\n    fn name(&self) -> &'static str {\n        \"Errors\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::Errors\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        if siv.has_view(\"errors\") {\n            return;\n        }\n\n        let columns = vec![\n            \"name\",\n            \"sum(value) total\",\n            \"total bar\",\n            \"max(last_error_time) error_time\",\n            // \"toValidUTF8(last_error_message) _error_message\",\n            \"arrayStringConcat(arrayMap(addr -> concat(addressToLine(addr), '::', demangle(addressToSymbol(addr))), argMax(last_error_trace, last_error_time)), '\\n') _error_trace\",\n        ];\n        let columns_to_compare = vec![\"name\"];\n\n        let (dbtable, clickhouse, selected_host) = {\n            let ctx = context.lock().unwrap();\n            (\n                ctx.clickhouse.get_table_name(\"system\", \"errors\"),\n                ctx.clickhouse.clone(),\n                ctx.selected_host.clone(),\n            )\n        };\n\n        let host_filter = clickhouse.get_host_filter_clause(selected_host.as_ref());\n        let where_clause = if host_filter.is_empty() {\n            String::new()\n        } else {\n            format!(\"WHERE 1 {}\", host_filter)\n        };\n\n        let query = format!(\n            \"SELECT {} FROM {} {} GROUP BY name SETTINGS allow_introspection_functions=1\",\n            columns.join(\", \"),\n            dbtable,\n         
   where_clause,\n        );\n\n        siv.drop_main_view();\n\n        let errors_logs_callback =\n            |siv: &mut Cursive, columns: Vec<&'static str>, row: QueryResultRow| {\n                let row_data = row.0;\n\n                let mut map = HashMap::<String, String>::new();\n                columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n                    map.insert(c.to_string(), r.to_string());\n                });\n\n                let error_time = map\n                    .get(\"error_time\")\n                    .and_then(|t| t.parse::<DateTime<Local>>().ok())\n                    .unwrap_or_else(Local::now);\n                let error_name = map.get(\"name\").map(|s| s.to_string()).unwrap_or_default();\n\n                let context = siv.user_data::<ContextArc>().unwrap().clone();\n\n                // Show logs for 1 minute before and after the error time\n                // (Note, we need to add at least 1 second to error_time, otherwise it will be\n                // filtered out by event_time_microseconds condition)\n                let offset = Duration::try_minutes(1).unwrap_or_default();\n                let end_time = error_time + offset;\n                let start_time = error_time - offset;\n\n                siv.add_layer(Dialog::around(\n                    LinearLayout::vertical()\n                        .child(TextView::new(format!(\"Logs for error: {}\", error_name)).center())\n                        .child(DummyView.fixed_height(1))\n                        .child(NamedView::new(\n                            \"error_logs\",\n                            TextLogView::new(\n                                \"error_logs\",\n                                context,\n                                crate::interpreter::TextLogArguments {\n                                    query_ids: None,\n                                    logger_names: None,\n                                    hostname: None,\n                       
             message_filter: Some(error_name),\n                                    max_level: Some(\"Warning\".to_string()),\n                                    start: start_time,\n                                    end: RelativeDateTime::from(end_time),\n                                },\n                            ),\n                        )),\n                ));\n                siv.focus_name(\"error_logs\").unwrap();\n            };\n\n        let mut view = view::SQLQueryView::new(\n            context.clone(),\n            \"errors\",\n            \"total\",\n            columns,\n            columns_to_compare,\n            query,\n        )\n        .unwrap_or_else(|_| panic!(\"Cannot get errors\"));\n        view.get_inner_mut().set_on_submit(errors_logs_callback);\n        view.get_inner_mut().set_title(\"errors\");\n        view.get_inner_mut().set_bar_columns(vec![(\"bar\", \"total\")]);\n\n        siv.set_main_view(view.with_name(\"errors\").full_screen());\n        siv.focus_name(\"errors\").unwrap();\n    }\n}\n"
  },
  {
    "path": "src/view/providers/logger_names.rs",
    "content": "use crate::{\n    interpreter::{ContextArc, options::ChDigViews},\n    view::{self, Navigation, TextLogView, ViewProvider},\n};\nuse chrono::{DateTime, Local};\nuse cursive::{\n    Cursive,\n    view::{Nameable, Resizable},\n    views::{Dialog, DummyView, LinearLayout, NamedView, TextView},\n};\nuse std::collections::HashMap;\n\npub struct LoggerNamesViewProvider;\n\nimpl ViewProvider for LoggerNamesViewProvider {\n    fn name(&self) -> &'static str {\n        \"Loggers\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::Loggers\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        if siv.has_view(\"logger_names\") {\n            return;\n        }\n\n        let (view_options, cluster, selected_host_check) = {\n            let ctx = context.lock().unwrap();\n            (\n                ctx.options.view.clone(),\n                ctx.options.clickhouse.cluster.is_some(),\n                ctx.selected_host.clone(),\n            )\n        };\n        let start = DateTime::<Local>::from(view_options.start);\n        let end = view_options.end;\n\n        let mut columns = vec![\n            \"logger_name::String logger_name\",\n            \"count() count\",\n            \"countIf(level = 'Fatal') fatal\",\n            \"countIf(level = 'Critical') critical\",\n            \"countIf(level = 'Error') error\",\n            \"countIf(level = 'Warning') warning\",\n            \"countIf(level = 'Notice') notice\",\n            \"countIf(level = 'Information') information\",\n            \"countIf(level = 'Debug') debug\",\n            \"countIf(level = 'Trace') trace\",\n        ];\n\n        // Only show hostname column when in cluster mode AND no host filter is active\n        let columns_to_compare = if cluster && selected_host_check.is_none() {\n            columns.insert(0, \"hostName() host\");\n            vec![\"host\", \"logger_name\"]\n        } else {\n            vec![\"logger_name\"]\n        
};\n\n        let logger_names_callback =\n            move |siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow| {\n                let row = row.0;\n                let mut map = HashMap::<String, String>::new();\n                columns.iter().zip(row.iter()).for_each(|(c, r)| {\n                    map.insert(c.to_string(), r.to_string());\n                });\n\n                let logger_name = map.get(\"logger_name\").unwrap().clone();\n                let context = siv.user_data::<ContextArc>().unwrap().clone();\n                let view_options = context.lock().unwrap().options.view.clone();\n\n                siv.add_layer(Dialog::around(\n                    LinearLayout::vertical()\n                        .child(TextView::new(format!(\"Logs for logger: {}\", logger_name)).center())\n                        .child(DummyView.fixed_height(1))\n                        .child(NamedView::new(\n                            \"logger_logs\",\n                            TextLogView::new(\n                                \"logger_logs\",\n                                context,\n                                crate::interpreter::TextLogArguments {\n                                    query_ids: None,\n                                    logger_names: Some(vec![logger_name]),\n                                    hostname: None,\n                                    message_filter: None,\n                                    max_level: None,\n                                    start: DateTime::<Local>::from(view_options.start),\n                                    end: view_options.end,\n                                },\n                            ),\n                        )),\n                ));\n                siv.focus_name(\"logger_logs\").unwrap();\n            };\n\n        // Build the query with time filtering\n        let (dbtable, clickhouse, selected_host, limit) = {\n            let ctx = context.lock().unwrap();\n           
 (\n                ctx.clickhouse.get_log_table_name(\"system\", \"text_log\"),\n                ctx.clickhouse.clone(),\n                ctx.selected_host.clone(),\n                ctx.options.clickhouse.limit,\n            )\n        };\n\n        let start_nanos = start\n            .timestamp_nanos_opt()\n            .ok_or(anyhow::anyhow!(\"Invalid start time\"))\n            .unwrap();\n        let end_datetime = end.to_sql_datetime_64().unwrap_or_default();\n\n        let host_filter = clickhouse.get_log_host_filter_clause(selected_host.as_ref());\n        let host_where = if host_filter.is_empty() {\n            String::new()\n        } else {\n            format!(\"\\n                {}\", host_filter)\n        };\n\n        let query = format!(\n            r#\"\n            WITH\n                fromUnixTimestamp64Nano({}) AS start_time_,\n                {} AS end_time_\n            SELECT {}\n            FROM {}\n            WHERE\n                event_date >= toDate(start_time_) AND event_time >= toDateTime(start_time_) AND event_time_microseconds > start_time_\n                AND event_date <= toDate(end_time_) AND event_time <= toDateTime(end_time_) AND event_time_microseconds <= end_time_{}\n            GROUP BY {}\n            ORDER BY count DESC\n            LIMIT {}\n            \"#,\n            start_nanos,\n            end_datetime,\n            columns.join(\", \"),\n            dbtable,\n            host_where,\n            // Must mirror the condition under which the \"hostName() host\" column is\n            // selected (cluster mode AND no active host filter); otherwise GROUP BY\n            // would reference an alias that is not in the SELECT list.\n            if cluster && selected_host.is_none() {\n                \"host, logger_name\"\n            } else {\n                \"logger_name\"\n            },\n            limit,\n        );\n\n        siv.drop_main_view();\n\n        let mut view = view::SQLQueryView::new(\n            context.clone(),\n            \"logger_names\",\n            \"count\",\n            columns.clone(),\n            columns_to_compare,\n            query,\n        )\n        .unwrap_or_else(|_| panic!(\"Cannot get logger_names\"));\n        
view.get_inner_mut().set_on_submit(logger_names_callback);\n        view.get_inner_mut().set_title(\"Loggers\");\n\n        siv.set_main_view(view.with_name(\"logger_names\").full_screen());\n        siv.focus_name(\"logger_names\").unwrap();\n    }\n}\n"
  },
  {
    "path": "src/view/providers/merges.rs",
    "content": "use crate::{\n    interpreter::{ContextArc, options::ChDigViews},\n    view::{self, Navigation, TextLogView, ViewProvider},\n};\nuse cursive::{\n    Cursive,\n    view::{Nameable, Resizable},\n    views::{Dialog, DummyView, LinearLayout, NamedView, TextView},\n};\nuse std::collections::HashMap;\n\npub struct MergesViewProvider;\n\nimpl ViewProvider for MergesViewProvider {\n    fn name(&self) -> &'static str {\n        \"Merges\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::Merges\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        show_merges(siv, context, None, None);\n    }\n}\n\nfn get_columns(is_dialog: bool) -> Vec<&'static str> {\n    if is_dialog {\n        vec![\n            \"result_part_name part\",\n            \"elapsed\",\n            \"progress\",\n            \"num_parts parts\",\n            \"is_mutation mutation\",\n            \"total_size_bytes_compressed size\",\n            \"rows_read\",\n            \"rows_written\",\n            \"memory_usage memory\",\n            \"now()-elapsed _create_time\",\n            \"tables.uuid::String _table_uuid\",\n        ]\n    } else {\n        vec![\n            \"database\",\n            \"table\",\n            \"result_part_name part\",\n            \"elapsed\",\n            \"progress\",\n            \"num_parts parts\",\n            \"is_mutation mutation\",\n            \"total_size_bytes_compressed size\",\n            \"rows_read\",\n            \"rows_written\",\n            \"memory_usage memory\",\n            \"now()-elapsed _create_time\",\n            \"tables.uuid::String _table_uuid\",\n        ]\n    }\n}\n\nfn build_query(\n    context: &ContextArc,\n    filters: &super::TableFilterParams,\n    is_dialog: bool,\n) -> String {\n    let columns = get_columns(is_dialog);\n    let mut where_clauses = filters.build_where_clauses();\n\n    let (tables_dbtable, merges_dbtable, clickhouse, selected_host) = {\n        let ctx 
= context.lock().unwrap();\n        (\n            ctx.clickhouse.get_table_name(\"system\", \"tables\"),\n            ctx.clickhouse.get_table_name(\"system\", \"merges\"),\n            ctx.clickhouse.clone(),\n            ctx.selected_host.clone(),\n        )\n    };\n\n    let host_filter = clickhouse.get_host_filter_clause(selected_host.as_ref());\n    if !host_filter.is_empty() {\n        where_clauses.push(format!(\"1 {}\", host_filter));\n    }\n\n    let where_clause = if where_clauses.is_empty() {\n        String::new()\n    } else {\n        format!(\" WHERE {}\", where_clauses.join(\" AND \"))\n    };\n\n    // NOTE: On 25.8 it fails with \"No alias for subquery or table function in JOIN\" w/ old analyzer\n    format!(\n        \"select {} from {} as merges left join (select distinct on (database, name) database, name, uuid from {}) tables on merges.database = tables.database and merges.table = tables.name{} SETTINGS allow_experimental_analyzer=1\",\n        columns.join(\", \"),\n        merges_dbtable,\n        tables_dbtable,\n        where_clause,\n    )\n}\n\nfn get_merges_logs_callback()\n-> impl Fn(&mut Cursive, Vec<&'static str>, view::QueryResultRow) + 'static {\n    move |siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow| {\n        let mut map = HashMap::new();\n        columns.iter().zip(row.0.iter()).for_each(|(c, r)| {\n            map.insert(c.to_string(), r);\n        });\n\n        let context = siv.user_data::<ContextArc>().unwrap().clone();\n        siv.add_layer(Dialog::around(\n            LinearLayout::vertical()\n                .child(TextView::new(\"Logs:\").center())\n                .child(DummyView.fixed_height(1))\n                .child(NamedView::new(\n                    \"merge_logs\",\n                    TextLogView::new(\n                        \"merge_logs\",\n                        context,\n                        crate::interpreter::TextLogArguments {\n                            query_ids: 
Some(vec![format!(\n                                \"{}::{}\",\n                                map[\"_table_uuid\"].to_string(),\n                                map[\"part\"].to_string()\n                            )]),\n                            logger_names: None,\n                            hostname: None,\n                            message_filter: None,\n                            max_level: None,\n                            start: map[\"_create_time\"].as_datetime().unwrap(),\n                            end: crate::common::RelativeDateTime::new(None),\n                        },\n                    ),\n                )),\n        ));\n        siv.focus_name(\"merge_logs\").unwrap();\n    }\n}\n\nfn show_merges(\n    siv: &mut Cursive,\n    context: ContextArc,\n    database: Option<String>,\n    table: Option<String>,\n) {\n    let view_name = \"merges\";\n\n    if siv.has_view(view_name) {\n        return;\n    }\n\n    let filters = super::TableFilterParams::new(database, table, \"merges\", \"Merges\")\n        .with_table_prefix(\"merges\");\n    let columns = get_columns(false);\n    let query = build_query(&context, &filters, false);\n\n    let mut view = view::SQLQueryView::new(\n        context.clone(),\n        view_name,\n        \"elapsed\",\n        columns.clone(),\n        vec![\"database\", \"table\", \"part\"],\n        query,\n    )\n    .unwrap_or_else(|_| panic!(\"Cannot create {}\", view_name));\n\n    view.get_inner_mut()\n        .set_on_submit(get_merges_logs_callback());\n\n    view.get_inner_mut().set_title(filters.build_title(false));\n\n    siv.drop_main_view();\n    siv.set_main_view(view.with_name(view_name).full_screen());\n    siv.focus_name(view_name).unwrap();\n}\n\npub fn show_merges_dialog(\n    siv: &mut Cursive,\n    context: ContextArc,\n    database: Option<String>,\n    table: Option<String>,\n) {\n    let filters = super::TableFilterParams::new(database, table, \"merges\", \"Merges\")\n        
.with_table_prefix(\"merges\");\n\n    let view_name: &'static str = Box::leak(filters.generate_view_name().into_boxed_str());\n    let columns = get_columns(true);\n    let query = build_query(&context, &filters, true);\n\n    let mut sql_view = view::SQLQueryView::new(\n        context.clone(),\n        view_name,\n        \"elapsed\",\n        columns,\n        vec![\"part\"],\n        query,\n    )\n    .unwrap_or_else(|_| panic!(\"Cannot create {}\", view_name));\n\n    sql_view\n        .get_inner_mut()\n        .set_on_submit(get_merges_logs_callback());\n    sql_view\n        .get_inner_mut()\n        .set_title(filters.build_title(true));\n\n    siv.add_layer(\n        Dialog::around(sql_view.with_name(view_name).min_size((140, 30))).title(\"Merges\"),\n    );\n}\n"
  },
  {
    "path": "src/view/providers/mod.rs",
    "content": "pub mod asynchronous_inserts;\nmod background_schedule_pool;\nmod background_schedule_pool_log;\nmod backups;\nmod client;\nmod dictionaries;\nmod errors;\nmod logger_names;\npub mod merges;\npub mod mutations;\nmod object_storage_queue;\npub mod part_log;\nmod queries;\nmod replicas;\nmod replicated_fetches;\nmod replication_queue;\nmod server_logs;\npub mod table_parts;\nmod tables;\n\npub use asynchronous_inserts::AsynchronousInsertsViewProvider;\npub use background_schedule_pool::BackgroundSchedulePoolViewProvider;\npub use background_schedule_pool_log::BackgroundSchedulePoolLogViewProvider;\npub use backups::BackupsViewProvider;\npub use client::ClientViewProvider;\npub use dictionaries::DictionariesViewProvider;\npub use errors::ErrorsViewProvider;\npub use logger_names::LoggerNamesViewProvider;\npub use merges::MergesViewProvider;\npub use mutations::MutationsViewProvider;\npub use object_storage_queue::{AzureQueueViewProvider, S3QueueViewProvider};\npub use part_log::PartLogViewProvider;\npub use queries::{LastQueryLogViewProvider, ProcessesViewProvider, SlowQueryLogViewProvider};\npub use replicas::ReplicasViewProvider;\npub use replicated_fetches::ReplicatedFetchesViewProvider;\npub use replication_queue::ReplicationQueueViewProvider;\npub use server_logs::ServerLogsViewProvider;\npub use table_parts::TablePartsViewProvider;\npub use tables::TablesViewProvider;\n\nuse crate::{\n    interpreter::ContextArc,\n    view::{self, QueryResultRow, TextLogView},\n};\nuse chrono::{DateTime, Local};\nuse cursive::{\n    Cursive,\n    view::{Nameable, Resizable},\n    views::{Dialog, DummyView, LinearLayout, NamedView, TextView},\n};\nuse std::collections::HashMap;\n\npub struct TableFilterParams {\n    pub database: Option<String>,\n    pub table: Option<String>,\n    view_name_prefix: &'static str,\n    display_name: &'static str,\n    display_name_lower: &'static str,\n    table_prefix: Option<&'static str>,\n}\n\nimpl TableFilterParams {\n    pub 
fn new(\n        database: Option<String>,\n        table: Option<String>,\n        view_name_prefix: &'static str,\n        display_name: &'static str,\n    ) -> Self {\n        Self {\n            database,\n            table,\n            view_name_prefix,\n            display_name,\n            display_name_lower: Box::leak(display_name.to_lowercase().into_boxed_str()),\n            table_prefix: None,\n        }\n    }\n\n    pub fn with_table_prefix(mut self, prefix: &'static str) -> Self {\n        self.table_prefix = Some(prefix);\n        self\n    }\n\n    pub fn build_where_clauses(&self) -> Vec<String> {\n        let mut clauses = vec![];\n        let prefix = self\n            .table_prefix\n            .map(|p| format!(\"{}.\", p))\n            .unwrap_or_default();\n\n        if let Some(ref database) = self.database {\n            clauses.push(format!(\n                \"{}database = '{}'\",\n                prefix,\n                database.replace('\\'', \"''\")\n            ));\n        }\n        if let Some(ref table) = self.table {\n            clauses.push(format!(\"{}table = '{}'\", prefix, table.replace('\\'', \"''\")));\n        }\n\n        clauses\n    }\n\n    pub fn build_title(&self, for_dialog: bool) -> String {\n        match (&self.database, &self.table) {\n            (Some(db), Some(tbl)) => {\n                if for_dialog {\n                    format!(\"{} for: {}.{}\", self.display_name, db, tbl)\n                } else {\n                    format!(\"{}: {}.{}\", self.display_name, db, tbl)\n                }\n            }\n            (Some(db), None) => {\n                if for_dialog {\n                    format!(\"{} for database: {}\", self.display_name, db)\n                } else {\n                    format!(\"{}: {}\", self.display_name, db)\n                }\n            }\n            (None, Some(tbl)) => {\n                if for_dialog {\n                    format!(\"{} for table: {}\", 
self.display_name_lower, tbl)\n                } else {\n                    format!(\"{}: table {}\", self.display_name, tbl)\n                }\n            }\n            (None, None) => self.display_name.to_string(),\n        }\n    }\n\n    pub fn generate_view_name(&self) -> String {\n        format!(\n            \"{}_{}_{}\",\n            self.view_name_prefix,\n            self.database.as_deref().unwrap_or(\"any\"),\n            self.table.as_deref().unwrap_or(\"any\"),\n        )\n    }\n}\n\nfn is_valid_identifier_begin(c: char) -> bool {\n    c.is_ascii_alphabetic() || c == '_'\n}\n\nfn is_word_char_ascii(c: char) -> bool {\n    c.is_ascii_alphanumeric() || c == '_'\n}\n\nfn is_valid_identifier(s: &str) -> bool {\n    if s.is_empty() {\n        return false;\n    }\n\n    let mut chars = s.chars();\n    if !is_valid_identifier_begin(chars.next().unwrap()) {\n        return false;\n    }\n\n    if !chars.all(is_word_char_ascii) {\n        return false;\n    }\n\n    // NULL is not a valid identifier in SQL, any case\n    if s.eq_ignore_ascii_case(\"null\") {\n        return false;\n    }\n\n    true\n}\n\n// backQuoteIfNeed() from ClickHouse\nfn backquote_if_needed(s: &str) -> String {\n    if is_valid_identifier(s)\n        && !s.eq_ignore_ascii_case(\"distinct\")\n        && !s.eq_ignore_ascii_case(\"all\")\n        && !s.eq_ignore_ascii_case(\"table\")\n    {\n        s.to_string()\n    } else {\n        format!(\"`{}`\", s.replace('`', \"\\\\`\"))\n    }\n}\n\nfn escape_for_like(s: &str) -> String {\n    s.replace('\\\\', \"\\\\\\\\\")\n        .replace('_', \"\\\\_\")\n        .replace('%', \"\\\\%\")\n}\n\npub fn query_result_show_logs_for_row(\n    siv: &mut Cursive,\n    columns: Vec<&'static str>,\n    row: view::QueryResultRow,\n    logger_names_patterns: &[&'static str],\n    view_name: &'static str,\n) {\n    let row = row.0;\n\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row.iter()).for_each(|(c, r)| {\n      
  let value = r.to_string();\n        let quoted = backquote_if_needed(&value);\n        let escaped_value = escape_for_like(&quoted);\n        map.insert(c.to_string(), escaped_value);\n\n        // Also provide unquoted version with \"_raw\" suffix for literal values\n        let escaped_literal = escape_for_like(&value);\n        map.insert(format!(\"{}_raw\", c), escaped_literal);\n    });\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n    let view_options = context.clone().lock().unwrap().options.view.clone();\n    let logger_names = logger_names_patterns\n        .iter()\n        .map(|p| strfmt::strfmt(p, &map).unwrap())\n        .collect::<Vec<_>>();\n\n    siv.add_layer(Dialog::around(\n        LinearLayout::vertical()\n            .child(TextView::new(\"Logs:\").center())\n            .child(DummyView.fixed_height(1))\n            .child(NamedView::new(\n                view_name,\n                TextLogView::new(\n                    view_name,\n                    context,\n                    crate::interpreter::TextLogArguments {\n                        query_ids: None,\n                        logger_names: Some(logger_names),\n                        hostname: None,\n                        message_filter: None,\n                        max_level: None,\n                        start: DateTime::<Local>::from(view_options.start),\n                        end: view_options.end,\n                    },\n                ),\n            )),\n    ));\n    siv.focus_name(view_name).unwrap();\n}\n\npub trait ClickHouseSettingValue {\n    fn format_for_query(&self) -> String;\n}\n\nimpl ClickHouseSettingValue for &str {\n    fn format_for_query(&self) -> String {\n        format!(\"'{}'\", self.replace('\\'', \"\\\\'\"))\n    }\n}\n\nimpl ClickHouseSettingValue for String {\n    fn format_for_query(&self) -> String {\n        format!(\"'{}'\", self.replace('\\'', \"\\\\'\"))\n    }\n}\n\nimpl ClickHouseSettingValue for i32 {\n    fn 
format_for_query(&self) -> String {\n        self.to_string()\n    }\n}\n\nimpl ClickHouseSettingValue for i64 {\n    fn format_for_query(&self) -> String {\n        self.to_string()\n    }\n}\n\nimpl ClickHouseSettingValue for u32 {\n    fn format_for_query(&self) -> String {\n        self.to_string()\n    }\n}\n\nimpl ClickHouseSettingValue for u64 {\n    fn format_for_query(&self) -> String {\n        self.to_string()\n    }\n}\n\npub struct RenderFromClickHouseQueryArguments<F, T> {\n    pub context: ContextArc,\n    pub table: &'static [&'static str],\n    pub join: Option<String>,\n    pub filter: Option<&'static str>,\n    pub sort_by: &'static str,\n    pub columns: Vec<&'static str>,\n    pub columns_to_compare: Vec<&'static str>,\n    pub on_submit: Option<F>,\n    pub settings: HashMap<&'static str, T>,\n}\n\npub fn render_from_clickhouse_query<F, T>(\n    siv: &mut Cursive,\n    mut params: RenderFromClickHouseQueryArguments<F, T>,\n) where\n    F: Fn(&mut Cursive, Vec<&'static str>, view::QueryResultRow) + Send + Sync + 'static,\n    T: ClickHouseSettingValue,\n{\n    use crate::view::Navigation;\n\n    let table_alias = params.table[0];\n\n    if siv.has_view(table_alias) {\n        return;\n    }\n\n    let (cluster, selected_host, clickhouse) = {\n        let ctx = params.context.lock().unwrap();\n        (\n            ctx.options.clickhouse.cluster.is_some(),\n            ctx.selected_host.clone(),\n            ctx.clickhouse.clone(),\n        )\n    };\n\n    // Only show hostname column when in cluster mode AND no host filter is active\n    if cluster && selected_host.is_none() {\n        params.columns.insert(0, \"hostName() host\");\n        // Add \"host\" to the beginning of columns to compare\n        params.columns_to_compare.insert(0, \"host\");\n    }\n\n    let dbtable = if params.table.len() == 1 {\n        params\n            .context\n            .lock()\n            .unwrap()\n            .clickhouse\n            
.get_table_name(\"system\", table_alias)\n    } else {\n        let pattern = params.table.join(\"|\");\n        format!(\"merge('system', '^({pattern})$')\")\n    };\n    let settings_str = if params.settings.is_empty() {\n        \"\".to_string()\n    } else {\n        format!(\n            \" SETTINGS {}\",\n            params\n                .settings\n                .iter()\n                .map(|kv| format!(\"{}={}\", kv.0, kv.1.format_for_query()))\n                .collect::<Vec<String>>()\n                .join(\",\")\n        )\n        .to_string()\n    };\n\n    let host_filter = clickhouse.get_host_filter_clause(selected_host.as_ref());\n    let where_clause = match (params.filter, host_filter.is_empty()) {\n        (Some(filter), true) => format!(\" WHERE {}\", filter),\n        (Some(filter), false) => format!(\" WHERE {} {}\", filter, host_filter),\n        (None, false) => format!(\" WHERE 1 {}\", host_filter),\n        (None, true) => String::new(),\n    };\n\n    let query = format!(\n        \"select {} from {} as {} {}{}{}\",\n        params.columns.join(\", \"),\n        dbtable,\n        table_alias,\n        params.join.unwrap_or_default(),\n        where_clause,\n        settings_str,\n    );\n\n    siv.drop_main_view();\n\n    let mut view = view::SQLQueryView::new(\n        params.context.clone(),\n        table_alias,\n        params.sort_by,\n        params.columns.clone(),\n        params.columns_to_compare,\n        query,\n    )\n    .unwrap_or_else(|_| panic!(\"Cannot get {}\", table_alias));\n    if let Some(on_submit) = params.on_submit {\n        view.get_inner_mut().set_on_submit(on_submit);\n    }\n    view.get_inner_mut().set_title(table_alias);\n    let view = view.with_name(table_alias).full_screen();\n\n    siv.set_main_view(view);\n    siv.focus_name(table_alias).unwrap();\n}\n\npub fn query_result_show_row(siv: &mut Cursive, columns: Vec<&'static str>, row: QueryResultRow) {\n    let row = row.0;\n    let width = 
columns.iter().map(|c| c.len()).max().unwrap_or_default();\n    let info = columns\n        .iter()\n        .zip(row.iter())\n        .map(|(c, r)| (*c, r.to_string()))\n        .map(|(c, r)| format!(\"{:<width$}: {}\", c, r, width = width))\n        .collect::<Vec<_>>()\n        .join(\"\\n\");\n    siv.add_layer(Dialog::info(info).title(\"Details\"));\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_backquote_if_needed_valid_identifiers() {\n        // Valid simple identifiers should not be quoted\n        assert_eq!(backquote_if_needed(\"my_table\"), \"my_table\");\n        assert_eq!(backquote_if_needed(\"database1\"), \"database1\");\n        assert_eq!(backquote_if_needed(\"_private\"), \"_private\");\n        assert_eq!(backquote_if_needed(\"Table123\"), \"Table123\");\n        assert_eq!(backquote_if_needed(\"column_name_1\"), \"column_name_1\");\n    }\n\n    #[test]\n    fn test_backquote_if_needed_reserved_keywords() {\n        // Reserved keywords should be quoted (case insensitive)\n        assert_eq!(backquote_if_needed(\"table\"), \"`table`\");\n        assert_eq!(backquote_if_needed(\"TABLE\"), \"`TABLE`\");\n        assert_eq!(backquote_if_needed(\"Table\"), \"`Table`\");\n        assert_eq!(backquote_if_needed(\"distinct\"), \"`distinct`\");\n        assert_eq!(backquote_if_needed(\"DISTINCT\"), \"`DISTINCT`\");\n        assert_eq!(backquote_if_needed(\"all\"), \"`all`\");\n        assert_eq!(backquote_if_needed(\"ALL\"), \"`ALL`\");\n        assert_eq!(backquote_if_needed(\"null\"), \"`null`\");\n        assert_eq!(backquote_if_needed(\"NULL\"), \"`NULL`\");\n    }\n\n    #[test]\n    fn test_backquote_if_needed_special_characters() {\n        // Identifiers with special characters should be quoted\n        assert_eq!(backquote_if_needed(\"my-table\"), \"`my-table`\");\n        assert_eq!(backquote_if_needed(\"table.name\"), \"`table.name`\");\n        assert_eq!(backquote_if_needed(\"table name\"), \"`table 
name`\");\n        assert_eq!(backquote_if_needed(\"table@host\"), \"`table@host`\");\n        assert_eq!(backquote_if_needed(\"123table\"), \"`123table`\");\n        assert_eq!(backquote_if_needed(\"my$table\"), \"`my$table`\");\n    }\n\n    #[test]\n    fn test_backquote_if_needed_backtick_escaping() {\n        // Backticks in identifiers should be escaped and quoted\n        assert_eq!(backquote_if_needed(\"my`table\"), \"`my\\\\`table`\");\n        assert_eq!(backquote_if_needed(\"`table`\"), \"`\\\\`table\\\\``\");\n        assert_eq!(backquote_if_needed(\"tab`le`name\"), \"`tab\\\\`le\\\\`name`\");\n    }\n}\n"
  },
  {
    "path": "src/view/providers/mutations.rs",
    "content": "use crate::{\n    interpreter::{ContextArc, options::ChDigViews},\n    view::{self, Navigation, ViewProvider},\n};\nuse cursive::{\n    Cursive,\n    view::{Nameable, Resizable},\n    views::Dialog,\n};\n\npub struct MutationsViewProvider;\n\nimpl ViewProvider for MutationsViewProvider {\n    fn name(&self) -> &'static str {\n        \"Mutations\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::Mutations\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        show_mutations(siv, context, None, None);\n    }\n}\n\nfn get_columns(is_dialog: bool) -> Vec<&'static str> {\n    if is_dialog {\n        vec![\n            \"mutation_id\",\n            \"command\",\n            \"create_time\",\n            \"parts_to_do parts\",\n            \"is_done\",\n            \"latest_fail_reason\",\n            \"latest_fail_time\",\n        ]\n    } else {\n        vec![\n            \"database\",\n            \"table\",\n            \"mutation_id\",\n            \"command\",\n            \"create_time\",\n            \"parts_to_do parts\",\n            \"is_done\",\n            \"latest_fail_reason\",\n            \"latest_fail_time\",\n        ]\n    }\n}\n\nfn build_query(\n    context: &ContextArc,\n    filters: &super::TableFilterParams,\n    is_dialog: bool,\n) -> String {\n    let columns = get_columns(is_dialog);\n    let mut where_clauses = vec![\"is_done = 0\".to_string()];\n    where_clauses.extend(filters.build_where_clauses());\n\n    let (mutations_dbtable, clickhouse, selected_host) = {\n        let ctx = context.lock().unwrap();\n        (\n            ctx.clickhouse.get_table_name(\"system\", \"mutations\"),\n            ctx.clickhouse.clone(),\n            ctx.selected_host.clone(),\n        )\n    };\n\n    let host_filter = clickhouse.get_host_filter_clause(selected_host.as_ref());\n    if !host_filter.is_empty() {\n        where_clauses.push(format!(\"1 {}\", host_filter));\n    }\n\n    
format!(\n        \"select {} from {} as mutations WHERE {}\",\n        columns.join(\", \"),\n        mutations_dbtable,\n        where_clauses.join(\" AND \"),\n    )\n}\n\nfn show_mutations(\n    siv: &mut Cursive,\n    context: ContextArc,\n    database: Option<String>,\n    table: Option<String>,\n) {\n    let view_name = \"mutations\";\n\n    if siv.has_view(view_name) {\n        return;\n    }\n\n    let filters = super::TableFilterParams::new(database, table, \"mutations\", \"Mutations\");\n    let columns = get_columns(false);\n    let query = build_query(&context, &filters, false);\n\n    let mut view = view::SQLQueryView::new(\n        context.clone(),\n        view_name,\n        \"latest_fail_time\",\n        columns,\n        vec![\"database\", \"table\", \"mutation_id\"],\n        query,\n    )\n    .unwrap_or_else(|_| panic!(\"Cannot create {}\", view_name));\n\n    // TODO:\n    // - on_submit show assigned merges (but first, need to expose enough info in system tables)\n    // - sort by create_time OR latest_fail_time\n    view.get_inner_mut()\n        .set_on_submit(super::query_result_show_row);\n\n    view.get_inner_mut().set_title(filters.build_title(false));\n\n    siv.drop_main_view();\n    siv.set_main_view(view.with_name(view_name).full_screen());\n    siv.focus_name(view_name).unwrap();\n}\n\npub fn show_mutations_dialog(\n    siv: &mut Cursive,\n    context: ContextArc,\n    database: Option<String>,\n    table: Option<String>,\n) {\n    let filters = super::TableFilterParams::new(database, table, \"mutations\", \"Mutations\");\n\n    let view_name: &'static str = Box::leak(filters.generate_view_name().into_boxed_str());\n    let columns = get_columns(true);\n    let query = build_query(&context, &filters, true);\n\n    let mut sql_view = view::SQLQueryView::new(\n        context.clone(),\n        view_name,\n        \"latest_fail_time\",\n        columns,\n        vec![\"mutation_id\"],\n        query,\n    )\n    .unwrap_or_else(|_| 
panic!(\"Cannot create {}\", view_name));\n\n    sql_view\n        .get_inner_mut()\n        .set_on_submit(super::query_result_show_row);\n    sql_view\n        .get_inner_mut()\n        .set_title(filters.build_title(true));\n\n    siv.add_layer(\n        Dialog::around(sql_view.with_name(view_name).min_size((140, 30))).title(\"Mutations\"),\n    );\n}\n"
  },
  {
    "path": "src/view/providers/object_storage_queue.rs",
    "content": "use crate::{\n    interpreter::{ContextArc, options::ChDigViews},\n    view::ViewProvider,\n};\nuse cursive::Cursive;\nuse std::collections::HashMap;\n\nfn show_queue(siv: &mut Cursive, context: ContextArc, table: &'static [&'static str]) {\n    let columns = vec![\n        \"file_name\",\n        \"rows_processed\",\n        \"status\",\n        \"assumeNotNull(processing_start_time) start_time\",\n        \"exception\",\n    ];\n\n    // TODO: on_submit show last related log messages\n    super::render_from_clickhouse_query(\n        siv,\n        super::RenderFromClickHouseQueryArguments {\n            context,\n            table,\n            join: None,\n            filter: None,\n            sort_by: \"start_time\",\n            columns,\n            columns_to_compare: vec![\"file_name\"],\n            on_submit: Some(super::query_result_show_row),\n            settings: HashMap::<&str, i32>::new(),\n        },\n    );\n}\n\npub struct S3QueueViewProvider;\n\nimpl ViewProvider for S3QueueViewProvider {\n    fn name(&self) -> &'static str {\n        \"S3Queue\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::S3Queue\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        show_queue(siv, context, &[\"s3queue_metadata_cache\", \"s3queue\"]);\n    }\n}\n\npub struct AzureQueueViewProvider;\n\nimpl ViewProvider for AzureQueueViewProvider {\n    fn name(&self) -> &'static str {\n        \"AzureQueue\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::AzureQueue\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        show_queue(siv, context, &[\"azure_queue_metadata_cache\", \"azure_queue\"]);\n    }\n}\n"
  },
  {
    "path": "src/view/providers/part_log.rs",
    "content": "use crate::{\n    actions::ActionDescription,\n    common::RelativeDateTime,\n    interpreter::{ContextArc, TextLogArguments, options::ChDigViews},\n    utils::fuzzy_actions,\n    view::{self, Navigation, TextLogView, ViewProvider},\n};\nuse cursive::{\n    Cursive,\n    event::Event,\n    view::{Nameable, Resizable},\n    views::{Dialog, DummyView, LinearLayout, NamedView, TextView},\n};\nuse std::collections::HashMap;\n\npub struct PartLogViewProvider;\n\nimpl ViewProvider for PartLogViewProvider {\n    fn name(&self) -> &'static str {\n        \"Part Log\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::PartLog\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        show_part_log(siv, context, None, None, None);\n    }\n}\n\nstruct FilterParams {\n    database: Option<String>,\n    table: Option<String>,\n    table_uuid: Option<String>,\n}\n\nimpl FilterParams {\n    fn build_where_clauses(&self) -> Vec<String> {\n        let mut clauses = vec![\n            \"event_date BETWEEN toDate(start_) AND toDate(end_)\".to_string(),\n            \"event_time BETWEEN toDateTime(start_) AND toDateTime(end_)\".to_string(),\n            // Useful only for merge visualization\n            \"event_type != 'MergePartsStart'\".to_string(),\n        ];\n\n        if let Some(ref database) = self.database {\n            clauses.push(format!(\"database = '{}'\", database.replace('\\'', \"''\")));\n        }\n        if let Some(ref table) = self.table {\n            clauses.push(format!(\"table = '{}'\", table.replace('\\'', \"''\")));\n        }\n        if let Some(ref table_uuid) = self.table_uuid {\n            clauses.push(format!(\"table_uuid = '{}'\", table_uuid.replace('\\'', \"''\")));\n        }\n\n        clauses\n    }\n\n    fn build_title(&self, for_dialog: bool) -> String {\n        match (&self.database, &self.table) {\n            (Some(db), Some(tbl)) => {\n                if for_dialog {\n            
        format!(\"Part log for: {}.{}\", db, tbl)\n                } else {\n                    format!(\"Part Log: {}.{}\", db, tbl)\n                }\n            }\n            (Some(db), None) => {\n                if for_dialog {\n                    format!(\"Part log for database: {}\", db)\n                } else {\n                    format!(\"Part Log: {}\", db)\n                }\n            }\n            (None, Some(tbl)) => {\n                if for_dialog {\n                    format!(\"Part log for table: {}\", tbl)\n                } else {\n                    format!(\"Part Log: table {}\", tbl)\n                }\n            }\n            (None, None) => \"Part Log\".to_string(),\n        }\n    }\n\n    fn generate_view_name(&self) -> String {\n        format!(\n            \"part_log_{}_{}_{}\",\n            self.database.as_deref().unwrap_or(\"any\"),\n            self.table.as_deref().unwrap_or(\"any\"),\n            self.table_uuid.as_deref().unwrap_or(\"any\")\n        )\n    }\n}\n\nfn build_query(context: &ContextArc, filters: &FilterParams, is_dialog: bool) -> String {\n    let (view_options, limit, dbtable, clickhouse, selected_host) = {\n        let ctx = context.lock().unwrap();\n        (\n            ctx.options.view.clone(),\n            ctx.options.clickhouse.limit,\n            ctx.clickhouse.get_log_table_name(\"system\", \"part_log\"),\n            ctx.clickhouse.clone(),\n            ctx.selected_host.clone(),\n        )\n    };\n\n    let start_sql = view_options\n        .start\n        .to_sql_datetime_64()\n        .unwrap_or_else(|| \"now() - INTERVAL 1 HOUR\".to_string());\n    let end_sql = view_options\n        .end\n        .to_sql_datetime_64()\n        .unwrap_or_else(|| \"now()\".to_string());\n\n    let mut where_clauses = filters.build_where_clauses();\n\n    let host_filter = clickhouse.get_log_host_filter_clause(selected_host.as_ref());\n    if !host_filter.is_empty() {\n        
where_clauses.push(format!(\"1 {}\", host_filter));\n    }\n\n    let select_clause = if is_dialog {\n        r#\"event_time,\n            event_type::String event_type,\n            part_name,\n            merge_algorithm::String merge_algorithm,\n            part_type,\n            rows,\n            size_in_bytes,\n            duration_ms,\n            peak_memory_usage,\n            exception,\n            table_uuid::String _table_uuid\"#\n    } else {\n        r#\"event_time,\n            event_type::String event_type,\n            database,\n            table,\n            part_name,\n            merge_algorithm::String merge_algorithm,\n            part_type,\n            rows,\n            size_in_bytes,\n            duration_ms,\n            peak_memory_usage,\n            exception,\n            table_uuid::String _table_uuid\"#\n    };\n\n    format!(\n        r#\"\n        WITH {start} AS start_, {end} AS end_\n        SELECT\n            {select_clause}\n        FROM {dbtable}\n        WHERE\n            {where_clause}\n        ORDER BY event_time DESC\n        LIMIT {limit}\n        \"#,\n        start = start_sql,\n        end = end_sql,\n        select_clause = select_clause,\n        dbtable = dbtable,\n        where_clause = where_clauses.join(\" AND \"),\n        limit = limit,\n    )\n}\n\nfn get_columns(is_dialog: bool) -> (Vec<&'static str>, Vec<&'static str>) {\n    let columns = if is_dialog {\n        vec![\n            \"event_time\",\n            \"event_type\",\n            \"part_name\",\n            \"merge_algorithm\",\n            \"part_type\",\n            \"rows\",\n            \"size_in_bytes\",\n            \"duration_ms\",\n            \"peak_memory_usage\",\n            \"exception\",\n            \"_table_uuid\",\n        ]\n    } else {\n        vec![\n            \"event_time\",\n            \"event_type\",\n            \"database\",\n            \"table\",\n            \"part_name\",\n            \"merge_algorithm\",\n    
        \"part_type\",\n            \"rows\",\n            \"size_in_bytes\",\n            \"duration_ms\",\n            \"peak_memory_usage\",\n            \"exception\",\n            \"_table_uuid\",\n        ]\n    };\n    let columns_to_compare = vec![\"event_time\", \"event_type\", \"part_name\"];\n    (columns, columns_to_compare)\n}\n\nfn show_part_logs(siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow) {\n    let mut map = HashMap::new();\n    columns.iter().zip(row.0.iter()).for_each(|(c, r)| {\n        map.insert(c.to_string(), r);\n    });\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n    siv.add_layer(Dialog::around(\n        LinearLayout::vertical()\n            .child(TextView::new(\"Logs:\").center())\n            .child(DummyView.fixed_height(1))\n            .child(NamedView::new(\n                \"part_logs\",\n                TextLogView::new(\n                    \"part_logs\",\n                    context,\n                    TextLogArguments {\n                        query_ids: Some(vec![format!(\n                            \"{}::{}\",\n                            map[\"_table_uuid\"].to_string(),\n                            map[\"part_name\"].to_string()\n                        )]),\n                        logger_names: None,\n                        hostname: None,\n                        message_filter: None,\n                        max_level: None,\n                        start: map[\"event_time\"].as_datetime().unwrap(),\n                        end: RelativeDateTime::new(None),\n                    },\n                ),\n            )),\n    ));\n    siv.focus_name(\"part_logs\").unwrap();\n}\n\nfn show_part_details(siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        
map.insert(c.to_string(), value);\n    });\n\n    let width = columns.iter().map(|c| c.len()).max().unwrap_or_default();\n    let info = columns\n        .iter()\n        .filter_map(|c| map.get(*c).map(|v| (*c, v)))\n        .map(|(c, v)| format!(\"{:<width$}: {}\", c, v, width = width))\n        .collect::<Vec<_>>()\n        .join(\"\\n\");\n\n    siv.add_layer(Dialog::info(info).title(\"Part Log Details\"));\n}\n\nfn part_log_action_callback(\n    siv: &mut Cursive,\n    columns: Vec<&'static str>,\n    row: view::QueryResultRow,\n) {\n    let actions = vec![\n        ActionDescription {\n            text: \"Show part logs\",\n            event: Event::Unknown(vec![]),\n        },\n        ActionDescription {\n            text: \"Show part details\",\n            event: Event::Unknown(vec![]),\n        },\n    ];\n\n    let columns_clone = columns.clone();\n    let row_clone = row.clone();\n\n    fuzzy_actions(siv, actions, move |siv, selected| match selected.as_str() {\n        \"Show part logs\" => {\n            show_part_logs(siv, columns_clone.clone(), row_clone.clone());\n        }\n        \"Show part details\" => {\n            show_part_details(siv, columns_clone.clone(), row_clone.clone());\n        }\n        _ => {}\n    });\n}\n\npub fn show_part_log(\n    siv: &mut Cursive,\n    context: ContextArc,\n    database: Option<String>,\n    table: Option<String>,\n    table_uuid: Option<String>,\n) {\n    let view_name = \"part_log\";\n\n    if siv.has_view(view_name) {\n        return;\n    }\n\n    let filters = FilterParams {\n        database,\n        table,\n        table_uuid,\n    };\n\n    let query = build_query(&context, &filters, false);\n    let (columns, columns_to_compare) = get_columns(false);\n\n    let mut view = view::SQLQueryView::new(\n        context.clone(),\n        view_name,\n        \"event_time\",\n        columns,\n        columns_to_compare,\n        query,\n    )\n    .unwrap_or_else(|_| panic!(\"Cannot create {}\", 
view_name));\n\n    view.get_inner_mut().set_on_submit(part_log_action_callback);\n\n    view.get_inner_mut().set_title(filters.build_title(false));\n\n    siv.drop_main_view();\n    siv.set_main_view(view.with_name(view_name).full_screen());\n    siv.focus_name(view_name).unwrap();\n}\n\npub fn show_part_log_dialog(\n    siv: &mut Cursive,\n    context: ContextArc,\n    database: Option<String>,\n    table: Option<String>,\n    table_uuid: Option<String>,\n) {\n    let filters = FilterParams {\n        database,\n        table,\n        table_uuid,\n    };\n\n    let view_name: &'static str = Box::leak(filters.generate_view_name().into_boxed_str());\n    let query = build_query(&context, &filters, true);\n    let (columns, columns_to_compare) = get_columns(true);\n\n    let mut sql_view = view::SQLQueryView::new(\n        context.clone(),\n        view_name,\n        \"event_time\",\n        columns,\n        columns_to_compare,\n        query,\n    )\n    .unwrap_or_else(|_| panic!(\"Cannot create {}\", view_name));\n\n    sql_view\n        .get_inner_mut()\n        .set_on_submit(part_log_action_callback);\n    sql_view\n        .get_inner_mut()\n        .set_title(filters.build_title(true));\n\n    siv.add_layer(\n        Dialog::around(sql_view.with_name(view_name).min_size((140, 30))).title(\"Part Log\"),\n    );\n}\n"
  },
  {
    "path": "src/view/providers/queries.rs",
    "content": "use crate::{\n    interpreter::{ContextArc, options::ChDigViews},\n    view::{self, Navigation, ProcessesType, ViewProvider},\n};\nuse cursive::{\n    Cursive,\n    view::{Nameable, Resizable},\n};\n\npub struct ProcessesViewProvider;\n\nimpl ViewProvider for ProcessesViewProvider {\n    fn name(&self) -> &'static str {\n        \"Processes\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::Queries\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        if siv.has_view(\"processes\") {\n            return;\n        }\n\n        siv.drop_main_view();\n        siv.set_main_view(\n            view::QueriesView::new(\n                context.clone(),\n                ProcessesType::ProcessList,\n                \"processes\",\n                \"Queries\",\n            )\n            .with_name(\"processes\")\n            .full_screen(),\n        );\n        siv.focus_name(\"processes\").unwrap();\n    }\n}\n\npub struct SlowQueryLogViewProvider;\n\nimpl ViewProvider for SlowQueryLogViewProvider {\n    fn name(&self) -> &'static str {\n        \"Slow queries\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::SlowQueries\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        if siv.has_view(\"slow_query_log\") {\n            return;\n        }\n\n        siv.drop_main_view();\n        siv.set_main_view(\n            view::QueriesView::new(\n                context.clone(),\n                ProcessesType::SlowQueryLog,\n                \"slow_query_log\",\n                \"Slow queries\",\n            )\n            .with_name(\"slow_query_log\")\n            .full_screen(),\n        );\n        siv.focus_name(\"slow_query_log\").unwrap();\n    }\n}\n\npub struct LastQueryLogViewProvider;\n\nimpl ViewProvider for LastQueryLogViewProvider {\n    fn name(&self) -> &'static str {\n        \"Last queries\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        
ChDigViews::LastQueries\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        if siv.has_view(\"last_query_log\") {\n            return;\n        }\n\n        siv.drop_main_view();\n        siv.set_main_view(\n            view::QueriesView::new(\n                context.clone(),\n                ProcessesType::LastQueryLog,\n                \"last_query_log\",\n                \"Last queries\",\n            )\n            .with_name(\"last_query_log\")\n            .full_screen(),\n        );\n        siv.focus_name(\"last_query_log\").unwrap();\n    }\n}\n"
  },
  {
    "path": "src/view/providers/replicas.rs",
    "content": "use crate::{\n    interpreter::{ClickHouseAvailableQuirks, ContextArc, options::ChDigViews},\n    view::{self, Navigation, ViewProvider},\n};\nuse cursive::{\n    Cursive,\n    view::{Nameable, Resizable},\n};\n\npub struct ReplicasViewProvider;\n\nimpl ViewProvider for ReplicasViewProvider {\n    fn name(&self) -> &'static str {\n        \"Replicas\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::Replicas\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        if siv.has_view(\"replicas\") {\n            return;\n        }\n\n        let has_uuid = context\n            .clone()\n            .lock()\n            .unwrap()\n            .clickhouse\n            .quirks\n            .has(ClickHouseAvailableQuirks::SystemReplicasUUID);\n        let mut columns = vec![\n            \"database\",\n            \"table\",\n            \"is_readonly readonly\",\n            \"parts_to_check\",\n            \"queue_size queue\",\n            \"absolute_delay delay\",\n            \"last_queue_update last_update\",\n        ];\n\n        if has_uuid {\n            columns.push(\"uuid::String _uuid\");\n        }\n\n        let (cluster, dbtable, clickhouse, selected_host) = {\n            let ctx = context.lock().unwrap();\n            (\n                ctx.options.clickhouse.cluster.is_some(),\n                ctx.clickhouse.get_table_name(\"system\", \"replicas\"),\n                ctx.clickhouse.clone(),\n                ctx.selected_host.clone(),\n            )\n        };\n\n        // Only show hostname column when in cluster mode AND no host filter is active\n        let columns_to_compare = if cluster && selected_host.is_none() {\n            columns.insert(0, \"hostName() host\");\n            vec![\"host\", \"database\", \"table\"]\n        } else {\n            vec![\"database\", \"table\"]\n        };\n\n        let host_filter = clickhouse.get_host_filter_clause(selected_host.as_ref());\n        let 
where_clause = if host_filter.is_empty() {\n            String::new()\n        } else {\n            format!(\"WHERE 1 {}\", host_filter)\n        };\n\n        let query = format!(\n            \"SELECT DISTINCT ON (database, table, zookeeper_path) {} FROM {} {} ORDER BY queue_size DESC, database, table\",\n            columns.join(\", \"),\n            dbtable,\n            where_clause,\n        );\n\n        siv.drop_main_view();\n\n        let mut view = view::SQLQueryView::new(\n            context.clone(),\n            \"replicas\",\n            \"queue\",\n            columns.clone(),\n            columns_to_compare,\n            query,\n        )\n        .unwrap_or_else(|_| panic!(\"Cannot get replicas\"));\n\n        let logger_names_patterns = if has_uuid {\n            vec![\"{database}.{table} ({_uuid_raw})\"]\n        } else {\n            vec![\"{database}.{table} %\"]\n        };\n        let replicas_logs_callback =\n            move |siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow| {\n                super::query_result_show_logs_for_row(\n                    siv,\n                    columns,\n                    row,\n                    &logger_names_patterns,\n                    \"replica_logs\",\n                );\n            };\n        view.get_inner_mut().set_on_submit(replicas_logs_callback);\n        view.get_inner_mut().set_title(\"Replicas\");\n\n        siv.set_main_view(view.with_name(\"replicas\").full_screen());\n        siv.focus_name(\"replicas\").unwrap();\n    }\n}\n"
  },
  {
    "path": "src/view/providers/replicated_fetches.rs",
    "content": "use crate::{\n    interpreter::{ContextArc, options::ChDigViews},\n    view::ViewProvider,\n};\nuse cursive::Cursive;\nuse std::collections::HashMap;\n\npub struct ReplicatedFetchesViewProvider;\n\nimpl ViewProvider for ReplicatedFetchesViewProvider {\n    fn name(&self) -> &'static str {\n        \"Fetches\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::ReplicatedFetches\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        let columns = vec![\n            \"database\",\n            \"table\",\n            \"result_part_name part\",\n            \"elapsed\",\n            \"progress\",\n            \"total_size_bytes_compressed size\",\n            \"bytes_read_compressed bytes\",\n        ];\n\n        // TODO: on_submit show last related log messages\n        super::render_from_clickhouse_query(\n            siv,\n            super::RenderFromClickHouseQueryArguments {\n                context,\n                table: &[\"replicated_fetches\"],\n                join: None,\n                filter: None,\n                sort_by: \"elapsed\",\n                columns,\n                columns_to_compare: vec![\"database\", \"table\", \"part\"],\n                on_submit: Some(super::query_result_show_row),\n                settings: HashMap::<&str, i32>::new(),\n            },\n        );\n    }\n}\n"
  },
  {
    "path": "src/view/providers/replication_queue.rs",
    "content": "use crate::{\n    interpreter::{ContextArc, options::ChDigViews},\n    view::ViewProvider,\n};\nuse cursive::Cursive;\nuse std::collections::HashMap;\n\npub struct ReplicationQueueViewProvider;\n\nimpl ViewProvider for ReplicationQueueViewProvider {\n    fn name(&self) -> &'static str {\n        \"Replication queue\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::ReplicationQueue\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        let columns = vec![\n            \"database\",\n            \"table\",\n            \"type\",\n            \"new_part_name part\",\n            \"create_time\",\n            \"is_currently_executing executing\",\n            \"num_tries tries\",\n            \"last_exception exception\",\n            \"num_postponed postponed\",\n            \"postpone_reason reason\",\n        ];\n\n        // TODO: on_submit show last related log messages\n        super::render_from_clickhouse_query(\n            siv,\n            super::RenderFromClickHouseQueryArguments {\n                context,\n                table: &[\"replication_queue\"],\n                join: None,\n                filter: None,\n                sort_by: \"tries\",\n                columns,\n                columns_to_compare: vec![\"database\", \"table\", \"type\"],\n                on_submit: Some(super::query_result_show_row),\n                settings: HashMap::<&str, i32>::new(),\n            },\n        );\n    }\n}\n"
  },
  {
    "path": "src/view/providers/server_logs.rs",
    "content": "use crate::{\n    interpreter::{ContextArc, options::ChDigViews},\n    view::{Navigation, TextLogView, ViewProvider},\n};\nuse chrono::{DateTime, Local};\nuse cursive::{\n    Cursive,\n    view::{Nameable, Resizable},\n    views::{DummyView, LinearLayout, TextView},\n};\n\npub struct ServerLogsViewProvider;\n\nimpl ViewProvider for ServerLogsViewProvider {\n    fn name(&self) -> &'static str {\n        \"Server logs\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::ServerLogs\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        if siv.has_view(\"server_logs\") {\n            return;\n        }\n\n        let (view_options, selected_host) = {\n            let ctx = context.lock().unwrap();\n            (ctx.options.view.clone(), ctx.selected_host.clone())\n        };\n\n        siv.drop_main_view();\n        siv.set_main_view(\n            LinearLayout::vertical()\n                .child(TextView::new(\"Server logs:\").center())\n                .child(DummyView.fixed_height(1))\n                .child(\n                    TextLogView::new(\n                        \"server_logs\",\n                        context,\n                        crate::interpreter::TextLogArguments {\n                            query_ids: None,\n                            logger_names: None,\n                            hostname: selected_host,\n                            message_filter: None,\n                            max_level: None,\n                            start: DateTime::<Local>::from(view_options.start),\n                            end: view_options.end,\n                        },\n                    )\n                    .with_name(\"server_logs\")\n                    .full_screen(),\n                ),\n        );\n        siv.focus_name(\"server_logs\").unwrap();\n    }\n}\n"
  },
  {
    "path": "src/view/providers/table_parts.rs",
    "content": "use crate::{\n    actions::ActionDescription,\n    common::RelativeDateTime,\n    interpreter::{ContextArc, TextLogArguments, options::ChDigViews},\n    utils::fuzzy_actions,\n    view::{self, Navigation, TextLogView, ViewProvider},\n};\nuse cursive::{\n    Cursive,\n    event::Event,\n    view::{Nameable, Resizable},\n    views::{Dialog, DummyView, LinearLayout, NamedView, TextView},\n};\nuse std::collections::HashMap;\n\npub struct TablePartsViewProvider;\n\nimpl ViewProvider for TablePartsViewProvider {\n    fn name(&self) -> &'static str {\n        \"Table Parts\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::TableParts\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        show_table_parts(siv, context, None, None);\n    }\n}\n\nfn build_query(\n    context: &ContextArc,\n    filters: &super::TableFilterParams,\n    is_dialog: bool,\n) -> String {\n    let (limit, parts_dbtable, tables_dbtable, clickhouse, selected_host) = {\n        let ctx = context.lock().unwrap();\n        (\n            ctx.options.clickhouse.limit,\n            ctx.clickhouse.get_table_name(\"system\", \"parts\"),\n            ctx.clickhouse.get_table_name(\"system\", \"tables\"),\n            ctx.clickhouse.clone(),\n            ctx.selected_host.clone(),\n        )\n    };\n\n    let mut where_clauses = filters.build_where_clauses();\n\n    let host_filter = clickhouse.get_host_filter_clause(selected_host.as_ref());\n    if !host_filter.is_empty() {\n        where_clauses.push(format!(\"1 {}\", host_filter));\n    }\n\n    let where_clause = if where_clauses.is_empty() {\n        String::new()\n    } else {\n        format!(\" WHERE {}\", where_clauses.join(\" AND \"))\n    };\n\n    let select_clause = if is_dialog {\n        r#\"parts.name,\n            parts.partition,\n            parts.rows,\n            parts.bytes_on_disk,\n            parts.data_compressed_bytes,\n            parts.data_uncompressed_bytes,\n      
      parts.modification_time,\n            parts.active,\n            tables.uuid::String _table_uuid\"#\n    } else {\n        r#\"parts.database,\n            parts.table,\n            parts.name,\n            parts.partition,\n            parts.rows,\n            parts.bytes_on_disk,\n            parts.data_compressed_bytes,\n            parts.data_uncompressed_bytes,\n            parts.modification_time,\n            parts.active,\n            tables.uuid::String _table_uuid\"#\n    };\n\n    format!(\n        r#\"\n        SELECT\n            {select_clause}\n        FROM {parts_dbtable} as parts\n        LEFT JOIN (SELECT DISTINCT ON (database, name) database, name, uuid FROM {tables_dbtable}) tables\n            ON parts.database = tables.database AND parts.table = tables.name\n        {where_clause}\n        ORDER BY parts.modification_time DESC\n        LIMIT {limit}\n        SETTINGS allow_experimental_analyzer=1\n        \"#,\n        select_clause = select_clause,\n        parts_dbtable = parts_dbtable,\n        tables_dbtable = tables_dbtable,\n        where_clause = where_clause,\n        limit = limit,\n    )\n}\n\nfn get_columns(is_dialog: bool) -> (Vec<&'static str>, Vec<&'static str>) {\n    let columns = if is_dialog {\n        vec![\n            \"name\",\n            \"partition\",\n            \"rows\",\n            \"bytes_on_disk\",\n            \"data_compressed_bytes\",\n            \"data_uncompressed_bytes\",\n            \"modification_time\",\n            \"active\",\n            \"_table_uuid\",\n        ]\n    } else {\n        vec![\n            \"database\",\n            \"table\",\n            \"name\",\n            \"partition\",\n            \"rows\",\n            \"bytes_on_disk\",\n            \"data_compressed_bytes\",\n            \"data_uncompressed_bytes\",\n            \"modification_time\",\n            \"active\",\n            \"_table_uuid\",\n        ]\n    };\n    let columns_to_compare = vec![\"name\"];\n    
(columns, columns_to_compare)\n}\n\nfn show_part_logs(siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow) {\n    let mut map = HashMap::new();\n    columns.iter().zip(row.0.iter()).for_each(|(c, r)| {\n        map.insert(c.to_string(), r);\n    });\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n    siv.add_layer(Dialog::around(\n        LinearLayout::vertical()\n            .child(TextView::new(\"Logs:\").center())\n            .child(DummyView.fixed_height(1))\n            .child(NamedView::new(\n                \"part_logs\",\n                TextLogView::new(\n                    \"part_logs\",\n                    context,\n                    TextLogArguments {\n                        query_ids: Some(vec![format!(\n                            \"{}::{}\",\n                            map[\"_table_uuid\"].to_string(),\n                            map[\"name\"].to_string()\n                        )]),\n                        logger_names: None,\n                        hostname: None,\n                        message_filter: None,\n                        max_level: None,\n                        start: map[\"modification_time\"].as_datetime().unwrap(),\n                        end: RelativeDateTime::new(None),\n                    },\n                ),\n            )),\n    ));\n    siv.focus_name(\"part_logs\").unwrap();\n}\n\nfn show_part_details(siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        map.insert(c.to_string(), value);\n    });\n\n    let width = columns.iter().map(|c| c.len()).max().unwrap_or_default();\n    let info = columns\n        .iter()\n        .filter_map(|c| map.get(*c).map(|v| (*c, v)))\n        .map(|(c, v)| format!(\"{:<width$}: {}\", c, v, width = width))\n        
.collect::<Vec<_>>()\n        .join(\"\\n\");\n\n    siv.add_layer(Dialog::info(info).title(\"Part Details\"));\n}\n\nfn table_parts_action_callback(\n    siv: &mut Cursive,\n    columns: Vec<&'static str>,\n    row: view::QueryResultRow,\n) {\n    let actions = vec![\n        ActionDescription {\n            text: \"Show part logs\",\n            event: Event::Unknown(vec![]),\n        },\n        ActionDescription {\n            text: \"Show part details\",\n            event: Event::Unknown(vec![]),\n        },\n    ];\n\n    let columns_clone = columns.clone();\n    let row_clone = row.clone();\n\n    fuzzy_actions(siv, actions, move |siv, selected| match selected.as_str() {\n        \"Show part logs\" => {\n            show_part_logs(siv, columns_clone.clone(), row_clone.clone());\n        }\n        \"Show part details\" => {\n            show_part_details(siv, columns_clone.clone(), row_clone.clone());\n        }\n        _ => {}\n    });\n}\n\npub fn show_table_parts(\n    siv: &mut Cursive,\n    context: ContextArc,\n    database: Option<String>,\n    table: Option<String>,\n) {\n    let view_name = \"table_parts\";\n\n    if siv.has_view(view_name) {\n        return;\n    }\n\n    let filters = super::TableFilterParams::new(database, table, \"table_parts\", \"Table Parts\");\n\n    let query = build_query(&context, &filters, false);\n    let (columns, columns_to_compare) = get_columns(false);\n\n    let mut view = view::SQLQueryView::new(\n        context.clone(),\n        view_name,\n        \"modification_time\",\n        columns,\n        columns_to_compare,\n        query,\n    )\n    .unwrap_or_else(|_| panic!(\"Cannot create {}\", view_name));\n\n    view.get_inner_mut()\n        .set_on_submit(table_parts_action_callback);\n\n    view.get_inner_mut().set_title(filters.build_title(false));\n\n    siv.drop_main_view();\n    siv.set_main_view(view.with_name(view_name).full_screen());\n    siv.focus_name(view_name).unwrap();\n}\n\npub fn 
show_table_parts_dialog(\n    siv: &mut Cursive,\n    context: ContextArc,\n    database: Option<String>,\n    table: Option<String>,\n) {\n    let filters = super::TableFilterParams::new(database, table, \"table_parts\", \"Table Parts\");\n\n    let view_name: &'static str = Box::leak(filters.generate_view_name().into_boxed_str());\n    let query = build_query(&context, &filters, true);\n    let (columns, columns_to_compare) = get_columns(true);\n\n    let mut sql_view = view::SQLQueryView::new(\n        context.clone(),\n        view_name,\n        \"modification_time\",\n        columns,\n        columns_to_compare,\n        query,\n    )\n    .unwrap_or_else(|_| panic!(\"Cannot create {}\", view_name));\n\n    sql_view\n        .get_inner_mut()\n        .set_on_submit(table_parts_action_callback);\n    sql_view\n        .get_inner_mut()\n        .set_title(filters.build_title(true));\n\n    siv.add_layer(\n        Dialog::around(sql_view.with_name(view_name).min_size((140, 30))).title(\"Table Parts\"),\n    );\n}\n"
  },
  {
    "path": "src/view/providers/tables.rs",
    "content": "use crate::{\n    actions::ActionDescription,\n    interpreter::{ClickHouseAvailableQuirks, ContextArc, WorkerEvent, options::ChDigViews},\n    utils::fuzzy_actions,\n    view::{self, Navigation, ViewProvider},\n};\nuse cursive::{\n    Cursive,\n    event::Event,\n    view::{Nameable, Resizable},\n};\nuse std::collections::HashMap;\n\npub struct TablesViewProvider;\n\nimpl ViewProvider for TablesViewProvider {\n    fn name(&self) -> &'static str {\n        \"Tables\"\n    }\n\n    fn view_type(&self) -> ChDigViews {\n        ChDigViews::Tables\n    }\n\n    fn show(&self, siv: &mut Cursive, context: ContextArc) {\n        if siv.has_view(\"tables\") {\n            return;\n        }\n\n        let mut columns = vec![\n            \"database\",\n            \"table\",\n            \"engine\",\n            \"uuid::String _uuid\",\n            \"assumeNotNull(total_bytes) total_bytes\",\n            \"assumeNotNull(total_rows) total_rows\",\n        ];\n\n        let (cluster, has_background_schedule_pool, dbtable, clickhouse, selected_host) = {\n            let ctx = context.lock().unwrap();\n            (\n                ctx.options.clickhouse.cluster.is_some(),\n                ctx.clickhouse\n                    .quirks\n                    .has(ClickHouseAvailableQuirks::SystemBackgroundSchedulePool),\n                ctx.clickhouse.get_table_name_no_history(\"system\", \"tables\"),\n                ctx.clickhouse.clone(),\n                ctx.selected_host.clone(),\n            )\n        };\n\n        // Only show hostname column when in cluster mode AND no host filter is active\n        let columns_to_compare = if cluster && selected_host.is_none() {\n            columns.insert(0, \"hostName() host\");\n            vec![\"host\", \"database\", \"table\"]\n        } else {\n            vec![\"database\", \"table\"]\n        };\n\n        if has_background_schedule_pool {\n            columns.push(\"tasks\");\n        }\n\n        let 
host_filter = clickhouse.get_host_filter_clause(selected_host.as_ref());\n        let host_where = if host_filter.is_empty() {\n            String::new()\n        } else {\n            format!(\"AND 1 {}\", host_filter)\n        };\n\n        let query = if has_background_schedule_pool {\n            format!(\n                r#\"\n                SELECT DISTINCT ON (tables.database, tables.table, tables.uuid) {}\n                FROM {} tables\n                JOIN (SELECT table_uuid, count() tasks FROM system.background_schedule_pool GROUP BY table_uuid) bg ON tables.uuid = bg.table_uuid\n                WHERE\n                    engine NOT LIKE 'System%'\n                    AND tables.database NOT IN ('INFORMATION_SCHEMA', 'information_schema')\n                    {}\n                ORDER BY database, table, total_bytes DESC\n                \"#,\n                columns.join(\", \"),\n                dbtable,\n                host_where,\n            )\n        } else {\n            format!(\n                r#\"\n                SELECT DISTINCT ON (database, table, uuid) {}\n                FROM {}\n                WHERE\n                    engine NOT LIKE 'System%'\n                    AND database NOT IN ('INFORMATION_SCHEMA', 'information_schema')\n                    {}\n                ORDER BY database, table, total_bytes DESC\n                \"#,\n                columns.join(\", \"),\n                dbtable,\n                host_where,\n            )\n        };\n\n        siv.drop_main_view();\n\n        let mut view = view::SQLQueryView::new(\n            context.clone(),\n            \"tables\",\n            \"total_bytes\",\n            columns.clone(),\n            columns_to_compare,\n            query,\n        )\n        .unwrap_or_else(|_| panic!(\"Cannot get tables\"));\n\n        let logger_names_patterns = vec![\"%{database}.{table}%\", \"%{_uuid_raw}%\"];\n        let tables_action_callback =\n            move |siv: &mut Cursive, 
columns: Vec<&'static str>, row: view::QueryResultRow| {\n                show_table_actions(siv, columns, row, &logger_names_patterns);\n            };\n        view.get_inner_mut().set_on_submit(tables_action_callback);\n        view.get_inner_mut().set_title(\"Tables\");\n\n        siv.set_main_view(view.with_name(\"tables\").full_screen());\n        siv.focus_name(\"tables\").unwrap();\n    }\n}\n\nfn show_table_actions(\n    siv: &mut Cursive,\n    columns: Vec<&'static str>,\n    row: view::QueryResultRow,\n    logger_names_patterns: &[&'static str],\n) {\n    let actions = vec![\n        ActionDescription {\n            text: \"Show table logs\",\n            event: Event::Unknown(vec![]),\n        },\n        ActionDescription {\n            text: \"Show table background tasks\",\n            event: Event::Unknown(vec![]),\n        },\n        ActionDescription {\n            text: \"Show table background tasks log\",\n            event: Event::Unknown(vec![]),\n        },\n        ActionDescription {\n            text: \"Show table parts\",\n            event: Event::Unknown(vec![]),\n        },\n        ActionDescription {\n            text: \"Show asynchronous inserts\",\n            event: Event::Unknown(vec![]),\n        },\n        ActionDescription {\n            text: \"Show table merges\",\n            event: Event::Unknown(vec![]),\n        },\n        ActionDescription {\n            text: \"Show table mutations\",\n            event: Event::Unknown(vec![]),\n        },\n        ActionDescription {\n            text: \"Show table part log\",\n            event: Event::Unknown(vec![]),\n        },\n        ActionDescription {\n            text: \"SHOW CREATE TABLE\",\n            event: Event::Unknown(vec![]),\n        },\n    ];\n\n    let logger_names_patterns = logger_names_patterns.to_vec();\n    let columns_clone = columns.clone();\n    let row_clone = row.clone();\n\n    // TODO: Almost all table table from this list can be implemented:\n    
//\n    //   select table from system.columns where name = 'table' and database = 'system'\n    //\n    fuzzy_actions(siv, actions, move |siv, selected| match selected.as_str() {\n        \"Show table logs\" => {\n            show_table_logs(\n                siv,\n                columns_clone.clone(),\n                row_clone.clone(),\n                &logger_names_patterns,\n            );\n        }\n        \"Show table parts\" => {\n            show_table_parts(siv, columns_clone.clone(), row_clone.clone());\n        }\n        \"Show asynchronous inserts\" => {\n            show_table_asynchronous_inserts(siv, columns_clone.clone(), row_clone.clone());\n        }\n        \"Show table merges\" => {\n            show_table_merges(siv, columns_clone.clone(), row_clone.clone());\n        }\n        \"Show table mutations\" => {\n            show_table_mutations(siv, columns_clone.clone(), row_clone.clone());\n        }\n        \"Show table background tasks\" => {\n            show_table_background_tasks(siv, columns_clone.clone(), row_clone.clone());\n        }\n        \"Show table background tasks log\" => {\n            show_table_background_tasks_logs(siv, columns_clone.clone(), row_clone.clone());\n        }\n        \"Show table part log\" => {\n            show_table_part_log(siv, columns_clone.clone(), row_clone.clone());\n        }\n        \"SHOW CREATE TABLE\" => {\n            show_create_table(siv, columns_clone.clone(), row_clone.clone());\n        }\n        _ => {}\n    });\n}\n\nfn show_create_table(siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        map.insert(c.to_string(), value);\n    });\n\n    let database = map\n        .get(\"database\")\n        .map(|s| s.to_owned())\n        .unwrap_or_default();\n    let table = 
map.get(\"table\").map(|s| s.to_owned()).unwrap_or_default();\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n    context\n        .lock()\n        .unwrap()\n        .worker\n        .send(true, WorkerEvent::ShowCreateTable(database, table));\n}\n\nfn show_table_logs(\n    siv: &mut Cursive,\n    columns: Vec<&'static str>,\n    row: view::QueryResultRow,\n    logger_names_patterns: &[&'static str],\n) {\n    super::query_result_show_logs_for_row(siv, columns, row, logger_names_patterns, \"table_logs\");\n}\n\nfn show_table_background_tasks(\n    siv: &mut Cursive,\n    columns: Vec<&'static str>,\n    row: view::QueryResultRow,\n) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        map.insert(c.to_string(), value);\n    });\n\n    let database = map.get(\"database\").map(|s| s.to_owned());\n    let table = map.get(\"table\").map(|s| s.to_owned());\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n\n    super::background_schedule_pool::show_background_schedule_pool_dialog(\n        siv, context, database, table,\n    );\n}\n\nfn show_table_background_tasks_logs(\n    siv: &mut Cursive,\n    columns: Vec<&'static str>,\n    row: view::QueryResultRow,\n) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        map.insert(c.to_string(), value);\n    });\n\n    let database = map.get(\"database\").map(|s| s.to_owned());\n    let table = map.get(\"table\").map(|s| s.to_owned());\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n\n    super::background_schedule_pool_log::show_background_schedule_pool_log_dialog(\n        siv, context, None, database, table,\n    );\n}\n\nfn show_table_parts(siv: &mut Cursive, columns: Vec<&'static str>, row: 
view::QueryResultRow) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        map.insert(c.to_string(), value);\n    });\n\n    let database = map\n        .get(\"database\")\n        .map(|s| s.to_owned())\n        .unwrap_or_default();\n    let table = map.get(\"table\").map(|s| s.to_owned()).unwrap_or_default();\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n    context\n        .lock()\n        .unwrap()\n        .worker\n        .send(true, WorkerEvent::TableParts(database, table));\n}\n\nfn show_table_asynchronous_inserts(\n    siv: &mut Cursive,\n    columns: Vec<&'static str>,\n    row: view::QueryResultRow,\n) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        map.insert(c.to_string(), value);\n    });\n\n    let database = map\n        .get(\"database\")\n        .map(|s| s.to_owned())\n        .unwrap_or_default();\n    let table = map.get(\"table\").map(|s| s.to_owned()).unwrap_or_default();\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n    context\n        .lock()\n        .unwrap()\n        .worker\n        .send(true, WorkerEvent::AsynchronousInserts(database, table));\n}\n\nfn show_table_merges(siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        map.insert(c.to_string(), value);\n    });\n\n    let database = map.get(\"database\").map(|s| s.to_owned());\n    let table = map.get(\"table\").map(|s| s.to_owned());\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n\n    super::merges::show_merges_dialog(siv, context, 
database, table);\n}\n\nfn show_table_mutations(siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        map.insert(c.to_string(), value);\n    });\n\n    let database = map.get(\"database\").map(|s| s.to_owned());\n    let table = map.get(\"table\").map(|s| s.to_owned());\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n\n    super::mutations::show_mutations_dialog(siv, context, database, table);\n}\n\nfn show_table_part_log(siv: &mut Cursive, columns: Vec<&'static str>, row: view::QueryResultRow) {\n    let row_data = row.0;\n    let mut map = HashMap::<String, String>::new();\n    columns.iter().zip(row_data.iter()).for_each(|(c, r)| {\n        let value = r.to_string();\n        map.insert(c.to_string(), value);\n    });\n\n    let database = map.get(\"database\").map(|s| s.to_owned());\n    let table = map.get(\"table\").map(|s| s.to_owned());\n    let table_uuid = map.get(\"_uuid\").map(|s| s.to_owned());\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n\n    super::part_log::show_part_log_dialog(siv, context, database, table, table_uuid);\n}\n"
  },
  {
    "path": "src/view/queries_view.rs",
    "content": "use anyhow::{Error, Result};\nuse chrono::{DateTime, Local, TimeDelta};\nuse cursive::view::Scrollable;\nuse std::cmp::Ordering;\nuse std::collections::{HashMap, HashSet};\nuse std::mem::take;\nuse std::sync::{Arc, Mutex};\n\nuse cursive::traits::{Nameable, Resizable};\nuse cursive::utils::markup::StyledString;\nuse cursive::{\n    Cursive,\n    event::{Callback, Event, EventResult},\n    inner_getters,\n    theme::{BaseColor, Color, Style as CursiveStyle},\n    view::ViewWrapper,\n    views::{self, Dialog, OnEventView},\n};\nuse size::{Base, SizeFormatter, Style};\n\nuse crate::common::RelativeDateTime;\nuse crate::view::show_bottom_prompt;\nuse crate::{\n    interpreter::{\n        BackgroundRunner, ContextArc, Query, TextLogArguments, WorkerEvent, clickhouse::Columns,\n        clickhouse::TraceType, options::ViewOptions,\n    },\n    utils::{edit_query, find_common_hostname_prefix_and_suffix, get_query},\n    view::table_view::TableView,\n    view::{QueryView, SQLQueryView, TableViewItem, TextLogView},\n    wrap_impl_no_move,\n};\n\n// ClickHouse may flush some system.* tables after system.query_log, likely it is only a precision\n// error, so 1 second should be enough.\nconst QUERY_TIME_DRIFT_BUFFER_SECONDS: i64 = 1;\n\n// count() OVER (PARTITION BY initial_query_id)\ntype QueryKey = (String, String); // (query_id, host_name)\n\nfn query_key(q: &Query) -> QueryKey {\n    (q.query_id.clone(), q.host_name.clone())\n}\n\nfn queries_count_subqueries(queries: &mut HashMap<QueryKey, Query>) {\n    // <(initial_query_id, host_name), count()>\n    let mut subqueries = HashMap::<(String, String), u64>::new();\n    for v in queries.values() {\n        *subqueries\n            .entry((v.initial_query_id.clone(), v.host_name.clone()))\n            .or_default() += 1;\n    }\n    for v in queries.values_mut() {\n        v.subqueries = subqueries[&(v.initial_query_id.clone(), v.host_name.clone())];\n    }\n}\nfn sum_map<K, V>(m1: &HashMap<K, V>, m2: 
&HashMap<K, V>) -> HashMap<K, V>\nwhere\n    K: std::hash::Hash + std::cmp::Eq + Clone,\n    V: std::ops::AddAssign + Copy,\n{\n    let mut dst = m1.clone();\n    for (k, v) in m2.iter() {\n        if let Some(new_v) = dst.get_mut(k) {\n            *new_v += *v;\n        } else {\n            dst.insert(k.clone(), *v);\n        }\n    }\n    return dst;\n}\n// if(is_initial_query, (sumMap(ProfileEvents) OVER (PARTITION BY initial_query_id, host_name)), ProfileEvents)\nfn queries_sum_profile_events(queries: &mut HashMap<QueryKey, Query>) {\n    // <(initial_query_id, host_name), sumMap(ProfileEvents)>\n    let mut profile_events = HashMap::<(String, String), HashMap<String, u64>>::new();\n    for v in queries.values() {\n        let key = (v.initial_query_id.clone(), v.host_name.clone());\n        if let Some(pe) = profile_events.get_mut(&key) {\n            *pe = sum_map(pe, &v.profile_events);\n        } else {\n            profile_events.insert(key, v.profile_events.clone());\n        }\n    }\n    for v in queries.values_mut() {\n        if v.is_initial_query\n            && let Some(pe) = profile_events.get(&(v.initial_query_id.clone(), v.host_name.clone()))\n        {\n            v.profile_events = pe.clone();\n        }\n    }\n}\n\n#[derive(Copy, Clone, PartialEq, Eq, Hash)]\npub enum QueriesColumn {\n    Selection,\n    HostName,\n    SubQueries,\n    Cpu,\n    IOWait,\n    CPUWait,\n    User,\n    Threads,\n    Memory,\n    DiskIO,\n    IO,\n    NetIO,\n    Elapsed,\n    QueryEnd,\n    QueryId,\n    IsCancelled,\n    Query,\n}\nimpl PartialEq<Query> for Query {\n    fn eq(&self, other: &Self) -> bool {\n        return self.query_id == other.query_id && self.host_name == other.host_name;\n    }\n}\n\nimpl TableViewItem<QueriesColumn> for Query {\n    fn to_column(&self, column: QueriesColumn) -> String {\n        let formatter = SizeFormatter::new()\n            .with_base(Base::Base2)\n            .with_style(Style::Abbreviated);\n\n        match column 
{\n            QueriesColumn::Selection => {\n                if self.selection {\n                    \"x\".to_string()\n                } else {\n                    \" \".to_string()\n                }\n            }\n            QueriesColumn::HostName => self\n                .display_host_name\n                .as_deref()\n                .unwrap_or(&self.host_name)\n                .to_string(),\n            QueriesColumn::SubQueries => {\n                if self.is_initial_query {\n                    return self.subqueries.to_string();\n                } else {\n                    return 1.to_string();\n                }\n            }\n            QueriesColumn::Cpu => format!(\"{:.1} %\", self.cpu()),\n            QueriesColumn::IOWait => format!(\"{:.1} %\", self.io_wait()),\n            QueriesColumn::CPUWait => format!(\"{:.1} %\", self.cpu_wait()),\n            QueriesColumn::User => self.user.clone(),\n            QueriesColumn::Threads => self.threads.to_string(),\n            QueriesColumn::Memory => formatter.format(self.memory),\n            QueriesColumn::DiskIO => formatter.format(self.disk_io() as i64),\n            QueriesColumn::IO => formatter.format(self.io() as i64),\n            QueriesColumn::NetIO => formatter.format(self.net_io() as i64),\n            QueriesColumn::Elapsed => format!(\"{:.2}\", self.elapsed),\n            QueriesColumn::QueryEnd => format!(\"{}\", self.query_end_time_microseconds),\n            QueriesColumn::QueryId => {\n                if self.subqueries > 1 && self.is_initial_query {\n                    return format!(\"-> {}\", self.query_id);\n                } else {\n                    return self.query_id.clone();\n                }\n            }\n            QueriesColumn::IsCancelled => {\n                if self.is_cancelled {\n                    \"x\".to_string()\n                } else {\n                    \" \".to_string()\n                }\n            }\n            QueriesColumn::Query => 
self.normalized_query.clone(),\n        }\n    }\n\n    fn cmp(&self, other: &Self, column: QueriesColumn) -> Ordering\n    where\n        Self: Sized,\n    {\n        match column {\n            QueriesColumn::Selection => self.selection.cmp(&other.selection),\n            QueriesColumn::HostName => self.host_name.cmp(&other.host_name),\n            QueriesColumn::SubQueries => self.subqueries.cmp(&other.subqueries),\n            QueriesColumn::Cpu => self.cpu().total_cmp(&other.cpu()),\n            QueriesColumn::IOWait => self.io_wait().total_cmp(&other.io_wait()),\n            QueriesColumn::CPUWait => self.cpu_wait().total_cmp(&other.cpu_wait()),\n            QueriesColumn::User => self.user.cmp(&other.user),\n            QueriesColumn::Threads => self.threads.cmp(&other.threads),\n            QueriesColumn::Memory => self.memory.cmp(&other.memory),\n            QueriesColumn::DiskIO => self.disk_io().total_cmp(&other.disk_io()),\n            QueriesColumn::IO => self.io().total_cmp(&other.io()),\n            QueriesColumn::NetIO => self.net_io().total_cmp(&other.net_io()),\n            QueriesColumn::Elapsed => self.elapsed.total_cmp(&other.elapsed),\n            QueriesColumn::QueryEnd => self\n                .query_end_time_microseconds\n                .cmp(&other.query_end_time_microseconds),\n            QueriesColumn::QueryId => self.query_id.cmp(&other.query_id),\n            QueriesColumn::IsCancelled => self.is_cancelled.cmp(&other.is_cancelled),\n            QueriesColumn::Query => self.normalized_query.cmp(&other.normalized_query),\n        }\n    }\n\n    fn to_column_styled(&self, column: QueriesColumn) -> StyledString {\n        let text = self.to_column(column);\n        if self.is_cancelled {\n            let mut styled = StyledString::new();\n            styled.append_styled(text, CursiveStyle::from(Color::Dark(BaseColor::Yellow)));\n            return styled;\n        }\n        StyledString::plain(text)\n    }\n}\n\npub struct QueriesView 
{\n    context: ContextArc,\n    table: TableView<Query, QueriesColumn>,\n    items: HashMap<QueryKey, Query>,\n    // For show only specific query\n    query_id: Option<String>,\n    // For multi selection\n    selected_query_ids: HashSet<QueryKey>,\n    has_selection_column: bool,\n    options: ViewOptions,\n    // Is this running processes, or queries from system.query_log?\n    is_system_processes: bool,\n    // Used to filter queries\n    filter: Arc<Mutex<String>>,\n    // Number of queries to render\n    limit: Arc<Mutex<u64>>,\n    // Keep clipboard alive so X11 clipboard manager can persist the data\n    clipboard: Option<arboard::Clipboard>,\n\n    #[allow(unused)]\n    bg_runner: BackgroundRunner,\n}\n\n#[derive(Debug, Clone)]\npub enum Type {\n    ProcessList,\n    SlowQueryLog,\n    LastQueryLog,\n}\n\nimpl QueriesView {\n    inner_getters!(self.table: TableView<Query, QueriesColumn>);\n\n    pub fn update(&mut self, processes: Columns) -> Result<()> {\n        let prev_items = take(&mut self.items);\n\n        // Selected queries should be updated, since in the new query list it may not be exists\n        // already\n        let mut new_selected_query_ids = HashSet::new();\n\n        for i in 0..processes.row_count() {\n            let mut query = Query::from_clickhouse_block(&processes, i, self.is_system_processes)?;\n\n            let key = query_key(&query);\n            if self.selected_query_ids.contains(&key) {\n                new_selected_query_ids.insert(key.clone());\n            }\n\n            if let Some(prev_item) = prev_items.get(&key) {\n                query.prev_elapsed = Some(prev_item.elapsed);\n                query.prev_profile_events = Some(prev_item.profile_events.clone());\n            }\n\n            self.items.insert(key, query);\n        }\n\n        queries_count_subqueries(&mut self.items);\n        if !self.options.no_subqueries {\n            queries_sum_profile_events(&mut self.items);\n        }\n\n        
self.selected_query_ids = new_selected_query_ids;\n        self.update_view();\n\n        return Ok(());\n    }\n\n    fn update_view(&mut self) {\n        let mut items = Vec::new();\n        if let Some(query_id) = &self.query_id {\n            for query in self.items.values() {\n                if query.initial_query_id == *query_id {\n                    items.push(query.clone());\n                }\n            }\n        } else {\n            let mut query_ids = HashSet::new();\n            for query in self.items.values() {\n                query_ids.insert(&query.query_id);\n            }\n\n            for query in self.items.values() {\n                if self.options.group_by {\n                    // In case of grouping, do not show initial queries if they have initial query.\n                    if !query.is_initial_query && query_ids.contains(&query.initial_query_id) {\n                        continue;\n                    }\n                }\n                items.push(query.clone());\n            }\n        }\n\n        // Compute stripped hostname for display (to_column uses display_host_name)\n        if !self.options.no_strip_hostname_suffix && items.len() > 1 {\n            let (common_prefix, common_suffix) =\n                find_common_hostname_prefix_and_suffix(items.iter().map(|q| q.host_name.as_str()));\n\n            if !common_prefix.is_empty() || !common_suffix.is_empty() {\n                for item in &mut items {\n                    let mut hostname = item.host_name.as_str();\n\n                    if !common_prefix.is_empty()\n                        && let Some(stripped) = hostname.strip_prefix(&common_prefix)\n                    {\n                        hostname = stripped;\n                    }\n\n                    if !common_suffix.is_empty()\n                        && let Some(stripped) = hostname.strip_suffix(&common_suffix)\n                    {\n                        hostname = stripped;\n                    
}\n\n                    item.display_host_name = Some(hostname.to_string());\n                }\n            }\n        }\n\n        if !self.selected_query_ids.is_empty() {\n            if !self.has_selection_column {\n                self.table\n                    .insert_column(0, QueriesColumn::Selection, \"v\", |c| c.width(1));\n                self.has_selection_column = true;\n            }\n            for item in &mut items {\n                item.selection = self.selected_query_ids.contains(&query_key(item));\n            }\n        } else if self.has_selection_column {\n            self.table.remove_column(0);\n            self.has_selection_column = false;\n        }\n\n        self.table.set_items_stable(items);\n    }\n\n    fn show_flamegraph(&mut self, tui: bool, trace_type: Option<TraceType>) -> Result<()> {\n        let (query_ids, min_query_start_microseconds, max_query_end_microseconds) =\n            self.get_query_ids()?;\n        let mut context_locked = self.context.lock().unwrap();\n        if let Some(trace_type) = trace_type {\n            context_locked.worker.send(\n                true,\n                WorkerEvent::QueryFlameGraph(\n                    trace_type,\n                    tui,\n                    min_query_start_microseconds,\n                    max_query_end_microseconds,\n                    query_ids,\n                ),\n            );\n        } else {\n            context_locked\n                .worker\n                .send(true, WorkerEvent::LiveQueryFlameGraph(tui, Some(query_ids)));\n        }\n\n        return Ok(());\n    }\n\n    fn show_flamegraph_diff(&mut self, trace_type: TraceType) -> Result<()> {\n        let (groups, min_query_start_microseconds, max_query_end_microseconds) =\n            self.get_query_id_groups()?;\n        if groups.len() != 2 {\n            return Err(Error::msg(format!(\n                \"Flamegraph diff requires exactly 2 queries selected with <Space>, got {}\",\n            
    groups.len()\n            )));\n        }\n        let mut groups_iter = groups.into_iter();\n        let query_ids_a = groups_iter.next().unwrap();\n        let query_ids_b = groups_iter.next().unwrap();\n        let mut context_locked = self.context.lock().unwrap();\n        context_locked.worker.send(\n            true,\n            WorkerEvent::QueryFlameGraphDiff(\n                trace_type,\n                min_query_start_microseconds,\n                max_query_end_microseconds,\n                query_ids_a,\n                query_ids_b,\n            ),\n        );\n\n        return Ok(());\n    }\n\n    fn get_selected_query(&self) -> Result<Query> {\n        let item_index = self.table.item().ok_or(Error::msg(\"No query selected\"))?;\n        let item = self\n            .table\n            .borrow_item(item_index)\n            .ok_or(Error::msg(\"No such row anymore\"))?;\n        return Ok(item.clone());\n    }\n\n    fn get_query_ids(&self) -> Result<(Vec<String>, DateTime<Local>, Option<DateTime<Local>>)> {\n        let selected_query = self.get_selected_query()?;\n        let current_query_id = selected_query.query_id.clone();\n        let mut min_query_start_microseconds = selected_query.query_start_time_microseconds;\n        let mut max_query_end_microseconds = Option::<DateTime<Local>>::None;\n\n        let mut query_ids = Vec::new();\n\n        // In case of multi selection ignore current row, but otherwise current query_id should be\n        // added since it may not be contained in self.items already.\n        if self.selected_query_ids.is_empty() {\n            query_ids.push(current_query_id.clone());\n        }\n\n        if !self.selected_query_ids.is_empty() {\n            for q in self.items.values() {\n                // NOTE: we have to look at both here, since selected_query_ids contains\n                // (query_id, host_name) not (initial_query_id, host_name), while we are\n                // curious about both\n              
  let key = query_key(q);\n                let initial_key = (q.initial_query_id.clone(), q.host_name.clone());\n                if self.selected_query_ids.contains(&initial_key)\n                    || self.selected_query_ids.contains(&key)\n                {\n                    query_ids.push(q.query_id.clone());\n                }\n            }\n        } else {\n            for q in self.items.values() {\n                if q.initial_query_id == current_query_id {\n                    query_ids.push(q.query_id.clone());\n                }\n            }\n        }\n\n        // Update min_query_start_microseconds/max_query_end_microseconds\n        {\n            let query_ids_set = HashSet::<&String>::from_iter(query_ids.iter());\n            for q in self.items.values() {\n                if !query_ids_set.contains(&q.query_id) {\n                    continue;\n                }\n                if q.query_start_time_microseconds < min_query_start_microseconds {\n                    min_query_start_microseconds = q.query_start_time_microseconds;\n                }\n                if !self.is_system_processes {\n                    if let Some(max) = max_query_end_microseconds {\n                        if q.query_end_time_microseconds > max {\n                            max_query_end_microseconds = Some(q.query_end_time_microseconds);\n                        }\n                    } else {\n                        max_query_end_microseconds = Some(q.query_end_time_microseconds);\n                    }\n                }\n            }\n        }\n\n        return Ok((\n            query_ids,\n            min_query_start_microseconds,\n            max_query_end_microseconds,\n        ));\n    }\n\n    /// Group selected queries by their initial_query_id so each logical distributed\n    /// query becomes a single group of constituent query_ids. 
Preserves the selection\n    /// order: the group whose initial_query_id first appears among the selected rows\n    /// comes first.\n    fn get_query_id_groups(\n        &self,\n    ) -> Result<(Vec<Vec<String>>, DateTime<Local>, Option<DateTime<Local>>)> {\n        if self.selected_query_ids.len() < 2 {\n            return Err(Error::msg(\n                \"Select at least 2 queries with <Space> to diff their flamegraphs\",\n            ));\n        }\n\n        // Dedup initial_query_ids for the selected rows, keeping insertion order so the\n        // diff is deterministic (first-selected is \"before\", next \"after\").\n        let mut initial_query_ids: Vec<String> = Vec::new();\n        for q in self.items.values() {\n            let key = query_key(q);\n            let initial_key = (q.initial_query_id.clone(), q.host_name.clone());\n            if (self.selected_query_ids.contains(&initial_key)\n                || self.selected_query_ids.contains(&key))\n                && !initial_query_ids.contains(&q.initial_query_id)\n            {\n                initial_query_ids.push(q.initial_query_id.clone());\n            }\n        }\n\n        let mut min_start: Option<DateTime<Local>> = None;\n        let mut max_end: Option<DateTime<Local>> = None;\n        let mut groups: Vec<Vec<String>> = Vec::with_capacity(initial_query_ids.len());\n        for iqid in &initial_query_ids {\n            let mut group = Vec::new();\n            for q in self.items.values() {\n                if &q.initial_query_id != iqid {\n                    continue;\n                }\n                group.push(q.query_id.clone());\n                min_start = Some(match min_start {\n                    Some(cur) => cur.min(q.query_start_time_microseconds),\n                    None => q.query_start_time_microseconds,\n                });\n                if !self.is_system_processes {\n                    max_end = Some(match max_end {\n                        Some(cur) => 
cur.max(q.query_end_time_microseconds),\n                        None => q.query_end_time_microseconds,\n                    });\n                }\n            }\n            if !group.is_empty() {\n                groups.push(group);\n            }\n        }\n\n        let min_start = min_start.ok_or_else(|| Error::msg(\"No queries matched selection\"))?;\n        return Ok((groups, min_start, max_end));\n    }\n\n    pub fn update_limit(&mut self, is_sub: bool) {\n        let new_limit = if is_sub {\n            self.limit.clone().lock().unwrap().saturating_sub(20)\n        } else {\n            self.limit.clone().lock().unwrap().saturating_add(20)\n        };\n        *self.limit.clone().lock().unwrap() = new_limit;\n        log::debug!(\"Set limit to {}\", new_limit);\n    }\n\n    fn action_show_query_logs(&mut self) -> Result<Option<EventResult>> {\n        let (query_ids, min_query_start_microseconds, max_query_end_microseconds) =\n            self.get_query_ids()?;\n        let context_copy = self.context.clone();\n        self.context\n            .lock()\n            .unwrap()\n            .cb_sink\n            .send(Box::new(move |siv: &mut cursive::Cursive| {\n                siv.add_layer(views::Dialog::around(\n                    views::LinearLayout::vertical()\n                        .child(views::TextView::new(\"Logs:\").center())\n                        .child(views::DummyView.fixed_height(1))\n                        .child(views::NamedView::new(\n                            \"query_log\",\n                            TextLogView::new(\n                                \"query_log\",\n                                context_copy,\n                                TextLogArguments {\n                                    query_ids: Some(query_ids),\n                                    logger_names: None,\n                                    hostname: None,\n                                    message_filter: None,\n                                 
   max_level: None,\n                                    start: min_query_start_microseconds,\n                                    end: RelativeDateTime::from(max_query_end_microseconds),\n                                },\n                            ),\n                        )),\n                ));\n                siv.focus_name(\"query_log\").unwrap();\n            }))\n            .unwrap();\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_show_flamegraph(\n        &mut self,\n        tui: bool,\n        trace_type: Option<TraceType>,\n    ) -> Result<Option<EventResult>> {\n        self.show_flamegraph(tui, trace_type)?;\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_show_flamegraph_diff(\n        &mut self,\n        trace_type: TraceType,\n    ) -> Result<Option<EventResult>> {\n        self.show_flamegraph_diff(trace_type)?;\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_query_profile_events(&mut self) -> Result<Option<EventResult>> {\n        // Check if multiple queries are selected\n        if self.selected_query_ids.len() > 1 {\n            // Get the queries for diff view\n            let queries: Vec<Query> = self\n                .items\n                .values()\n                .filter(|q| self.selected_query_ids.contains(&query_key(q)))\n                .cloned()\n                .collect();\n\n            if queries.is_empty() {\n                return Err(Error::msg(\"No queries selected\"));\n            }\n\n            self.context\n                .lock()\n                .unwrap()\n                .cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.add_layer(\n                        views::Dialog::around(\n                            QueryView::new_diff(queries, \"process\").min_size((120, 35)),\n                        )\n                        .title(\"Profile Events Diff\"),\n                    );\n                }))\n      
          .unwrap();\n        } else {\n            // Single query - show as before\n            let selected_query = self.get_selected_query()?;\n            self.context\n                .lock()\n                .unwrap()\n                .cb_sink\n                .send(Box::new(move |siv: &mut cursive::Cursive| {\n                    siv.add_layer(views::Dialog::around(\n                        QueryView::new(selected_query, \"process\").min_size((120, 35)),\n                    ));\n                }))\n                .unwrap();\n        }\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_query_details(&mut self) -> Result<Option<EventResult>> {\n        let selected_query = self.get_selected_query()?;\n        Ok(Some(EventResult::Consumed(Some(Callback::from_fn_once(\n            move |siv| {\n                siv.add_layer(views::Dialog::info(selected_query.to_string()).title(\"Details\"));\n            },\n        )))))\n    }\n\n    fn action_edit_query_and_execute(&mut self) -> Result<Option<EventResult>> {\n        let selected_query = self.get_selected_query()?;\n        let query = selected_query.original_query.clone();\n        let database = selected_query.current_database.clone();\n        let settings = selected_query.settings.clone();\n        let mut context_locked = self.context.lock().unwrap();\n\n        let query = edit_query(&query, &settings)?;\n        context_locked\n            .worker\n            .send(true, WorkerEvent::ExecuteQuery(database, query));\n\n        Ok(Some(EventResult::Consumed(Some(Callback::from_fn_once(\n            |siv| siv.complete_clear(),\n        )))))\n    }\n\n    fn action_show_query(&mut self) -> Result<Option<EventResult>> {\n        let selected_query = self.get_selected_query()?;\n        let query = selected_query.original_query.clone();\n        let database = selected_query.current_database.clone();\n        let settings = selected_query.settings.clone();\n\n        let query = 
get_query(&query, &settings);\n        let query = format!(\"USE {};\\n{}\", database, query);\n\n        self.context\n            .lock()\n            .unwrap()\n            .cb_sink\n            .send(Box::new(move |siv: &mut cursive::Cursive| {\n                siv.add_layer(views::Dialog::around(\n                    views::LinearLayout::vertical()\n                        .child(views::TextView::new(\"Query:\").center())\n                        .child(views::DummyView.fixed_height(1))\n                        .child(views::TextView::new(query).scrollable()),\n                ));\n            }))\n            .unwrap();\n\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_copy_query(&mut self) -> Result<Option<EventResult>> {\n        let selected_query = self.get_selected_query()?;\n        let query = selected_query.original_query.clone();\n\n        match arboard::Clipboard::new() {\n            Ok(mut clipboard) => {\n                if let Err(e) = clipboard.set_text(query) {\n                    return Ok(Some(EventResult::Consumed(Some(Callback::from_fn_once(\n                        move |siv| {\n                            siv.add_layer(Dialog::info(format!(\n                                \"Failed to copy to clipboard: {}\",\n                                e\n                            )));\n                        },\n                    )))));\n                }\n                self.clipboard = Some(clipboard);\n            }\n            Err(e) => {\n                return Ok(Some(EventResult::Consumed(Some(Callback::from_fn_once(\n                    move |siv| {\n                        siv.add_layer(Dialog::info(format!(\"Failed to access clipboard: {}\", e)));\n                    },\n                )))));\n            }\n        }\n\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_explain_syntax(&mut self) -> Result<Option<EventResult>> {\n        let selected_query = self.get_selected_query()?;\n       
 let query = selected_query.original_query.clone();\n        let database = selected_query.current_database.clone();\n        let settings = selected_query.settings.clone();\n        let mut context_locked = self.context.lock().unwrap();\n        context_locked\n            .worker\n            .send(true, WorkerEvent::ExplainSyntax(database, query, settings));\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_explain_plan(&mut self) -> Result<Option<EventResult>> {\n        let selected_query = self.get_selected_query()?;\n        let query = selected_query.original_query.clone();\n        let database = selected_query.current_database.clone();\n        let mut context_locked = self.context.lock().unwrap();\n        context_locked\n            .worker\n            .send(true, WorkerEvent::ExplainPlan(database, query));\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_explain_pipeline(&mut self) -> Result<Option<EventResult>> {\n        let selected_query = self.get_selected_query()?;\n        let query = selected_query.original_query.clone();\n        let database = selected_query.current_database.clone();\n        let mut context_locked = self.context.lock().unwrap();\n        context_locked\n            .worker\n            .send(true, WorkerEvent::ExplainPipeline(database, query));\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_select(&mut self) -> Result<Option<EventResult>> {\n        let selected_query = self.get_selected_query()?;\n        let key = query_key(&selected_query);\n\n        if self.selected_query_ids.contains(&key) {\n            self.selected_query_ids.remove(&key);\n        } else {\n            self.selected_query_ids.insert(key);\n        }\n        self.update_view();\n\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_show_all_queries(&mut self) -> Result<Option<EventResult>> {\n        self.query_id = None;\n        self.update_view();\n        
Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_show_queries_on_shards(&mut self) -> Result<Option<EventResult>> {\n        let selected_query = self.get_selected_query()?;\n        let query_id = selected_query.query_id.clone();\n\n        self.query_id = Some(query_id);\n        self.update_view();\n\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_explain_indexes(&mut self) -> Result<Option<EventResult>> {\n        let selected_query = self.get_selected_query()?;\n        let query = selected_query.original_query.clone();\n        let database = selected_query.current_database.clone();\n        let mut context_locked = self.context.lock().unwrap();\n        context_locked\n            .worker\n            .send(true, WorkerEvent::ExplainPlanIndexes(database, query));\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_explain_pipeline_graph(&mut self) -> Result<Option<EventResult>> {\n        let selected_query = self.get_selected_query()?;\n        let query = selected_query.original_query.clone();\n        let database = selected_query.current_database.clone();\n        let mut context_locked = self.context.lock().unwrap();\n        context_locked.worker.send(\n            true,\n            WorkerEvent::ExplainPipelineShareGraph(database, query),\n        );\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_kill_query(&mut self) -> Result<Option<EventResult>> {\n        let selected_query = self.get_selected_query()?;\n        let query_id = selected_query.query_id.clone();\n        let context_copy = self.context.clone();\n        self.context\n            .lock()\n            .unwrap()\n            .cb_sink\n            .send(Box::new(move |siv: &mut cursive::Cursive| {\n                siv.add_layer(\n                    views::Dialog::new()\n                        .title(format!(\n                            \"Are you sure you want to KILL QUERY with query_id = {}\",\n                            
query_id\n                        ))\n                        .button(\"Yes, I'm sure\", move |s| {\n                            context_copy\n                                .lock()\n                                .unwrap()\n                                .worker\n                                .send(true, WorkerEvent::KillQuery(query_id.clone()));\n                            s.pop_layer();\n                        })\n                        .button(\"Cancel\", |s| {\n                            s.pop_layer();\n                        }),\n                );\n            }))\n            .unwrap();\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_export_perfetto(&mut self) -> Result<Option<EventResult>> {\n        let (query_ids, min_query_start_microseconds, max_query_end_microseconds) =\n            self.get_query_ids()?;\n\n        let query_ids_set: HashSet<&String> = HashSet::from_iter(query_ids.iter());\n        let queries: Vec<_> = self\n            .items\n            .values()\n            .filter(|q| query_ids_set.contains(&q.query_id))\n            .cloned()\n            .collect();\n\n        let mut context_locked = self.context.lock().unwrap();\n        context_locked.worker.send(\n            true,\n            WorkerEvent::PerfettoExport(\n                queries,\n                query_ids,\n                min_query_start_microseconds,\n                max_query_end_microseconds,\n            ),\n        );\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_increase_limit(&mut self) -> Result<Option<EventResult>> {\n        self.update_limit(true);\n        self.bg_runner.schedule();\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_decrease_limit(&mut self) -> Result<Option<EventResult>> {\n        self.update_limit(false);\n        self.bg_runner.schedule();\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_query_processors(&mut self) -> Result<Option<EventResult>> {\n    
    let (query_ids, min_query_start_microseconds, max_query_end_microseconds) =\n            self.get_query_ids()?;\n        let columns = vec![\n            \"name\",\n            \"count() count\",\n            \"sum(elapsed_us)/1e6 elapsed_sec\",\n            \"sum(input_wait_elapsed_us)/1e6 input_wait_sec\",\n            \"sum(output_wait_elapsed_us)/1e6 output_wait_sec\",\n            \"sum(input_rows) rows\",\n            \"sum(input_bytes) bytes\",\n            \"round(bytes/elapsed_sec,2)/1e6 MB_per_sec\",\n        ];\n        let sort_by = \"elapsed_sec\";\n        let table = \"processors_profile_log\";\n        let dbtable = self\n            .context\n            .lock()\n            .unwrap()\n            .clickhouse\n            .get_log_table_name(\"system\", table);\n\n        let max_query_end_with_buffer = max_query_end_microseconds.unwrap_or(Local::now())\n            + TimeDelta::seconds(QUERY_TIME_DRIFT_BUFFER_SECONDS);\n\n        let query = format!(\n            r#\"\n            WITH\n                fromUnixTimestamp64Nano({}) AS start_time_,\n                fromUnixTimestamp64Nano({}) AS end_time_\n            SELECT {}\n            FROM {}\n            WHERE\n                    event_date >= toDate(start_time_) AND event_time >  toDateTime(start_time_) AND event_time_microseconds > start_time_\n                AND event_date <= toDate(end_time_)   AND event_time <= toDateTime(end_time_)   AND event_time_microseconds <= end_time_\n                AND query_id IN ('{}')\n            GROUP BY name\n            ORDER BY name ASC\n            \"#,\n            min_query_start_microseconds\n                .timestamp_nanos_opt()\n                .ok_or(Error::msg(\"Invalid time\"))?,\n            max_query_end_with_buffer\n                .timestamp_nanos_opt()\n                .ok_or(Error::msg(\"Invalid time\"))?,\n            columns.join(\", \"),\n            dbtable,\n            query_ids.join(\"','\"),\n        );\n\n        let 
context_copy = self.context.clone();\n        self.context\n            .lock()\n            .unwrap()\n            .cb_sink\n            .send(Box::new(move |siv: &mut cursive::Cursive| {\n                siv.add_layer(views::Dialog::around(\n                    views::LinearLayout::vertical()\n                        .child(views::TextView::new(\"Processors:\").center())\n                        .child(views::DummyView.fixed_height(1))\n                        .child(\n                            SQLQueryView::new(\n                                context_copy,\n                                table,\n                                sort_by,\n                                columns.clone(),\n                                vec![\"name\"],\n                                query,\n                            )\n                            .unwrap_or_else(|_| panic!(\"Cannot get {}\", table))\n                            .with_name(table)\n                            .min_size((160, 40)),\n                        ),\n                ));\n            }))\n            .unwrap();\n\n        Ok(Some(EventResult::consumed()))\n    }\n\n    fn action_query_views(&mut self) -> Result<Option<EventResult>> {\n        let (query_ids, min_query_start_microseconds, max_query_end_microseconds) =\n            self.get_query_ids()?;\n        let columns = vec![\"view_name\", \"view_duration_ms\"];\n        let sort_by = \"view_duration_ms\";\n        let table = \"query_views_log\";\n        let dbtable = self\n            .context\n            .lock()\n            .unwrap()\n            .clickhouse\n            .get_log_table_name(\"system\", table);\n\n        let max_query_end_with_buffer = max_query_end_microseconds.unwrap_or(Local::now())\n            + TimeDelta::seconds(QUERY_TIME_DRIFT_BUFFER_SECONDS);\n\n        let query = format!(\n            r#\"\n            WITH\n                fromUnixTimestamp64Nano({}) AS start_time_,\n                fromUnixTimestamp64Nano({}) 
AS end_time_\n            SELECT {}\n            FROM {}\n            WHERE\n                    event_date >= toDate(start_time_) AND event_time >  toDateTime(start_time_) AND event_time_microseconds > start_time_\n                AND event_date <= toDate(end_time_)   AND event_time <= toDateTime(end_time_)   AND event_time_microseconds <= end_time_\n                AND initial_query_id IN ('{}')\n            ORDER BY view_duration_ms DESC\n            \"#,\n            min_query_start_microseconds\n                .timestamp_nanos_opt()\n                .ok_or(Error::msg(\"Invalid time\"))?,\n            max_query_end_with_buffer\n                .timestamp_nanos_opt()\n                .ok_or(Error::msg(\"Invalid time\"))?,\n            columns.join(\", \"),\n            dbtable,\n            query_ids.join(\"','\"),\n        );\n\n        let context_copy = self.context.clone();\n        self.context\n            .lock()\n            .unwrap()\n            .cb_sink\n            .send(Box::new(move |siv: &mut cursive::Cursive| {\n                siv.add_layer(views::Dialog::around(\n                    views::LinearLayout::vertical()\n                        .child(views::TextView::new(\"Views:\").center())\n                        .child(views::DummyView.fixed_height(1))\n                        .child(\n                            SQLQueryView::new(\n                                context_copy,\n                                table,\n                                sort_by,\n                                columns.clone(),\n                                vec![\"view_name\"],\n                                query,\n                            )\n                            .unwrap_or_else(|_| panic!(\"Cannot get {}\", table))\n                            .with_name(table)\n                            .min_size((160, 40)),\n                        ),\n                ));\n            }))\n            .unwrap();\n\n        Ok(Some(EventResult::consumed()))\n   
 }\n\n    /// Ignore rustfmt max_width, otherwise callback actions looks ugly\n    #[rustfmt::skip]\n    pub fn new(\n        context: ContextArc,\n        processes_type: Type,\n        view_name: &'static str,\n        title: &str,\n    ) -> views::OnEventView<Self> {\n        // Macro to simplify adding view actions\n        macro_rules! add_action {\n            // With shortcut and method arguments\n            ($ctx:expr, $view:expr, $desc:expr, $shortcut:expr, $method:ident($($args:expr),*)) => {\n                $ctx.add_view_action($view, $desc, $shortcut, |v| {\n                    v.downcast_mut::<QueriesView>().unwrap().$method($($args),*)\n                })\n            };\n            // Without shortcut but with method arguments\n            ($ctx:expr, $view:expr, $desc:expr, $method:ident($($args:expr),*)) => {\n                $ctx.add_view_action_without_shortcut($view, $desc, |v| {\n                    v.downcast_mut::<QueriesView>().unwrap().$method($($args),*)\n                })\n            };\n            // With shortcut (char or Event), no arguments\n            ($ctx:expr, $view:expr, $desc:expr, $shortcut:expr, $method:ident) => {\n                $ctx.add_view_action($view, $desc, $shortcut, |v| {\n                    v.downcast_mut::<QueriesView>().unwrap().$method()\n                })\n            };\n            // Without shortcut, no arguments\n            ($ctx:expr, $view:expr, $desc:expr, $method:ident) => {\n                $ctx.add_view_action_without_shortcut($view, $desc, |v| {\n                    v.downcast_mut::<QueriesView>().unwrap().$method()\n                })\n            };\n        }\n\n        let delay = context.lock().unwrap().options.view.delay_interval;\n\n        let is_system_processes = matches!(processes_type, Type::ProcessList);\n        let filter = context.lock().unwrap().queries_filter.clone();\n        let limit = context.lock().unwrap().queries_limit.clone();\n\n        let 
update_callback_context = context.clone();\n        let update_callback_filter = filter.clone();\n        let update_callback_limit = limit.clone();\n        let update_callback_process_type = processes_type.clone();\n        let update_callback = move |force: bool| {\n            let mut context = update_callback_context.lock().unwrap();\n            let filter = update_callback_filter.lock().unwrap().clone();\n            let limit = *update_callback_limit.lock().unwrap();\n\n            let start_time = context.options.view.start.clone();\n            let end_time = context.options.view.end.clone();\n\n            match update_callback_process_type {\n                Type::ProcessList => context\n                    .worker\n                    .send(force, WorkerEvent::ProcessList(filter, limit)),\n                Type::SlowQueryLog => context.worker.send(\n                    force,\n                    WorkerEvent::SlowQueryLog(filter, start_time, end_time, limit),\n                ),\n                Type::LastQueryLog => context.worker.send(\n                    force,\n                    WorkerEvent::LastQueryLog(filter, start_time, end_time, limit),\n                ),\n            }\n        };\n\n        let mut table = TableView::<Query, QueriesColumn>::new();\n        table.add_column(QueriesColumn::QueryId, \"query_id\", |c| c.width_min_max(8, 16));\n        table.add_column(QueriesColumn::Cpu, \"cpu\", |c| c.width_min_max(3, 8));\n        table.add_column(QueriesColumn::IOWait, \"io_wait\", |c| c.width_min_max(7, 11));\n        table.add_column(QueriesColumn::CPUWait, \"cpu_wait\", |c| c.width_min_max(8, 12));\n        table.add_column(QueriesColumn::User, \"user\", |c| c.width_min_max(4, 12));\n        table.add_column(QueriesColumn::Threads, \"thr\", |c| c.width_min_max(3, 6));\n        table.add_column(QueriesColumn::Memory, \"mem\", |c| c.width_min_max(3, 8));\n        table.add_column(QueriesColumn::DiskIO, \"disk\", |c| c.width_min_max(4, 
8));\n        table.add_column(QueriesColumn::IO, \"io\", |c| c.width_min_max(2, 8));\n        table.add_column(QueriesColumn::NetIO, \"net\", |c| c.width_min_max(3, 8));\n        table.add_column(QueriesColumn::Elapsed, \"elapsed\", |c| c.width_min_max(7, 11));\n        table.add_column(QueriesColumn::IsCancelled, \"cancel\", |c| {\n            c.width_min_max(1, 6)\n        });\n        table.add_column(QueriesColumn::Query, \"query\", |c| c.width_min(20));\n        table.set_on_submit(|siv, _row, _index| {\n            let context = siv.user_data::<ContextArc>().unwrap().clone();\n            let query_actions = context\n                .lock()\n                .unwrap()\n                .view_actions\n                .iter()\n                .map(|x| &x.description)\n                .cloned()\n                .collect();\n\n            crate::utils::fuzzy_actions(siv, query_actions, move |siv, action_text| {\n                {\n                    log::trace!(\"Triggering {:?} (from query row submit)\", action_text);\n\n                    let mut context = context.lock().unwrap();\n                    if let Some(action) = context\n                        .view_actions\n                        .iter()\n                        .find(|x| x.description.text == action_text)\n                    {\n                        context.pending_view_callback = Some(action.callback.clone());\n                    }\n                }\n                siv.on_event(Event::Refresh);\n            });\n        });\n\n        if matches!(processes_type, Type::LastQueryLog) {\n            table.add_column(QueriesColumn::QueryEnd, \"end\", |c| c.width_min_max(19, 25));\n            table.sort_by(QueriesColumn::QueryEnd, Ordering::Greater);\n        } else {\n            table.sort_by(QueriesColumn::Elapsed, Ordering::Greater);\n        }\n\n        let view_options = context.lock().unwrap().options.view.clone();\n\n        if !view_options.no_subqueries {\n            
table.insert_column(0, QueriesColumn::SubQueries, \"Q#\", |c| c.width_min_max(2, 5));\n        }\n\n        // Only show hostname column when in cluster mode AND no host filter is active\n        let (cluster, selected_host) = {\n            let ctx = context.lock().unwrap();\n            (ctx.options.clickhouse.cluster.is_some(), ctx.selected_host.clone())\n        };\n        if cluster && selected_host.is_none() {\n            table.insert_column(0, QueriesColumn::HostName, \"host\", |c| c.width_min_max(4, 16));\n        }\n\n        table.set_title(title);\n\n        let bg_runner_cv = context.lock().unwrap().background_runner_cv.clone();\n        let bg_runner_force = context.lock().unwrap().background_runner_force.clone();\n        let mut bg_runner = BackgroundRunner::new(delay, bg_runner_cv, bg_runner_force);\n        bg_runner.start(update_callback);\n\n        let processes_view = QueriesView {\n            context: context.clone(),\n            table,\n            items: HashMap::new(),\n            query_id: None,\n            selected_query_ids: HashSet::new(),\n            has_selection_column: false,\n            options: view_options,\n            is_system_processes,\n            filter,\n            limit,\n            clipboard: None,\n            bg_runner,\n        };\n\n        // TODO:\n        // - pause/disable the table if the foreground view had been changed\n        // - space - multiquery selection (KILL, flamegraphs, logs, ...)\n        let mut event_view = views::OnEventView::new(processes_view);\n\n        let context_copy = context.clone();\n        event_view.set_on_pre_event_inner(Event::Refresh, move |v, _| {\n            let action_callback = context_copy.lock().unwrap().pending_view_callback.take();\n            if let Some(action_callback) = action_callback {\n                let result = action_callback.as_ref()(v);\n                match result {\n                    Err(err) => {\n                        return 
Some(EventResult::with_cb_once(move |siv: &mut Cursive| {\n                            siv.add_layer(Dialog::info(err.to_string()));\n                        }));\n                    }\n                    Ok(event) => return event,\n                }\n            }\n            return Some(EventResult::Ignored);\n        });\n        log::debug!(\"Adding views actions\");\n        let mut context = context.lock().unwrap();\n\n        //\n        // NOTE: Place most common first\n        //\n        add_action!(context, &mut event_view, \"Query logs\", 'l', action_show_query_logs);\n        add_action!(context, &mut event_view, \"Query live flamegraph\", 'L', action_show_flamegraph(true, None));\n        add_action!(context, &mut event_view, \"Query profile events\", action_query_profile_events);\n        add_action!(context, &mut event_view, \"Query details\", action_query_details);\n        add_action!(context, &mut event_view, \"Query CPU flamegraph\", 'C', action_show_flamegraph(true, Some(TraceType::CPU)));\n        add_action!(context, &mut event_view, \"Query Real flamegraph\", 'R', action_show_flamegraph(true, Some(TraceType::Real)));\n        add_action!(context, &mut event_view, \"Query memory flamegraph\", 'M', action_show_flamegraph(true, Some(TraceType::Memory)));\n        add_action!(context, &mut event_view, \"Query memory sample flamegraph\", action_show_flamegraph(true, Some(TraceType::MemorySample)));\n        add_action!(context, &mut event_view, \"Query jemalloc sample flamegraph\", action_show_flamegraph(true, Some(TraceType::JemallocSample)));\n        add_action!(context, &mut event_view, \"Query MemoryAllocatedWithoutCheck flamegraph\", action_show_flamegraph(true, Some(TraceType::MemoryAllocatedWithoutCheck)));\n        add_action!(context, &mut event_view, \"Query events flamegraph\", action_show_flamegraph(true, Some(TraceType::ProfileEvent)));\n        add_action!(context, &mut event_view, \"Export to Perfetto\", 'X', 
action_export_perfetto);\n        add_action!(context, &mut event_view, \"Edit query and execute\", Event::AltChar('E'), action_edit_query_and_execute);\n        add_action!(context, &mut event_view, \"Show query\", 'S', action_show_query);\n        add_action!(context, &mut event_view, \"Copy query to clipboard\", 'y', action_copy_query);\n        add_action!(context, &mut event_view, \"EXPLAIN SYNTAX\", 's', action_explain_syntax);\n        add_action!(context, &mut event_view, \"EXPLAIN PLAN\", 'e', action_explain_plan);\n        add_action!(context, &mut event_view, \"EXPLAIN PIPELINE\", 'E', action_explain_pipeline);\n        context.add_view_action(&mut event_view, \"Filter\", '/', move |_v| {\n            return Ok(Some(EventResult::Consumed(Some(Callback::from_fn(\n                move |siv: &mut Cursive| {\n                    let filter_cb = move |siv: &mut Cursive, text: &str| {\n                        siv.call_on_name(view_name, |v: &mut OnEventView<QueriesView>| {\n                            let v = v.get_inner_mut();\n                            log::info!(\"Set filter to '{}'\", text);\n                            *v.filter.lock().unwrap() = text.to_string();\n                            // Trigger update\n                            //\n                            // NOTE: It will require first summary view and only after\n                            // processes view, and this may be slow in case of cluster mode, and\n                            // should be addressed.\n                            v.bg_runner.schedule();\n                        });\n                        siv.pop_layer();\n                    };\n                    show_bottom_prompt(siv, \"/\", filter_cb);\n                },\n            )))));\n        });\n        add_action!(context, &mut event_view, \"Select\", ' ', action_select);\n        add_action!(context, &mut event_view, \"Show all queries\", '-', action_show_all_queries);\n        // It is handy to use \"Shift-\" 
after \"Shift+\" to go back, instead of just \"-\"\n        add_action!(context, &mut event_view, \"Show all queries\", '_', action_show_all_queries);\n        add_action!(context, &mut event_view, \"Show queries on shards\", '+', action_show_queries_on_shards);\n        add_action!(context, &mut event_view, \"Query processors\", 'P', action_query_processors);\n        add_action!(context, &mut event_view, \"Query views\", 'v', action_query_views);\n        add_action!(context, &mut event_view, \"Share Query CPU flamegraph\", action_show_flamegraph(false, Some(TraceType::CPU)));\n        add_action!(context, &mut event_view, \"Share Query Real flamegraph\", action_show_flamegraph(false, Some(TraceType::Real)));\n        add_action!(context, &mut event_view, \"Share Query memory flamegraph\", action_show_flamegraph(false, Some(TraceType::Memory)));\n        add_action!(context, &mut event_view, \"Share Query memory sample flamegraph\", action_show_flamegraph(false, Some(TraceType::MemorySample)));\n        add_action!(context, &mut event_view, \"Share Query jemalloc sample flamegraph\", action_show_flamegraph(false, Some(TraceType::JemallocSample)));\n        add_action!(context, &mut event_view, \"Share Query MemoryAllocatedWithoutCheck flamegraph\", action_show_flamegraph(false, Some(TraceType::MemoryAllocatedWithoutCheck)));\n        add_action!(context, &mut event_view, \"Share Query events flamegraph\", action_show_flamegraph(false, Some(TraceType::ProfileEvent)));\n        add_action!(context, &mut event_view, \"Share Query live flamegraph\", action_show_flamegraph(false, None));\n        add_action!(context, &mut event_view, \"Query CPU flamegraph diff (select 2 with <Space>)\", action_show_flamegraph_diff(TraceType::CPU));\n        add_action!(context, &mut event_view, \"Query Real flamegraph diff (select 2 with <Space>)\", action_show_flamegraph_diff(TraceType::Real));\n        add_action!(context, &mut event_view, \"Query memory flamegraph diff (select 2 
with <Space>)\", action_show_flamegraph_diff(TraceType::Memory));\n        add_action!(context, &mut event_view, \"Query memory sample flamegraph diff (select 2 with <Space>)\", action_show_flamegraph_diff(TraceType::MemorySample));\n        add_action!(context, &mut event_view, \"Query jemalloc sample flamegraph diff (select 2 with <Space>)\", action_show_flamegraph_diff(TraceType::JemallocSample));\n        add_action!(context, &mut event_view, \"Query MemoryAllocatedWithoutCheck flamegraph diff (select 2 with <Space>)\", action_show_flamegraph_diff(TraceType::MemoryAllocatedWithoutCheck));\n        add_action!(context, &mut event_view, \"Query events flamegraph diff (select 2 with <Space>)\", action_show_flamegraph_diff(TraceType::ProfileEvent));\n        add_action!(context, &mut event_view, \"EXPLAIN INDEXES\", 'I', action_explain_indexes);\n        add_action!(context, &mut event_view, \"EXPLAIN PIPELINE graph=1 (share)\", 'G', action_explain_pipeline_graph);\n        add_action!(context, &mut event_view, \"KILL query\", 'K', action_kill_query);\n        add_action!(context, &mut event_view, \"Increase number of queries to render to 20\", '(', action_increase_limit);\n        add_action!(context, &mut event_view, \"Decrease number of queries to render to 20\", ')', action_decrease_limit);\n        return event_view;\n    }\n}\n\nimpl Drop for QueriesView {\n    fn drop(&mut self) {\n        log::debug!(\"Removing views actions\");\n        self.context.lock().unwrap().view_actions.clear();\n    }\n}\n\n// TODO: remove this extra wrapping\nimpl ViewWrapper for QueriesView {\n    wrap_impl_no_move!(self.table: TableView<Query, QueriesColumn>);\n}\n"
  },
  {
    "path": "src/view/query_view.rs",
    "content": "use crate::interpreter::Query;\nuse crate::view::TableViewItem;\nuse crate::view::table_view::TableView;\nuse cursive::theme::{BaseColor, Color, ColorStyle};\nuse cursive::traits::Nameable;\nuse cursive::utils::markup::StyledString;\nuse cursive::views::{NamedView, OnEventView};\nuse cursive::{Cursive, view::ViewWrapper, wrap_impl};\nuse humantime::format_duration;\nuse size::{Base, SizeFormatter, Style};\nuse std::cmp::Ordering;\nuse std::sync::{Arc, Mutex};\nuse std::time::Duration;\n\n#[derive(Copy, Clone, PartialEq, Eq, Hash)]\npub enum QueryDetailsColumn {\n    Name,\n    Current,\n    Rate,\n    // Dynamic columns for diff view: Q1, Q2, ..., QN\n    QueryValue(usize),\n}\n#[derive(Clone, Debug)]\npub struct QueryProcessDetails {\n    name: String,\n    current: u64,\n    rate: f64,\n    // Flag to indicate if this is a diff value that should be highlighted\n    is_diff: bool,\n    // Values from multiple queries (for diff view)\n    query_values: Vec<u64>,\n}\n\nimpl PartialEq<QueryProcessDetails> for QueryProcessDetails {\n    fn eq(&self, other: &Self) -> bool {\n        return *self.name == other.name;\n    }\n}\n\n// TODO:\n// - human print\n// - colored print\n// - auto refresh\n// - implement loadavg like with moving average\nimpl QueryProcessDetails {\n    fn format_value(&self, value: u64) -> String {\n        let fmt_bytes = SizeFormatter::new()\n            .with_base(Base::Base2)\n            .with_style(Style::Abbreviated);\n        let fmt_rows = SizeFormatter::new()\n            .with_base(Base::Base10)\n            .with_style(Style::Abbreviated);\n\n        if self.name.contains(\"Microseconds\") {\n            format!(\"{}\", format_duration(Duration::from_micros(value)))\n        } else if self.name.contains(\"Millisecond\") {\n            format!(\"{}\", format_duration(Duration::from_millis(value)))\n        } else if self.name.contains(\"Ns\") || self.name.contains(\"Nanoseconds\") {\n            format!(\"{}\", 
format_duration(Duration::from_nanos(value)))\n        } else if self.name.contains(\"Bytes\") || self.name.contains(\"Chars\") {\n            fmt_bytes.format(value as i64)\n        } else if value > 1_000 {\n            fmt_rows.format(value as i64)\n        } else {\n            value.to_string()\n        }\n    }\n\n    fn format_rate(&self, rate: f64) -> String {\n        let fmt_bytes = SizeFormatter::new()\n            .with_base(Base::Base2)\n            .with_style(Style::Abbreviated);\n        let fmt_rows = SizeFormatter::new()\n            .with_base(Base::Base10)\n            .with_style(Style::Abbreviated);\n\n        if self.name.contains(\"Microseconds\") {\n            format!(\"{}/s\", format_duration(Duration::from_micros(rate as u64)))\n        } else if self.name.contains(\"Millisecond\") {\n            format!(\"{}/s\", format_duration(Duration::from_millis(rate as u64)))\n        } else if self.name.contains(\"Ns\") || self.name.contains(\"Nanoseconds\") {\n            format!(\"{}/s\", format_duration(Duration::from_nanos(rate as u64)))\n        } else if self.name.contains(\"Bytes\") || self.name.contains(\"Chars\") {\n            fmt_bytes.format(rate as i64) + \"/s\"\n        } else if rate > 1e3 {\n            fmt_rows.format(rate as i64) + \"/s\"\n        } else {\n            format!(\"{:.2}\", rate)\n        }\n    }\n}\n\nimpl TableViewItem<QueryDetailsColumn> for QueryProcessDetails {\n    fn to_column(&self, column: QueryDetailsColumn) -> String {\n        match column {\n            QueryDetailsColumn::Name => self.name.clone(),\n            QueryDetailsColumn::QueryValue(idx) => {\n                if idx < self.query_values.len() {\n                    self.format_value(self.query_values[idx])\n                } else {\n                    String::new()\n                }\n            }\n            QueryDetailsColumn::Current => self.format_value(self.current),\n            QueryDetailsColumn::Rate => 
self.format_rate(self.rate),\n        }\n    }\n\n    fn cmp(&self, other: &Self, column: QueryDetailsColumn) -> Ordering\n    where\n        Self: Sized,\n    {\n        match column {\n            QueryDetailsColumn::Name => self.name.cmp(&other.name),\n            QueryDetailsColumn::Current => self.current.cmp(&other.current),\n            QueryDetailsColumn::Rate => self.rate.total_cmp(&other.rate),\n            QueryDetailsColumn::QueryValue(idx) => {\n                let self_val = self.query_values.get(idx).copied().unwrap_or(0);\n                let other_val = other.query_values.get(idx).copied().unwrap_or(0);\n                self_val.cmp(&other_val)\n            }\n        }\n    }\n\n    fn to_column_styled(&self, column: QueryDetailsColumn) -> StyledString {\n        let text = self.to_column(column);\n\n        // Highlight based on different conditions\n        let should_highlight_miss =\n            matches!(column, QueryDetailsColumn::Name) && self.name.to_lowercase().contains(\"miss\");\n\n        // For diff view, highlight QueryValue columns where values differ\n        let should_highlight_diff = if self.is_diff {\n            if let QueryDetailsColumn::QueryValue(idx) = column {\n                // Check if this value differs from others\n                if let Some(&current_val) = self.query_values.get(idx) {\n                    // Check if any other value is different\n                    self.query_values.iter().any(|&v| v != current_val)\n                } else {\n                    false\n                }\n            } else {\n                false\n            }\n        } else {\n            false\n        };\n\n        if should_highlight_miss {\n            let mut styled = StyledString::new();\n            styled.append_styled(\n                text,\n                ColorStyle::new(Color::Dark(BaseColor::Red), Color::TerminalDefault),\n            );\n            styled\n        } else if should_highlight_diff {\n            
let mut styled = StyledString::new();\n            styled.append_styled(\n                text,\n                ColorStyle::new(Color::Dark(BaseColor::Green), Color::TerminalDefault),\n            );\n            styled\n        } else {\n            StyledString::plain(text)\n        }\n    }\n}\n\npub struct QueryView {\n    table: TableView<QueryProcessDetails, QueryDetailsColumn>,\n    all_items: Vec<QueryProcessDetails>,\n    filter: Arc<Mutex<String>>,\n}\n\nimpl QueryView {\n    fn apply_filter(&mut self) {\n        let filter_text = self.filter.lock().unwrap().clone();\n        let filter_lower = filter_text.to_lowercase();\n\n        let filtered_items: Vec<QueryProcessDetails> = if filter_text.is_empty() {\n            self.all_items.clone()\n        } else {\n            self.all_items\n                .iter()\n                .filter(|item| item.name.to_lowercase().contains(&filter_lower))\n                .cloned()\n                .collect()\n        };\n\n        self.table.set_items_stable(filtered_items);\n    }\n\n    pub fn new(query: Query, view_name: &'static str) -> NamedView<OnEventView<Self>> {\n        Self::new_internal(vec![query], view_name)\n    }\n\n    pub fn new_diff(queries: Vec<Query>, view_name: &'static str) -> NamedView<OnEventView<Self>> {\n        Self::new_internal(queries, view_name)\n    }\n\n    fn new_internal(queries: Vec<Query>, view_name: &'static str) -> NamedView<OnEventView<Self>> {\n        let mut table = TableView::<QueryProcessDetails, QueryDetailsColumn>::new();\n        table.add_column(QueryDetailsColumn::Name, \"Name\", |c| c.width_min(20));\n\n        let is_diff_view = queries.len() > 1;\n\n        if is_diff_view {\n            // Add a column for each query\n            for idx in 0..queries.len() {\n                let col_name = if queries.len() <= 10 {\n                    format!(\"q{}\", idx + 1)\n                } else {\n                    format!(\"q{:02}\", idx + 1)\n                };\n       
         table.add_column(QueryDetailsColumn::QueryValue(idx), &col_name, |c| {\n                    c.width_min_max(7, 12)\n                });\n            }\n        } else {\n            table.add_column(QueryDetailsColumn::Current, \"Current\", |c| {\n                c.width_min_max(7, 12)\n            });\n            table.add_column(QueryDetailsColumn::Rate, \"Per second rate\", |c| {\n                c.width_min_max(16, 20)\n            });\n        }\n\n        // Collect all profile event names\n        let mut all_event_names = std::collections::HashSet::new();\n        for query in &queries {\n            for name in query.profile_events.keys() {\n                all_event_names.insert(name.clone());\n            }\n        }\n\n        let mut items = Vec::new();\n\n        // Add query duration as a special profile event (only in diff view)\n        if is_diff_view {\n            let mut query_values = Vec::new();\n            let mut max_duration = 0_u64;\n\n            for query in &queries {\n                // Convert elapsed seconds to microseconds for consistency with other time metrics\n                let duration_us = (query.elapsed * 1_000_000.0) as u64;\n                query_values.push(duration_us);\n                max_duration = max_duration.max(duration_us);\n            }\n\n            items.push(QueryProcessDetails {\n                name: \"QueryDurationMicroseconds\".to_string(),\n                current: max_duration,\n                rate: 0.0, // Rate doesn't make sense for query duration\n                is_diff: is_diff_view,\n                query_values,\n            });\n        }\n\n        // Add all other profile events\n        for event_name in all_event_names {\n            let mut query_values = Vec::new();\n            let mut max_value = 0_u64;\n\n            for query in &queries {\n                let value = query.profile_events.get(&event_name).copied().unwrap_or(0);\n                
query_values.push(value);\n                max_value = max_value.max(value);\n            }\n\n            let rate = if !queries.is_empty() {\n                max_value as f64 / queries[0].elapsed\n            } else {\n                0.0\n            };\n\n            items.push(QueryProcessDetails {\n                name: event_name,\n                current: max_value,\n                rate,\n                is_diff: is_diff_view,\n                query_values,\n            });\n        }\n        table.set_items(items.clone());\n\n        table.sort_by(QueryDetailsColumn::Current, Ordering::Greater);\n        table.set_selected_row(0);\n\n        let filter = Arc::new(Mutex::new(String::new()));\n\n        let view = QueryView {\n            table,\n            all_items: items,\n            filter: filter.clone(),\n        };\n\n        let event_view = OnEventView::new(view).on_event('/', move |siv: &mut Cursive| {\n            let filter_cb = move |siv: &mut Cursive, text: &str| {\n                siv.call_on_name(view_name, |v: &mut NamedView<OnEventView<QueryView>>| {\n                    let mut event_view = v.get_mut();\n                    let query_view = event_view.get_inner_mut();\n                    *query_view.filter.lock().unwrap() = text.to_string();\n                    query_view.apply_filter();\n                });\n                siv.pop_layer();\n            };\n\n            crate::view::show_bottom_prompt(siv, \"/\", filter_cb);\n        });\n\n        return event_view.with_name(view_name);\n    }\n}\n\nimpl ViewWrapper for QueryView {\n    wrap_impl!(self.table: TableView<QueryProcessDetails, QueryDetailsColumn>);\n}\n"
  },
  {
    "path": "src/view/registry.rs",
    "content": "use super::provider::ViewProvider;\nuse crate::interpreter::options::ChDigViews;\nuse std::sync::Arc;\n\npub struct ViewRegistry {\n    providers: Vec<(&'static str, Arc<dyn ViewProvider>)>,\n}\n\nimpl ViewRegistry {\n    pub fn new() -> Self {\n        Self {\n            providers: Vec::new(),\n        }\n    }\n\n    pub fn register(&mut self, provider: Arc<dyn ViewProvider>) {\n        let name = provider.name();\n        self.providers.push((name, provider));\n    }\n\n    pub fn get(&self, name: &str) -> Arc<dyn ViewProvider> {\n        self.providers\n            .iter()\n            .find(|(n, _)| *n == name)\n            .map(|(_, p)| p.clone())\n            .unwrap()\n    }\n\n    pub fn get_by_view_type(&self, view_type: ChDigViews) -> Arc<dyn ViewProvider> {\n        self.providers\n            .iter()\n            .find(|(_, p)| p.view_type() == view_type)\n            .map(|(_, p)| p.clone())\n            .unwrap()\n    }\n}\n\nimpl Default for ViewRegistry {\n    fn default() -> Self {\n        Self::new()\n    }\n}\n"
  },
  {
    "path": "src/view/search_history.rs",
    "content": "use std::collections::VecDeque;\nuse std::sync::{Arc, Mutex};\n\n#[derive(Clone)]\npub struct SearchHistory {\n    history: Arc<Mutex<VecDeque<String>>>,\n    current_index: Arc<Mutex<Option<usize>>>,\n    temp_content: Arc<Mutex<String>>,\n}\n\nimpl SearchHistory {\n    pub fn new() -> Self {\n        SearchHistory {\n            history: Arc::new(Mutex::new(VecDeque::new())),\n            current_index: Arc::new(Mutex::new(None)),\n            temp_content: Arc::new(Mutex::new(String::new())),\n        }\n    }\n\n    pub fn add_entry(&self, entry: String) {\n        if entry.is_empty() {\n            return;\n        }\n        let mut history = self.history.lock().unwrap();\n\n        // Remove duplicate if it exists\n        if let Some(pos) = history.iter().position(|x| x == &entry) {\n            history.remove(pos);\n        }\n\n        // Add to front\n        history.push_front(entry);\n    }\n\n    pub fn reset_index(&self) {\n        *self.current_index.lock().unwrap() = None;\n    }\n\n    pub fn navigate_up(&self, current_content: &str) -> Option<String> {\n        let history = self.history.lock().unwrap();\n        if history.is_empty() {\n            return None;\n        }\n\n        let mut index = self.current_index.lock().unwrap();\n        let mut temp = self.temp_content.lock().unwrap();\n\n        match *index {\n            None => {\n                // First time pressing up - save current content and go to most recent\n                *temp = current_content.to_string();\n                *index = Some(0);\n                Some(history[0].clone())\n            }\n            Some(i) => {\n                // Move to older entry\n                if i + 1 < history.len() {\n                    *index = Some(i + 1);\n                    Some(history[i + 1].clone())\n                } else {\n                    None\n                }\n            }\n        }\n    }\n\n    pub fn navigate_down(&self) -> Option<String> {\n     
   let history = self.history.lock().unwrap();\n        let mut index = self.current_index.lock().unwrap();\n        let temp = self.temp_content.lock().unwrap();\n\n        match *index {\n            None => None,\n            Some(0) => {\n                // Back to the temporary content\n                *index = None;\n                Some(temp.clone())\n            }\n            Some(i) => {\n                // Move to newer entry\n                *index = Some(i - 1);\n                Some(history[i - 1].clone())\n            }\n        }\n    }\n}\n\nimpl Default for SearchHistory {\n    fn default() -> Self {\n        Self::new()\n    }\n}\n"
  },
  {
    "path": "src/view/settings_view.rs",
    "content": "use crate::interpreter::{ContextArc, options::ChDigViews};\nuse cursive::{\n    Cursive,\n    event::{Event, Key},\n    theme::Effect,\n    utils::markup::StyledString,\n    view::{Nameable, Resizable},\n    views::{\n        Checkbox, Dialog, DummyView, EditView, LinearLayout, OnEventView, ScrollView, TextView,\n    },\n};\n\nfn apply_settings(siv: &mut Cursive, context: &ContextArc) {\n    let history = siv\n        .call_on_name(\"set_history\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let internal_queries = siv\n        .call_on_name(\"set_internal_queries\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let limit_str = siv\n        .call_on_name(\"set_limit\", |v: &mut EditView| v.get_content())\n        .unwrap();\n    let logs_order_desc = siv\n        .call_on_name(\"set_logs_order_desc\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let skip_unavailable_shards = siv\n        .call_on_name(\"set_skip_unavailable_shards\", |v: &mut Checkbox| {\n            v.is_checked()\n        })\n        .unwrap();\n\n    let delay_str = siv\n        .call_on_name(\"set_delay_interval\", |v: &mut EditView| v.get_content())\n        .unwrap();\n    let group_by = siv\n        .call_on_name(\"set_group_by\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let no_subqueries = siv\n        .call_on_name(\"set_no_subqueries\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let wrap = siv\n        .call_on_name(\"set_wrap\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let no_strip = siv\n        .call_on_name(\"set_no_strip_hostname_suffix\", |v: &mut Checkbox| {\n            v.is_checked()\n        })\n        .unwrap();\n    let queries_filter = siv\n        .call_on_name(\"set_queries_filter\", |v: &mut EditView| {\n            (*v.get_content()).clone()\n        })\n        .unwrap();\n    let queries_limit_str = siv\n        .call_on_name(\"set_queries_limit\", |v: 
&mut EditView| v.get_content())\n        .unwrap();\n    let start_str = siv\n        .call_on_name(\"set_start\", |v: &mut EditView| v.get_content())\n        .unwrap();\n    let end_str = siv\n        .call_on_name(\"set_end\", |v: &mut EditView| v.get_content())\n        .unwrap();\n\n    let otel = siv\n        .call_on_name(\"set_otel\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let trace_log = siv\n        .call_on_name(\"set_trace_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let query_metric = siv\n        .call_on_name(\"set_query_metric_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let part_log = siv\n        .call_on_name(\"set_part_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let query_thread = siv\n        .call_on_name(\"set_query_thread_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let text_log = siv\n        .call_on_name(\"set_text_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let text_log_android = siv\n        .call_on_name(\"set_text_log_android\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let per_server = siv\n        .call_on_name(\"set_per_server\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n\n    let metric_log = siv\n        .call_on_name(\"set_metric_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let async_metric_log = siv\n        .call_on_name(\"set_async_metric_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let async_insert_log = siv\n        .call_on_name(\"set_async_insert_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let error_log = siv\n        .call_on_name(\"set_error_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let s3_queue_log = siv\n        .call_on_name(\"set_s3_queue_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let azure_queue_log = siv\n        
.call_on_name(\"set_azure_queue_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let blob_storage_log = siv\n        .call_on_name(\"set_blob_storage_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let bg_pool_log = siv\n        .call_on_name(\"set_bg_pool_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let session_log = siv\n        .call_on_name(\"set_session_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n    let zk_log = siv\n        .call_on_name(\"set_zk_log\", |v: &mut Checkbox| v.is_checked())\n        .unwrap();\n\n    let limit: u64 = match limit_str.parse() {\n        Ok(v) => v,\n        Err(_) => {\n            siv.add_layer(Dialog::info(\"Invalid limit value\"));\n            return;\n        }\n    };\n    let delay_ms: u64 = match delay_str.parse() {\n        Ok(v) => v,\n        Err(_) => {\n            siv.add_layer(Dialog::info(\"Invalid delay_interval value\"));\n            return;\n        }\n    };\n    let queries_limit: u64 = match queries_limit_str.parse() {\n        Ok(v) => v,\n        Err(_) => {\n            siv.add_layer(Dialog::info(\"Invalid queries_limit value\"));\n            return;\n        }\n    };\n    let new_start = match start_str.parse::<crate::common::RelativeDateTime>() {\n        Ok(v) => v,\n        Err(err) => {\n            siv.add_layer(Dialog::info(format!(\"Invalid start: {}\", err)));\n            return;\n        }\n    };\n    let new_end = match end_str.parse::<crate::common::RelativeDateTime>() {\n        Ok(v) => v,\n        Err(err) => {\n            siv.add_layer(Dialog::info(format!(\"Invalid end: {}\", err)));\n            return;\n        }\n    };\n\n    {\n        let mut ctx = context.lock().unwrap();\n        ctx.options.clickhouse.history = history;\n        ctx.options.clickhouse.internal_queries = internal_queries;\n        ctx.options.clickhouse.limit = limit;\n        ctx.options.clickhouse.logs_order = if logs_order_desc 
{\n            crate::interpreter::options::LogsOrder::Desc\n        } else {\n            crate::interpreter::options::LogsOrder::Asc\n        };\n        ctx.options.clickhouse.skip_unavailable_shards = skip_unavailable_shards;\n\n        ctx.options.view.delay_interval = std::time::Duration::from_millis(delay_ms);\n        ctx.options.view.group_by = group_by;\n        ctx.options.view.no_subqueries = no_subqueries;\n        ctx.options.view.wrap = wrap;\n        ctx.options.view.no_strip_hostname_suffix = no_strip;\n        *ctx.queries_filter.lock().unwrap() = queries_filter;\n        ctx.options.view.queries_limit = queries_limit;\n        *ctx.queries_limit.lock().unwrap() = queries_limit;\n        ctx.options.view.start = new_start;\n        ctx.options.view.end = new_end;\n\n        ctx.options.perfetto.opentelemetry_span_log = otel;\n        ctx.options.perfetto.trace_log = trace_log;\n        ctx.options.perfetto.query_metric_log = query_metric;\n        ctx.options.perfetto.part_log = part_log;\n        ctx.options.perfetto.query_thread_log = query_thread;\n        ctx.options.perfetto.text_log = text_log;\n        ctx.options.perfetto.text_log_android = text_log_android;\n        ctx.options.perfetto.per_server = per_server;\n        ctx.options.perfetto.metric_log = metric_log;\n        ctx.options.perfetto.asynchronous_metric_log = async_metric_log;\n        ctx.options.perfetto.asynchronous_insert_log = async_insert_log;\n        ctx.options.perfetto.error_log = error_log;\n        ctx.options.perfetto.s3_queue_log = s3_queue_log;\n        ctx.options.perfetto.azure_queue_log = azure_queue_log;\n        ctx.options.perfetto.blob_storage_log = blob_storage_log;\n        ctx.options.perfetto.background_schedule_pool_log = bg_pool_log;\n        ctx.options.perfetto.session_log = session_log;\n        ctx.options.perfetto.aggregated_zookeeper_log = zk_log;\n\n        ctx.trigger_view_refresh();\n    }\n    siv.pop_layer();\n}\n\npub fn 
show_settings_dialog(siv: &mut Cursive) {\n    if siv.find_name::<Dialog>(\"settings\").is_some() {\n        siv.pop_layer();\n        return;\n    }\n\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n    let (opts, server_version, selected_host, current_view, queries_filter) = {\n        let ctx = context.lock().unwrap();\n        (\n            ctx.options.clone(),\n            ctx.server_version.clone(),\n            ctx.selected_host.clone(),\n            ctx.current_view,\n            ctx.queries_filter.lock().unwrap().clone(),\n        )\n    };\n\n    let bold = |s: &str| TextView::new(StyledString::styled(s, Effect::Bold));\n    let checkbox_row = |label: &str, name: &str, checked: bool| {\n        LinearLayout::horizontal()\n            .child(DummyView.fixed_width(2))\n            .child(Checkbox::new().with_checked(checked).with_name(name))\n            .child(TextView::new(format!(\" {}\", label)))\n    };\n    let edit_row = |label: &str, name: &str, value: &str, width: usize| {\n        LinearLayout::horizontal()\n            .child(TextView::new(format!(\"  {}: \", label)))\n            .child(\n                EditView::new()\n                    .content(value)\n                    .with_name(name)\n                    .fixed_width(width),\n            )\n    };\n\n    let mut layout = LinearLayout::vertical();\n\n    // ClickHouse\n    layout.add_child(bold(\"ClickHouse:\"));\n    layout.add_child(TextView::new(format!(\n        \"  url: {}\",\n        opts.clickhouse.url_safe\n    )));\n    if let Some(ref cluster) = opts.clickhouse.cluster {\n        layout.add_child(TextView::new(format!(\"  cluster: {}\", cluster)));\n    }\n    layout.add_child(checkbox_row(\n        \"history\",\n        \"set_history\",\n        opts.clickhouse.history,\n    ));\n    layout.add_child(checkbox_row(\n        \"internal_queries\",\n        \"set_internal_queries\",\n        opts.clickhouse.internal_queries,\n    ));\n    
layout.add_child(edit_row(\n        \"limit\",\n        \"set_limit\",\n        &opts.clickhouse.limit.to_string(),\n        12,\n    ));\n    layout.add_child(checkbox_row(\n        \"logs_order=desc (newest first)\",\n        \"set_logs_order_desc\",\n        opts.clickhouse.logs_order == crate::interpreter::options::LogsOrder::Desc,\n    ));\n    layout.add_child(checkbox_row(\n        \"skip_unavailable_shards\",\n        \"set_skip_unavailable_shards\",\n        opts.clickhouse.skip_unavailable_shards,\n    ));\n    layout.add_child(TextView::new(format!(\n        \"  server_version: {}\",\n        server_version\n    )));\n    layout.add_child(DummyView);\n\n    // View\n    layout.add_child(bold(\"View:\"));\n    layout.add_child(edit_row(\n        \"delay_interval (ms)\",\n        \"set_delay_interval\",\n        &opts.view.delay_interval.as_millis().to_string(),\n        12,\n    ));\n    layout.add_child(checkbox_row(\"group_by\", \"set_group_by\", opts.view.group_by));\n    layout.add_child(checkbox_row(\n        \"no_subqueries\",\n        \"set_no_subqueries\",\n        opts.view.no_subqueries,\n    ));\n    layout.add_child(checkbox_row(\"wrap\", \"set_wrap\", opts.view.wrap));\n    layout.add_child(checkbox_row(\n        \"no_strip_hostname_suffix\",\n        \"set_no_strip_hostname_suffix\",\n        opts.view.no_strip_hostname_suffix,\n    ));\n    layout.add_child(edit_row(\n        \"queries_filter\",\n        \"set_queries_filter\",\n        &queries_filter,\n        30,\n    ));\n    layout.add_child(edit_row(\n        \"queries_limit\",\n        \"set_queries_limit\",\n        &opts.view.queries_limit.to_string(),\n        12,\n    ));\n    layout.add_child(edit_row(\n        \"start\",\n        \"set_start\",\n        &opts.view.start.to_editable_string(),\n        22,\n    ));\n    layout.add_child(edit_row(\n        \"end\",\n        \"set_end\",\n        &opts.view.end.to_editable_string(),\n        22,\n    ));\n    
layout.add_child(DummyView);\n\n    // Service (read-only)\n    layout.add_child(bold(\"Service:\"));\n    layout.add_child(TextView::new(format!(\n        \"  log: {}\",\n        opts.service.log.as_deref().unwrap_or(\"(none)\")\n    )));\n    layout.add_child(TextView::new(format!(\n        \"  chdig_config: {}\",\n        opts.service.chdig_config.as_deref().unwrap_or(\"(none)\")\n    )));\n    layout.add_child(DummyView);\n\n    // Perfetto (query)\n    layout.add_child(bold(\"Perfetto (query):\"));\n    layout.add_child(checkbox_row(\n        \"opentelemetry_span_log\",\n        \"set_otel\",\n        opts.perfetto.opentelemetry_span_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"trace_log\",\n        \"set_trace_log\",\n        opts.perfetto.trace_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"query_metric_log\",\n        \"set_query_metric_log\",\n        opts.perfetto.query_metric_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"part_log\",\n        \"set_part_log\",\n        opts.perfetto.part_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"query_thread_log\",\n        \"set_query_thread_log\",\n        opts.perfetto.query_thread_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"text_log\",\n        \"set_text_log\",\n        opts.perfetto.text_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"text_log_android\",\n        \"set_text_log_android\",\n        opts.perfetto.text_log_android,\n    ));\n    layout.add_child(checkbox_row(\n        \"per_server\",\n        \"set_per_server\",\n        opts.perfetto.per_server,\n    ));\n    layout.add_child(DummyView);\n\n    // Perfetto (server)\n    layout.add_child(bold(\"Perfetto (server):\"));\n    layout.add_child(checkbox_row(\n        \"metric_log\",\n        \"set_metric_log\",\n        opts.perfetto.metric_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"asynchronous_metric_log\",\n        \"set_async_metric_log\",\n   
     opts.perfetto.asynchronous_metric_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"asynchronous_insert_log\",\n        \"set_async_insert_log\",\n        opts.perfetto.asynchronous_insert_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"error_log\",\n        \"set_error_log\",\n        opts.perfetto.error_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"s3_queue_log\",\n        \"set_s3_queue_log\",\n        opts.perfetto.s3_queue_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"azure_queue_log\",\n        \"set_azure_queue_log\",\n        opts.perfetto.azure_queue_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"blob_storage_log\",\n        \"set_blob_storage_log\",\n        opts.perfetto.blob_storage_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"background_schedule_pool_log\",\n        \"set_bg_pool_log\",\n        opts.perfetto.background_schedule_pool_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"session_log\",\n        \"set_session_log\",\n        opts.perfetto.session_log,\n    ));\n    layout.add_child(checkbox_row(\n        \"aggregated_zookeeper_log\",\n        \"set_zk_log\",\n        opts.perfetto.aggregated_zookeeper_log,\n    ));\n    layout.add_child(DummyView);\n\n    // Runtime (read-only)\n    layout.add_child(bold(\"Runtime:\"));\n    layout.add_child(TextView::new(format!(\n        \"  selected_host: {}\",\n        selected_host.as_deref().unwrap_or(\"(all)\")\n    )));\n    layout.add_child(TextView::new(format!(\n        \"  current_view: {:?}\",\n        current_view.unwrap_or(ChDigViews::Queries)\n    )));\n\n    let context_for_apply = context.clone();\n    let context_for_enter = context;\n\n    let content = OnEventView::new(ScrollView::new(layout)).on_pre_event(\n        Event::Key(Key::Enter),\n        move |siv| {\n            apply_settings(siv, &context_for_enter);\n        },\n    );\n\n    let dialog = Dialog::new()\n        
.title(\"Settings\")\n        .content(content)\n        .button(\"Apply\", move |siv| {\n            apply_settings(siv, &context_for_apply);\n        })\n        .button(\"Cancel\", |siv| {\n            siv.pop_layer();\n        });\n    siv.add_layer(dialog.with_name(\"settings\"));\n}\n"
  },
  {
    "path": "src/view/sql_query_view.rs",
    "content": "use std::cmp::Ordering;\nuse std::sync::{Arc, Mutex};\n\nuse anyhow::{Result, anyhow};\nuse size::{Base, SizeFormatter, Style};\n\nuse crate::interpreter::{BackgroundRunner, ContextArc, WorkerEvent, clickhouse::Columns};\nuse crate::view::TableViewItem;\nuse crate::view::table_view::TableView;\nuse crate::wrap_impl_no_move;\nuse chrono::{DateTime, Local};\nuse chrono_tz::Tz;\nuse clickhouse_rs::types::SqlType;\nuse cursive::Cursive;\nuse cursive::view::ViewWrapper;\nuse cursive::views::OnEventView;\n\n#[derive(Clone, Debug, PartialEq, PartialOrd)]\npub enum Field {\n    String(String),\n    Float64(f64),\n    Float32(f32),\n    UInt64(u64),\n    UInt32(u32),\n    UInt16(u16),\n    UInt8(u8),\n    Int64(i64),\n    Int32(i32),\n    Int16(i16),\n    Int8(i8),\n    DateTime(DateTime<Local>),\n    // TODO: support more types\n}\n\nimpl Field {\n    // TODO: write this in a better way\n    pub fn as_datetime(&self) -> Option<DateTime<Local>> {\n        if let Field::DateTime(dt) = self {\n            Some(*dt)\n        } else {\n            None\n        }\n    }\n}\n\nimpl std::fmt::Display for Field {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        // TODO: add human time formatter\n        let fmt_bytes = SizeFormatter::new()\n            // TODO: use Base10 for rows and Base2 for bytes\n            .with_base(Base::Base2)\n            .with_style(Style::Abbreviated);\n\n        match *self {\n            Self::String(ref value) => write!(f, \"{}\", value),\n            Self::Float64(ref value) => write!(f, \"{:.2}\", value),\n            Self::Float32(ref value) => write!(f, \"{:.2}\", value),\n            Self::UInt64(ref value) => {\n                if *value < 1_000 {\n                    write!(f, \"{}\", value)\n                } else {\n                    write!(f, \"{}\", fmt_bytes.format(*value as i64))\n                }\n            }\n            Self::UInt32(ref value) => write!(f, \"{}\", value),\n      
      Self::UInt16(ref value) => write!(f, \"{}\", value),\n            Self::UInt8(ref value) => write!(f, \"{}\", value),\n            Self::Int64(ref value) => {\n                if *value < 1_000 {\n                    write!(f, \"{}\", value)\n                } else {\n                    write!(f, \"{}\", fmt_bytes.format(*value))\n                }\n            }\n            Self::Int32(ref value) => write!(f, \"{}\", value),\n            Self::Int16(ref value) => write!(f, \"{}\", value),\n            Self::Int8(ref value) => write!(f, \"{}\", value),\n            Self::DateTime(ref value) => write!(f, \"{}\", value),\n        }\n    }\n}\n\n#[derive(Clone, Default, Debug)]\n// Fields:\n// - list of fields\n// - indices of fields to compare (columns_to_compare)\npub struct Row(pub Vec<Field>, Vec<usize>);\n\nimpl PartialEq<Row> for Row {\n    fn eq(&self, other: &Self) -> bool {\n        for &idx in &self.1 {\n            if self.0[idx] != other.0[idx] {\n                return false;\n            }\n        }\n        return true;\n    }\n}\n\nimpl TableViewItem<u8> for Row {\n    fn to_column(&self, column: u8) -> String {\n        return self.0[column as usize].to_string();\n    }\n\n    fn cmp(&self, other: &Self, column: u8) -> Ordering\n    where\n        Self: Sized,\n    {\n        let index = column as usize;\n        let field_lhs = &self.0[index];\n        let field_rhs = &other.0[index];\n        return field_lhs.partial_cmp(field_rhs).unwrap();\n    }\n}\n\ntype RowCallback = Arc<dyn Fn(&mut Cursive, Vec<&'static str>, Row) + Send + Sync>;\n\n/// (bar_column_name, source_column_name)\ntype BarColumnConfig = (&'static str, &'static str);\n\nconst BAR_WIDTH: usize = 10;\nconst BAR_FILLED: char = '█';\nconst BAR_EMPTY: char = '░';\n\nfn render_bar(value: f64, max: f64) -> String {\n    if max <= 0.0 {\n        return std::iter::repeat_n(BAR_EMPTY, BAR_WIDTH).collect();\n    }\n    let filled = ((value / max) * BAR_WIDTH as f64).round() as 
usize;\n    let filled = filled.min(BAR_WIDTH);\n    std::iter::repeat_n(BAR_FILLED, filled)\n        .chain(std::iter::repeat_n(BAR_EMPTY, BAR_WIDTH - filled))\n        .collect()\n}\n\nfn field_to_f64(field: &Field) -> f64 {\n    match *field {\n        Field::UInt64(v) => v as f64,\n        Field::UInt32(v) => v as f64,\n        Field::UInt16(v) => v as f64,\n        Field::UInt8(v) => v as f64,\n        Field::Int64(v) => v as f64,\n        Field::Int32(v) => v as f64,\n        Field::Int16(v) => v as f64,\n        Field::Int8(v) => v as f64,\n        Field::Float64(v) => v,\n        Field::Float32(v) => v as f64,\n        _ => 0.0,\n    }\n}\n\npub struct SQLQueryView {\n    table: TableView<Row, u8>,\n\n    // Indices of columns to compare for PartialEq\n    columns_to_compare: Vec<usize>,\n    columns: Vec<&'static str>,\n    on_submit: Option<RowCallback>,\n\n    // Store all items and filter\n    all_items: Vec<Row>,\n    filter: Arc<Mutex<String>>,\n\n    bar_columns: Vec<BarColumnConfig>,\n\n    #[allow(unused)]\n    bg_runner: BackgroundRunner,\n}\n\nimpl SQLQueryView {\n    pub fn set_title<S: Into<String>>(&mut self, title: S) {\n        self.table.set_title(title);\n    }\n\n    pub fn update(&mut self, block: Columns) -> Result<()> {\n        let mut items = Vec::new();\n\n        for i in 0..block.row_count() {\n            let mut row = Row::default();\n            for &column in &self.columns {\n                let sql_column = block\n                    .columns()\n                    .iter()\n                    .find(|c| c.name() == column)\n                    .ok_or(anyhow!(\"Cannot get {} column\", column))?;\n                let field = match sql_column.sql_type() {\n                    SqlType::String => Field::String(block.get::<_, _>(i, column)?),\n                    SqlType::Float64 => Field::Float64(block.get::<_, _>(i, column)?),\n                    SqlType::Float32 => Field::Float32(block.get::<_, _>(i, column)?),\n                
    SqlType::UInt64 => Field::UInt64(block.get::<_, _>(i, column)?),\n                    SqlType::UInt32 => Field::UInt32(block.get::<_, _>(i, column)?),\n                    SqlType::UInt16 => Field::UInt16(block.get::<_, _>(i, column)?),\n                    SqlType::UInt8 => Field::UInt8(block.get::<_, _>(i, column)?),\n                    SqlType::Int64 => Field::Int64(block.get::<_, _>(i, column)?),\n                    SqlType::Int32 => Field::Int32(block.get::<_, _>(i, column)?),\n                    SqlType::Int16 => Field::Int16(block.get::<_, _>(i, column)?),\n                    SqlType::Int8 => Field::Int8(block.get::<_, _>(i, column)?),\n                    SqlType::DateTime(_) => Field::DateTime(\n                        block\n                            .get::<DateTime<Tz>, _>(i, column)?\n                            .with_timezone(&Local),\n                    ),\n                    _ => unreachable!(\"Type for column {} not implemented\", column),\n                };\n                row.0.push(field);\n            }\n            row.1 = self.columns_to_compare.clone();\n            items.push(row);\n        }\n\n        // Store all items, compute bars, and apply filtering\n        self.all_items = items;\n        self.compute_bars();\n        self.apply_filter();\n\n        return Ok(());\n    }\n\n    fn apply_filter(&mut self) {\n        let filter_text = self.filter.lock().unwrap().clone();\n        let filter_lower = filter_text.to_lowercase();\n\n        let filtered_items: Vec<Row> = if filter_text.is_empty() {\n            self.all_items.clone()\n        } else {\n            self.all_items\n                .iter()\n                .filter(|row| {\n                    // Check if any column contains the filter text (case-insensitive)\n                    row.0\n                        .iter()\n                        .any(|field| field.to_string().to_lowercase().contains(&filter_lower))\n                })\n                .cloned()\n   
             .collect()\n        };\n\n        self.table.set_items_stable(filtered_items);\n    }\n\n    pub fn set_bar_columns(&mut self, configs: Vec<BarColumnConfig>) {\n        self.bar_columns = configs;\n    }\n\n    fn compute_bars(&mut self) {\n        if self.bar_columns.is_empty() {\n            return;\n        }\n\n        let resolved: Vec<(usize, usize)> = self\n            .bar_columns\n            .iter()\n            .filter_map(|(bar_name, src_name)| {\n                let bar_idx = self.columns.iter().position(|c| c == bar_name)?;\n                let src_idx = self.columns.iter().position(|c| c == src_name)?;\n                Some((bar_idx, src_idx))\n            })\n            .collect();\n\n        for &(bar_idx, src_idx) in &resolved {\n            let max = self\n                .all_items\n                .iter()\n                .map(|row| field_to_f64(&row.0[src_idx]))\n                .fold(0.0_f64, f64::max);\n\n            for row in &mut self.all_items {\n                let value = field_to_f64(&row.0[src_idx]);\n                row.0[bar_idx] = Field::String(render_bar(value, max));\n            }\n        }\n    }\n\n    pub fn set_on_submit<F>(&mut self, cb: F)\n    where\n        F: Fn(&mut Cursive, Vec<&'static str>, Row) + Send + Sync + 'static,\n    {\n        self.on_submit = Some(Arc::new(cb));\n    }\n\n    pub fn new(\n        context: ContextArc,\n        view_name: &'static str,\n        sort_by: &'static str,\n        columns: Vec<&'static str>,\n        columns_to_compare: Vec<&'static str>,\n        query: String,\n    ) -> Result<OnEventView<Self>> {\n        let delay = context.lock().unwrap().options.view.delay_interval;\n\n        let update_callback_context = context.clone();\n        let update_callback = move |force: bool| {\n            update_callback_context\n                .lock()\n                .unwrap()\n                .worker\n                .send(force, WorkerEvent::SQLQuery(view_name, 
query.clone()));\n        };\n\n        let columns = parse_columns(&columns);\n\n        // Convert column names to indices\n        let columns_to_compare: Vec<usize> = columns_to_compare\n            .iter()\n            .map(|&col_name| {\n                columns\n                    .iter()\n                    .position(|&c| c == col_name)\n                    .unwrap_or_else(|| panic!(\"Column '{}' not found in columns list\", col_name))\n            })\n            .collect();\n\n        let mut table = TableView::<Row, u8>::new();\n        for (i, column) in columns.iter().enumerate() {\n            if column.starts_with('_') {\n                continue;\n            }\n            let min_width = column.len();\n\n            // Use width_min for columns in columns_to_compare (they should expand)\n            if columns_to_compare.contains(&i) {\n                table.add_column(i as u8, column.to_string(), |c| c.width_min(min_width));\n            } else {\n                let max_width = 20; // Reasonable max for most columns\n                table.add_column(i as u8, column.to_string(), |c| {\n                    c.width_min_max(min_width, max_width)\n                });\n            }\n        }\n        let sort_by_column = columns\n            .iter()\n            .enumerate()\n            .find_map(|(i, c)| if *c == sort_by { Some(i) } else { None })\n            .expect(\"sort_by column not found in columns\");\n        table.sort_by(sort_by_column as u8, Ordering::Greater);\n        table.set_on_submit(|siv, _row, index| {\n            if index.is_none() {\n                return;\n            }\n\n            let (on_submit, columns, item) = siv\n                .call_on_name(view_name, |table: &mut OnEventView<SQLQueryView>| {\n                    let table = table.get_inner_mut();\n                    let columns = table.columns.clone();\n                    let item = table.table.borrow_item(index.unwrap()).unwrap();\n                    
return (table.on_submit.clone(), columns, item.clone());\n                })\n                .unwrap();\n            if let Some(on_submit) = on_submit {\n                on_submit(siv, columns, item);\n            }\n        });\n\n        let bg_runner_cv = context.lock().unwrap().background_runner_cv.clone();\n        let bg_runner_force = context.lock().unwrap().background_runner_force.clone();\n        let mut bg_runner = BackgroundRunner::new(delay, bg_runner_cv, bg_runner_force);\n        bg_runner.start(update_callback);\n\n        let filter = Arc::new(Mutex::new(String::new()));\n\n        let view = SQLQueryView {\n            table,\n            columns,\n            columns_to_compare,\n            on_submit: None,\n            all_items: Vec::new(),\n            filter: filter.clone(),\n            bar_columns: Vec::new(),\n            bg_runner,\n        };\n\n        // Wrap with OnEventView to add '/' key binding for filtering\n        let event_view = OnEventView::new(view).on_event('/', move |siv: &mut Cursive| {\n            let filter_cb = move |siv: &mut Cursive, text: &str| {\n                siv.call_on_name(view_name, |v: &mut OnEventView<SQLQueryView>| {\n                    let v = v.get_inner_mut();\n                    log::info!(\"Set filter to '{}'\", text);\n                    *v.filter.lock().unwrap() = text.to_string();\n                    v.apply_filter();\n                });\n                siv.pop_layer();\n            };\n\n            crate::view::show_bottom_prompt(siv, \"/\", filter_cb);\n        });\n\n        return Ok(event_view);\n    }\n}\n\nimpl ViewWrapper for SQLQueryView {\n    wrap_impl_no_move!(self.table: TableView<Row, u8>);\n}\n\nfn parse_columns(columns: &[&'static str]) -> Vec<&'static str> {\n    let mut result = Vec::new();\n    for column in columns.iter() {\n        // NOTE: this is broken for \"x AS `foo bar`\"\n        let column_name = column.split(' ').next_back().unwrap();\n        
result.push(column_name);\n    }\n    return result;\n}\n"
  },
  {
    "path": "src/view/summary_view.rs",
    "content": "use chrono::{DateTime, Local};\nuse cursive::{\n    Printer, Vec2,\n    event::{AnyCb, Event, EventResult},\n    theme::BaseColor,\n    utils::markup::StyledString,\n    view::{Finder, Nameable, Resizable, Selector, View},\n    views,\n};\nuse humantime::format_duration;\nuse size::{Base, SizeFormatter, Style};\nuse std::rc::Rc;\nuse std::time::Duration;\n\nuse crate::common::sparkline::SparklineBuffer;\nuse crate::interpreter::{\n    BackgroundRunner, ContextArc, WorkerEvent, clickhouse::ClickHouseServerSummary,\n};\n\nconst SPARKLINE_CAPACITY: usize = 60;\nconst SPARKLINE_WIDTH: usize = 8;\n\nstruct SparklineSet {\n    cpu: SparklineBuffer,\n    memory: SparklineBuffer,\n    queries: SparklineBuffer,\n    merges: SparklineBuffer,\n}\n\nimpl SparklineSet {\n    fn new() -> Self {\n        Self {\n            cpu: SparklineBuffer::new(SPARKLINE_CAPACITY),\n            memory: SparklineBuffer::new(SPARKLINE_CAPACITY),\n            queries: SparklineBuffer::new(SPARKLINE_CAPACITY),\n            merges: SparklineBuffer::new(SPARKLINE_CAPACITY),\n        }\n    }\n}\n\npub struct SummaryView {\n    prev_summary: Option<ClickHouseServerSummary>,\n    prev_update_time: Option<DateTime<Local>>,\n\n    layout: views::LinearLayout,\n    sparklines: SparklineSet,\n\n    #[allow(unused)]\n    bg_runner: BackgroundRunner,\n}\n\nfn get_color_for_ratio(used: u64, total: u64) -> cursive::theme::Color {\n    let q = used as f64 / total as f64;\n    return if q > 0.90 {\n        BaseColor::Red.dark()\n    } else if q > 0.5 {\n        BaseColor::Yellow.dark()\n    } else {\n        BaseColor::Green.dark()\n    };\n}\n\nfn get_color_for_bytes(bytes: u64) -> cursive::theme::Color {\n    const TB: u64 = 1 << 40;\n    const PB: u64 = 1 << 50;\n    if bytes > PB {\n        BaseColor::Yellow.light()\n    } else if bytes > 100 * TB {\n        BaseColor::Magenta.dark()\n    } else if bytes > TB {\n        BaseColor::Cyan.dark()\n    } else {\n        
BaseColor::White.dark()\n    }\n}\n\n// TODO add new information:\n// - page cache usage (should be diffed)\nimpl SummaryView {\n    pub fn new(context: ContextArc) -> Self {\n        let delay = context.lock().unwrap().options.view.delay_interval;\n\n        let update_callback_context = context.clone();\n        let update_callback = move |force: bool| {\n            update_callback_context\n                .lock()\n                .unwrap()\n                .worker\n                .send(force, WorkerEvent::Summary);\n        };\n\n        let layout = views::LinearLayout::vertical()\n            .child(\n                views::LinearLayout::horizontal()\n                    .child(views::TextView::new(StyledString::styled(\n                        \"Uptime:\",\n                        BaseColor::Cyan.dark(),\n                    )))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"uptime\"))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(StyledString::styled(\n                        \"Servers:\",\n                        BaseColor::Cyan.dark(),\n                    )))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"servers\"))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(StyledString::styled(\n                        \"Data:\",\n                        BaseColor::Cyan.dark(),\n                    )))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"total_data\"))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(StyledString::styled(\n                        \"CPU:\",\n                        BaseColor::Cyan.dark(),\n                    
)))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"cpu\"))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(StyledString::styled(\n                        \"Queries:\",\n                        BaseColor::Cyan.dark(),\n                    )))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"queries\"))\n                    .child(views::TextView::new(\"\").with_name(\"optional_metrics\")),\n            )\n            .child(\n                views::LinearLayout::horizontal()\n                    .child(views::TextView::new(StyledString::styled(\n                        \"Net recv:\",\n                        BaseColor::Cyan.dark(),\n                    )))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"net_recv\"))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(StyledString::styled(\n                        \"Net sent:\",\n                        BaseColor::Cyan.dark(),\n                    )))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"net_sent\"))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(StyledString::styled(\n                        \"Read:\",\n                        BaseColor::Cyan.dark(),\n                    )))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"disk_read\"))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(StyledString::styled(\n                        \"Write:\",\n                        
BaseColor::Cyan.dark(),\n                    )))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"disk_write\"))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(StyledString::styled(\n                        \"Selected rows:\",\n                        BaseColor::Cyan.dark(),\n                    )))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"selected_rows\"))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(StyledString::styled(\n                        \"Inserted rows:\",\n                        BaseColor::Cyan.dark(),\n                    )))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"inserted_rows\")),\n            )\n            .child(\n                views::LinearLayout::horizontal()\n                    .child(views::TextView::new(StyledString::styled(\n                        \"Threads:\",\n                        BaseColor::Cyan.dark(),\n                    )))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"threads\"))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(StyledString::styled(\n                        \"Pools:\",\n                        BaseColor::Cyan.dark(),\n                    )))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"pools\")),\n            )\n            .child(\n                views::LinearLayout::horizontal()\n                    .child(views::TextView::new(StyledString::styled(\n                        \"Memory:\",\n                       
 BaseColor::Cyan.dark(),\n                    )))\n                    .child(views::DummyView.fixed_width(1))\n                    .child(views::TextView::new(\"\").with_name(\"mem\")),\n            );\n\n        let bg_runner_cv = context.lock().unwrap().background_runner_cv.clone();\n        let bg_runner_force = context\n            .lock()\n            .unwrap()\n            .background_runner_summary_force\n            .clone();\n        let mut bg_runner = BackgroundRunner::new(delay, bg_runner_cv, bg_runner_force);\n        bg_runner.start(update_callback);\n\n        return Self {\n            prev_summary: None,\n            prev_update_time: None,\n            layout,\n            sparklines: SparklineSet::new(),\n            bg_runner,\n        };\n    }\n\n    pub fn set_view_content<S>(&mut self, view_name: &str, content: S)\n    where\n        S: Into<StyledString> + Clone,\n    {\n        self.call_on_name(view_name, move |view: &mut views::TextView| {\n            view.set_content(content);\n        });\n    }\n\n    pub fn update(&mut self, summary: ClickHouseServerSummary) {\n        let fmt = Rc::new(\n            SizeFormatter::new()\n                .with_base(Base::Base2)\n                .with_style(Style::Abbreviated),\n        );\n        let fmt_ref = fmt.as_ref();\n\n        // update_interval is available only since 23.3\n        let update_interval = if summary.update_interval > 0 {\n            summary.update_interval\n        } else {\n            1\n        };\n        let now = Local::now();\n        let mut since_prev_us = (now - self.prev_update_time.unwrap_or(Local::now()))\n            .num_microseconds()\n            .unwrap_or_default() as u64;\n        if since_prev_us == 0 {\n            since_prev_us = 1;\n        }\n\n        {\n            let mut description = StyledString::new();\n            let mut add_description = |prefix: &str, value: u64| {\n                if value > 100_000_000 {\n                    if 
!description.is_empty() {\n                        description.append_plain(\" \");\n                    }\n                    description.append_plain(format!(\"{}: \", prefix));\n                    description.append_styled(\n                        fmt_ref.format(value as i64),\n                        get_color_for_ratio(value, summary.memory.resident),\n                    );\n                }\n            };\n\n            let mut memory_io = summary.memory.io / summary.uptime.server;\n            if let Some(prev_summary) = &self.prev_summary {\n                memory_io = (summary.memory.io.saturating_sub(prev_summary.memory.io)) * 1_000_000\n                    / since_prev_us;\n            }\n\n            add_description(\"Fragmentation\", summary.memory.fragmentation);\n\n            add_description(\"Tracked\", summary.memory.tracked);\n            add_description(\"Tables\", summary.memory.tables);\n            add_description(\"Caches\", summary.memory.caches);\n            add_description(\"Queries\", summary.memory.queries);\n            add_description(\"Merges Mutations\", summary.memory.merges_mutations);\n            add_description(\"Active Merges\", summary.memory.active_merges);\n            add_description(\"Dictionaries\", summary.memory.dictionaries);\n            add_description(\"Indexes\", summary.memory.primary_keys);\n            add_description(\"Index Granulas\", summary.memory.index_granularity);\n            add_description(\"IO\", memory_io);\n            add_description(\"Async Inserts\", summary.memory.async_inserts);\n\n            let memory_no_category = summary\n                .memory\n                .tracked\n                .saturating_sub(summary.memory.tables)\n                .saturating_sub(summary.memory.caches)\n                .saturating_sub(summary.memory.queries)\n                .saturating_sub(summary.memory.active_merges)\n                .saturating_sub(summary.memory.dictionaries)\n                
.saturating_sub(summary.memory.primary_keys)\n                .saturating_sub(summary.memory.index_granularity)\n                .saturating_sub(memory_io)\n                .saturating_sub(summary.memory.async_inserts);\n            add_description(\"Unknown\", memory_no_category);\n\n            self.sparklines.memory.push(summary.memory.resident as f64);\n            let mut content = StyledString::plain(\"\");\n            content.append_styled(\n                fmt_ref.format(summary.memory.resident as i64),\n                get_color_for_ratio(summary.memory.resident, summary.memory.os_total),\n            );\n            content.append_plain(\" / \");\n            content.append_plain(fmt_ref.format(summary.memory.os_total as i64));\n            let spark = self.sparklines.memory.render(SPARKLINE_WIDTH);\n            if !spark.is_empty() {\n                content.append_plain(\" \");\n                content.append_styled(spark, BaseColor::White.dark());\n            }\n            content.append_plain(\" (\");\n            content.append(description);\n            content.append_plain(\")\");\n\n            self.set_view_content(\"mem\", content);\n        }\n\n        {\n            let used_cpus = summary.cpu.user + summary.cpu.system;\n            self.sparklines.cpu.push(used_cpus as f64);\n            let mut content = StyledString::plain(\"\");\n            content.append_styled(\n                used_cpus.to_string(),\n                get_color_for_ratio(used_cpus, summary.cpu.count),\n            );\n            content.append_plain(\" / \");\n            content.append_plain(summary.cpu.count.to_string());\n            let spark = self.sparklines.cpu.render(SPARKLINE_WIDTH);\n            if !spark.is_empty() {\n                content.append_plain(\" \");\n                content.append_styled(spark, BaseColor::White.dark());\n            }\n\n            self.set_view_content(\"cpu\", content);\n        }\n\n        {\n            let mut basic: 
Vec<String> = Vec::new();\n            let mut add_basic = |prefix: &str, value: u64| {\n                if value > 0 {\n                    basic.push(format!(\"{}: {}\", prefix, value));\n                }\n            };\n            add_basic(\"HTTP\", summary.threads.http);\n            add_basic(\"TCP\", summary.threads.tcp);\n            add_basic(\"Interserver\", summary.threads.interserver);\n\n            self.set_view_content(\n                \"threads\",\n                format!(\n                    \"{} / {} ({})\",\n                    summary.threads.os_runnable,\n                    summary.threads.os_total,\n                    basic.join(\", \"),\n                ),\n            );\n        }\n\n        {\n            let mut pools = StyledString::new();\n            let mut add_pool = |prefix: &str, value: u64| {\n                if value > 0 {\n                    pools.append(StyledString::styled(\n                        format!(\"{}: {} \", prefix, value),\n                        get_color_for_ratio(value, summary.cpu.count),\n                    ));\n                }\n            };\n            add_pool(\"Merges\", summary.threads.pools.merges_mutations);\n            add_pool(\"Fetches\", summary.threads.pools.fetches);\n            add_pool(\"Common\", summary.threads.pools.common);\n            add_pool(\"Moves\", summary.threads.pools.moves);\n            add_pool(\"Schedule\", summary.threads.pools.schedule);\n            add_pool(\"Buffer\", summary.threads.pools.buffer_flush);\n            add_pool(\"Distributed\", summary.threads.pools.distributed);\n            add_pool(\"Brokers\", summary.threads.pools.message_broker);\n            add_pool(\"Backups\", summary.threads.pools.backups);\n            add_pool(\"IO\", summary.threads.pools.io);\n            add_pool(\"RemoteIO\", summary.threads.pools.remote_io);\n            add_pool(\"Queries\", summary.threads.pools.queries);\n\n            self.set_view_content(\"pools\", 
pools);\n        }\n\n        self.set_view_content(\n            \"net_recv\",\n            fmt_ref.format((summary.network.receive_bytes / update_interval) as i64),\n        );\n        self.set_view_content(\n            \"net_sent\",\n            fmt_ref.format((summary.network.send_bytes / update_interval) as i64),\n        );\n\n        self.set_view_content(\n            \"disk_read\",\n            fmt_ref.format((summary.blkdev.read_bytes / update_interval) as i64),\n        );\n        self.set_view_content(\n            \"disk_write\",\n            fmt_ref.format((summary.blkdev.write_bytes / update_interval) as i64),\n        );\n\n        let mut selected_rows = summary.rows.selected / summary.uptime.server;\n        let mut inserted_rows = summary.rows.inserted / summary.uptime.server;\n        if let Some(prev_summary) = &self.prev_summary {\n            selected_rows = (summary\n                .rows\n                .selected\n                .saturating_sub(prev_summary.rows.selected))\n                * 1_000_000\n                / since_prev_us;\n            inserted_rows = (summary\n                .rows\n                .inserted\n                .saturating_sub(prev_summary.rows.inserted))\n                * 1_000_000\n                / since_prev_us;\n        }\n        self.set_view_content(\"selected_rows\", fmt_ref.format(selected_rows as i64));\n        self.set_view_content(\"inserted_rows\", fmt_ref.format(inserted_rows as i64));\n\n        self.set_view_content(\n            \"uptime\",\n            format_duration(Duration::from_secs(summary.uptime.server)).to_string(),\n        );\n\n        self.set_view_content(\"servers\", summary.servers.to_string());\n        {\n            let fmt_rows = SizeFormatter::new()\n                .with_base(Base::Base10)\n                .with_style(Style::Abbreviated);\n            let mut content = StyledString::new();\n            content.append_styled(\n                
fmt_rows.format(summary.storages.total_rows as i64),\n                get_color_for_bytes(summary.storages.total_bytes),\n            );\n            content.append_plain(\" / \");\n            content.append_styled(\n                fmt_ref.format(summary.storages.total_bytes as i64),\n                get_color_for_bytes(summary.storages.total_bytes),\n            );\n            self.set_view_content(\"total_data\", content);\n        }\n\n        {\n            self.sparklines.queries.push(summary.queries as f64);\n            let mut content = StyledString::plain(\"\");\n            content.append_styled(\n                summary.queries.to_string(),\n                get_color_for_ratio(summary.queries, summary.servers * 100),\n            );\n            let spark = self.sparklines.queries.render(SPARKLINE_WIDTH);\n            if !spark.is_empty() {\n                content.append_plain(\" \");\n                content.append_styled(spark, BaseColor::White.dark());\n            }\n            self.set_view_content(\"queries\", content);\n        }\n\n        {\n            self.sparklines.merges.push(summary.merges as f64);\n\n            let mut opt = StyledString::new();\n            let mut add_opt = |label: &str, content: StyledString| {\n                if !opt.is_empty() {\n                    opt.append_plain(\" \");\n                }\n                opt.append_styled(label, BaseColor::Cyan.dark());\n                opt.append_plain(\" \");\n                opt.append(content);\n            };\n\n            if summary.merges > 0 {\n                let mut c = StyledString::new();\n                c.append_styled(\n                    summary.merges.to_string(),\n                    get_color_for_ratio(summary.merges, summary.servers * 20),\n                );\n                let spark = self.sparklines.merges.render(SPARKLINE_WIDTH);\n                if !spark.is_empty() {\n                    c.append_plain(\" \");\n                    
c.append_styled(spark, BaseColor::White.dark());\n                }\n                add_opt(\"Merges:\", c);\n            }\n\n            if summary.mutations > 0 {\n                let mut c = StyledString::new();\n                c.append_styled(\n                    summary.mutations.to_string(),\n                    get_color_for_ratio(summary.mutations, summary.servers * 8),\n                );\n                add_opt(\"Mutations:\", c);\n            }\n\n            if summary.fetches > 0 {\n                let mut c = StyledString::new();\n                c.append_styled(\n                    summary.fetches.to_string(),\n                    get_color_for_ratio(summary.fetches, summary.servers * 20),\n                );\n                add_opt(\"Fetches:\", c);\n            }\n\n            if summary.replication_queue > 0 {\n                let mut c = StyledString::new();\n                c.append_styled(\n                    summary.replication_queue.to_string(),\n                    get_color_for_ratio(summary.replication_queue, summary.servers * 20),\n                );\n                c.append(\" (\");\n                c.append_styled(\n                    summary.replication_queue_tries.to_string(),\n                    get_color_for_ratio(\n                        summary.replication_queue_tries,\n                        summary.replication_queue * 2,\n                    ),\n                );\n                c.append(\")\");\n                add_opt(\"RepQueue:\", c);\n            }\n\n            if summary.storages.buffer_bytes > 0 {\n                let mut c = StyledString::new();\n                c.append_styled(\n                    fmt_ref.format(summary.storages.buffer_bytes as i64),\n                    get_color_for_ratio(summary.storages.buffer_bytes, summary.memory.os_total),\n                );\n                add_opt(\"Buffers:\", c);\n            }\n\n            if summary.storages.distributed_insert_files > 0 {\n             
   let mut c = StyledString::new();\n                c.append_styled(\n                    summary.storages.distributed_insert_files.to_string(),\n                    get_color_for_ratio(summary.storages.distributed_insert_files, 10000),\n                );\n                add_opt(\"DistInserts:\", c);\n            }\n\n            self.set_view_content(\"optional_metrics\", opt);\n        }\n\n        self.prev_summary = Some(summary);\n        self.prev_update_time = Some(now);\n    }\n}\n\nimpl View for SummaryView {\n    fn draw(&self, printer: &Printer<'_, '_>) {\n        self.layout.draw(printer);\n    }\n\n    fn needs_relayout(&self) -> bool {\n        return self.layout.needs_relayout();\n    }\n\n    fn layout(&mut self, size: Vec2) {\n        self.layout.layout(size);\n    }\n\n    fn required_size(&mut self, req: Vec2) -> Vec2 {\n        return self.layout.required_size(req);\n    }\n\n    fn on_event(&mut self, event: Event) -> EventResult {\n        return self.layout.on_event(event);\n    }\n\n    fn call_on_any(&mut self, selector: &Selector<'_>, callback: AnyCb<'_>) {\n        self.layout.call_on_any(selector, callback);\n    }\n\n    // FIXME: do we need other methods?\n}\n"
  },
  {
    "path": "src/view/table_view.rs",
    "content": "//\n// Copied from https://github.com/BonsaiDen/cursive_table_view\n//\n// And extended to support:\n// - Adopt to recent cursive changes\n// - Add ability not to follow selected item in the table\n// - Column resize on mouse drag\n// - Column removal on middle mouse press\n// - Better navigation\n//   - j/k -- for navigation\n//   - PgUp/PgDown -- scroll the whole page\n// - Calculate column width based on the input rows\n//   - Add new constraint Min/MinMax\n//\n\n//! A basic table view implementation for [cursive](https://crates.io/crates/cursive).\n#![deny(\n    missing_docs,\n    missing_copy_implementations,\n    trivial_casts,\n    trivial_numeric_casts,\n    unsafe_code,\n    unused_import_braces,\n    unused_qualifications\n)]\n\n// STD Dependencies -----------------------------------------------------------\nuse std::cmp::{self, Ordering};\nuse std::collections::HashMap;\nuse std::hash::Hash;\nuse std::sync::{Arc, Mutex};\n\n// External Dependencies ------------------------------------------------------\nuse cursive::{\n    Cursive, Printer, Rect, With,\n    align::HAlign,\n    direction::Direction,\n    event::{Callback, Event, EventResult, Key, MouseButton, MouseEvent},\n    theme::{self, BaseColor, Color, Effect, Style},\n    utils::markup::StyledString,\n    vec::Vec2,\n    view::{CannotFocus, View, scroll},\n};\nuse unicode_width::UnicodeWidthStr;\n\n/// A trait for displaying and sorting items inside a\n/// [`TableView`](struct.TableView.html).\npub trait TableViewItem<H>: Clone + Sized\nwhere\n    H: Eq + Hash + Copy + Clone + 'static,\n{\n    /// Method returning a string representation of the item for the\n    /// specified column from type `H`.\n    fn to_column(&self, column: H) -> String;\n\n    /// Method comparing two items via their specified column from type `H`.\n    fn cmp(&self, other: &Self, column: H) -> Ordering\n    where\n        Self: Sized;\n\n    /// Method returning a styled string representation of the item for 
the\n    /// specified column from type `H`. Default implementation returns unstyled text.\n    fn to_column_styled(&self, column: H) -> StyledString {\n        StyledString::plain(self.to_column(column))\n    }\n}\n\n/// Callback used when a column is sorted.\n///\n/// It takes the column and the ordering as input.\n///\n/// This is a private type to help readability.\ntype OnSortCallback<H> = Arc<dyn Fn(&mut Cursive, H, Ordering) + Send + Sync>;\n\n/// Callback taking as argument the row and the index of an element.\n///\n/// This is a private type to help readability.\ntype IndexCallback = Arc<dyn Fn(&mut Cursive, Option<usize>, Option<usize>) + Send + Sync>;\n\n/// View to select an item among a list, supporting multiple columns for sorting.\n///\n/// # Examples\n///\n/// ```ignore\n/// # extern crate cursive;\n/// # use std::cmp::Ordering;\n/// # use chdig::view::table_view::{TableView, TableViewItem};\n/// # use cursive::align::HAlign;\n/// # fn main() {\n/// // Provide a type for the table's columns\n/// #[derive(Copy, Clone, PartialEq, Eq, Hash)]\n/// enum BasicColumn {\n///     Name,\n///     Count,\n///     Rate\n/// }\n///\n/// // Define the item type\n/// #[derive(Clone, Debug)]\n/// struct Foo {\n///     name: String,\n///     count: usize,\n///     rate: usize\n/// }\n///\n/// impl TableViewItem<BasicColumn> for Foo {\n///\n///     fn to_column(&self, column: BasicColumn) -> String {\n///         match column {\n///             BasicColumn::Name => self.name.to_string(),\n///             BasicColumn::Count => format!(\"{}\", self.count),\n///             BasicColumn::Rate => format!(\"{}\", self.rate)\n///         }\n///     }\n///\n///     fn cmp(&self, other: &Self, column: BasicColumn) -> Ordering where Self: Sized {\n///         match column {\n///             BasicColumn::Name => self.name.cmp(&other.name),\n///             BasicColumn::Count => self.count.cmp(&other.count),\n///             BasicColumn::Rate => self.rate.cmp(&other.rate)\n///    
     }\n///     }\n///\n/// }\n///\n/// // Configure the actual table with adaptive column widths\n/// let table = TableView::<Foo, BasicColumn>::new()\n///                      .column(BasicColumn::Name, \"Name\", |c| c.width_min(10))\n///                      .column(BasicColumn::Count, \"Count\", |c| c.width_min_max(5, 10).align(HAlign::Center))\n///                      .column(BasicColumn::Rate, \"Rate\", |c| {\n///                          c.ordering(Ordering::Greater).align(HAlign::Right).width_min_max(4, 10)\n///                      })\n///                      .default_column(BasicColumn::Name);\n/// # }\n/// ```\npub struct TableView<T, H> {\n    enabled: bool,\n    scroll_core: scroll::Core,\n    needs_relayout: bool,\n\n    column_select: bool,\n    columns: Vec<TableColumn<H>>,\n    column_indicies: HashMap<H, usize>,\n\n    focus: Option<usize>,\n    items: Vec<T>,\n    rows_to_items: Vec<usize>,\n\n    on_sort: Option<OnSortCallback<H>>,\n    // TODO Pass drawing offsets into the handlers so a popup menu\n    // can be created easily?\n    on_submit: Option<IndexCallback>,\n    on_select: Option<IndexCallback>,\n\n    // Column resize state\n    resizing_column: Option<usize>,\n    resize_start_x: usize,\n    resize_start_width: usize,\n\n    // Track last layout size for page up/down navigation\n    last_size: Arc<Mutex<Vec2>>,\n\n    // Cached content widths for Min/MinMax columns (calculated when items change)\n    content_widths: HashMap<usize, usize>,\n\n    title: Option<String>,\n}\n\ncursive::impl_scroller!(TableView < T, H > ::scroll_core);\n\nimpl<T, H> Default for TableView<T, H>\nwhere\n    T: TableViewItem<H> + PartialEq,\n    H: Eq + Hash + Copy + Clone + Send + Sync + 'static,\n{\n    /// Creates a new empty `TableView` without any columns.\n    ///\n    /// See [`TableView::new()`].\n    fn default() -> Self {\n        Self::new()\n    }\n}\n\nimpl<T, H> TableView<T, H>\nwhere\n    T: TableViewItem<H> + PartialEq,\n    H: Eq + Hash + 
Copy + Clone + Send + Sync + 'static,\n{\n    /// Sets the contained items of the table.\n    ///\n    /// The currently active sort order is preserved and will be applied to all\n    /// items.\n    ///\n    /// Compared to `set_items`, the current selection will be preserved.\n    /// (But this is only available for `T: PartialEq`.)\n    pub fn set_items_stable(&mut self, items: Vec<T>) {\n        // Preserve selection\n        let new_location = self.item().and_then(|old_item| {\n            let old_item = &self.items[old_item];\n            items.iter().position(|new| new == old_item)\n        });\n\n        self.set_items_and_focus(items, new_location);\n    }\n}\n\n#[allow(dead_code)]\nimpl<T, H> TableView<T, H>\nwhere\n    T: TableViewItem<H>,\n    H: Eq + Hash + Copy + Clone + Send + Sync + 'static,\n{\n    /// Creates a new empty `TableView` without any columns.\n    ///\n    /// A TableView should be accompanied by an enum of type `H` representing\n    /// the table columns.\n    pub fn new() -> Self {\n        Self {\n            enabled: true,\n            scroll_core: scroll::Core::new(),\n            needs_relayout: true,\n\n            column_select: false,\n            columns: Vec::new(),\n            column_indicies: HashMap::new(),\n\n            focus: None,\n            items: Vec::new(),\n            rows_to_items: Vec::new(),\n\n            on_sort: None,\n            on_submit: None,\n            on_select: None,\n\n            resizing_column: None,\n            resize_start_x: 0,\n            resize_start_width: 0,\n\n            last_size: Arc::new(Mutex::new(Vec2 { x: 1, y: 1 })),\n            content_widths: HashMap::new(),\n            title: None,\n        }\n    }\n\n    /// Adds a column for the specified table column from type `H` along with\n    /// a title for its visual display.\n    ///\n    /// The provided callback can be used to further configure the\n    /// created [`TableColumn`](struct.TableColumn.html).\n    pub fn 
column<S: Into<String>, C: FnOnce(TableColumn<H>) -> TableColumn<H>>(\n        mut self,\n        column: H,\n        title: S,\n        callback: C,\n    ) -> Self {\n        self.add_column(column, title, callback);\n        self\n    }\n\n    /// Adds a column for the specified table column from type `H` along with\n    /// a title for its visual display.\n    ///\n    /// The provided callback can be used to further configure the\n    /// created [`TableColumn`](struct.TableColumn.html).\n    pub fn add_column<S: Into<String>, C: FnOnce(TableColumn<H>) -> TableColumn<H>>(\n        &mut self,\n        column: H,\n        title: S,\n        callback: C,\n    ) {\n        self.insert_column(self.columns.len(), column, title, callback);\n    }\n\n    /// Remove a column.\n    pub fn remove_column(&mut self, i: usize) {\n        // Update the existing indices\n        for column in &self.columns[i + 1..] {\n            *self.column_indicies.get_mut(&column.column).unwrap() -= 1;\n        }\n\n        let column = self.columns.remove(i);\n        self.column_indicies.remove(&column.column);\n        self.needs_relayout = true;\n    }\n\n    /// Adds a column for the specified table column from type `H` along with\n    /// a title for its visual display.\n    ///\n    /// The provided callback can be used to further configure the\n    /// created [`TableColumn`](struct.TableColumn.html).\n    pub fn insert_column<S: Into<String>, C: FnOnce(TableColumn<H>) -> TableColumn<H>>(\n        &mut self,\n        i: usize,\n        column: H,\n        title: S,\n        callback: C,\n    ) {\n        // Update all existing indices\n        for column in &self.columns[i..] 
{\n            *self.column_indicies.get_mut(&column.column).unwrap() += 1;\n        }\n\n        self.column_indicies.insert(column, i);\n        self.columns\n            .insert(i, callback(TableColumn::new(column, title.into())));\n\n        // Make the first column the default one\n        if self.columns.len() == 1 {\n            self.set_default_column(column);\n        }\n        self.needs_relayout = true;\n    }\n\n    /// Sets the initially active column of the table.\n    pub fn default_column(mut self, column: H) -> Self {\n        self.set_default_column(column);\n        self\n    }\n\n    /// Sets the initially active column of the table.\n    pub fn set_default_column(&mut self, column: H) {\n        if self.column_indicies.contains_key(&column) {\n            for c in &mut self.columns {\n                c.selected = c.column == column;\n                if c.selected {\n                    c.order = c.default_order;\n                } else {\n                    c.order = Ordering::Equal;\n                }\n            }\n        }\n    }\n\n    /// Sorts the table using the specified table `column` and the passed\n    /// `order`.\n    pub fn sort_by(&mut self, column: H, order: Ordering) {\n        if self.column_indicies.contains_key(&column) {\n            for c in &mut self.columns {\n                // Move selection back to the sorted column.\n                c.selected = c.column == column;\n                if c.selected {\n                    c.order = order;\n                } else {\n                    c.order = Ordering::Equal;\n                }\n            }\n        }\n\n        self.sort_items(column, order);\n    }\n\n    /// Sorts the table using the currently active column and its\n    /// ordering.\n    pub fn sort(&mut self) {\n        if let Some((column, order)) = self.order() {\n            self.sort_items(column, order);\n        }\n    }\n\n    /// Returns the currently active column that is used for sorting\n    /// 
along with its ordering.\n    ///\n    /// Might return `None` if there are currently no items in the table\n    /// and it has not been sorted yet.\n    pub fn order(&self) -> Option<(H, Ordering)> {\n        for c in &self.columns {\n            if c.order != Ordering::Equal {\n                return Some((c.column, c.order));\n            }\n        }\n        None\n    }\n\n    /// Disables this view.\n    ///\n    /// A disabled view cannot be selected.\n    pub fn disable(&mut self) {\n        self.enabled = false;\n    }\n\n    /// Re-enables this view.\n    pub fn enable(&mut self) {\n        self.enabled = true;\n    }\n\n    /// Enable or disable this view.\n    pub fn set_enabled(&mut self, enabled: bool) {\n        self.enabled = enabled;\n    }\n\n    /// Returns `true` if this view is enabled.\n    pub fn is_enabled(&self) -> bool {\n        self.enabled\n    }\n\n    /// Sets a callback to be used when a selected column is sorted by\n    /// pressing `<Enter>`.\n    ///\n    /// # Example\n    ///\n    /// ```ignore\n    /// table.set_on_sort(|siv: &mut Cursive, column: BasicColumn, order: Ordering| {\n    ///\n    /// });\n    /// ```\n    pub fn set_on_sort<F>(&mut self, cb: F)\n    where\n        F: Fn(&mut Cursive, H, Ordering) + Send + Sync + 'static,\n    {\n        self.on_sort = Some(Arc::new(move |s, h, o| cb(s, h, o)));\n    }\n\n    /// Sets a callback to be used when a selected column is sorted by\n    /// pressing `<Enter>`.\n    ///\n    /// Chainable variant.\n    ///\n    /// # Example\n    ///\n    /// ```ignore\n    /// table.on_sort(|siv: &mut Cursive, column: BasicColumn, order: Ordering| {\n    ///\n    /// });\n    /// ```\n    pub fn on_sort<F>(self, cb: F) -> Self\n    where\n        F: Fn(&mut Cursive, H, Ordering) + Send + Sync + 'static,\n    {\n        self.with(|t| t.set_on_sort(cb))\n    }\n\n    /// Sets a callback to be used when `<Enter>` is pressed while an item\n    /// is selected.\n    ///\n    /// Both the 
currently selected row and the index of the corresponding item\n    /// within the underlying storage vector will be given to the callback.\n    ///\n    /// # Example\n    ///\n    /// ```ignore\n    /// table.set_on_submit(|siv: &mut Cursive, row: Option<usize>, index: Option<usize>| {\n    ///\n    /// });\n    /// ```\n    pub fn set_on_submit<F>(&mut self, cb: F)\n    where\n        F: Fn(&mut Cursive, Option<usize>, Option<usize>) + Send + Sync + 'static,\n    {\n        self.on_submit = Some(Arc::new(move |s, row, index| cb(s, row, index)));\n    }\n\n    /// Sets a callback to be used when `<Enter>` is pressed while an item\n    /// is selected.\n    ///\n    /// Both the currently selected row and the index of the corresponding item\n    /// within the underlying storage vector will be given to the callback.\n    ///\n    /// Chainable variant.\n    ///\n    /// # Example\n    ///\n    /// ```ignore\n    /// table.on_submit(|siv: &mut Cursive, row: Option<usize>, index: Option<usize>| {\n    ///\n    /// });\n    /// ```\n    pub fn on_submit<F>(self, cb: F) -> Self\n    where\n        F: Fn(&mut Cursive, Option<usize>, Option<usize>) + Send + Sync + 'static,\n    {\n        self.with(|t| t.set_on_submit(cb))\n    }\n\n    /// Sets a callback to be used when an item is selected.\n    ///\n    /// Both the currently selected row and the index of the corresponding item\n    /// within the underlying storage vector will be given to the callback.\n    ///\n    /// # Example\n    ///\n    /// ```ignore\n    /// table.set_on_select(|siv: &mut Cursive, row: Option<usize>, index: Option<usize>| {\n    ///\n    /// });\n    /// ```\n    pub fn set_on_select<F>(&mut self, cb: F)\n    where\n        F: Fn(&mut Cursive, Option<usize>, Option<usize>) + Send + Sync + 'static,\n    {\n        self.on_select = Some(Arc::new(move |s, row, index| cb(s, row, index)));\n    }\n\n    /// Sets a callback to be used when an item is selected.\n    ///\n    /// Both the currently 
selected row and the index of the corresponding item\n    /// within the underlying storage vector will be given to the callback.\n    ///\n    /// Chainable variant.\n    ///\n    /// # Example\n    ///\n    /// ```ignore\n    /// table.on_select(|siv: &mut Cursive, row: Option<usize>, index: Option<usize>| {\n    ///\n    /// });\n    /// ```\n    pub fn on_select<F>(self, cb: F) -> Self\n    where\n        F: Fn(&mut Cursive, Option<usize>, Option<usize>) + Send + Sync + 'static,\n    {\n        self.with(|t| t.set_on_select(cb))\n    }\n\n    /// Removes all items from this view.\n    pub fn clear(&mut self) {\n        self.items.clear();\n        self.rows_to_items.clear();\n        self.focus = None;\n        self.needs_relayout = true;\n    }\n\n    /// Returns the number of items in this table.\n    pub fn len(&self) -> usize {\n        self.items.len()\n    }\n\n    /// Returns `true` if this table has no items.\n    pub fn is_empty(&self) -> bool {\n        self.items.is_empty()\n    }\n\n    /// Returns the index of the currently selected table row.\n    pub fn row(&self) -> Option<usize> {\n        if self.items.is_empty() {\n            None\n        } else {\n            self.focus\n        }\n    }\n\n    /// Selects the row at the specified index.\n    pub fn set_selected_row(&mut self, row_index: usize) {\n        self.focus = Some(row_index);\n        self.scroll_core.scroll_to_y(row_index);\n    }\n\n    /// Selects the row at the specified index.\n    ///\n    /// Chainable variant.\n    pub fn selected_row(self, row_index: usize) -> Self {\n        self.with(|t| t.set_selected_row(row_index))\n    }\n\n    /// Sets the contained items of the table.\n    ///\n    /// The currently active sort order is preserved and will be applied to all\n    /// items.\n    pub fn set_items(&mut self, items: Vec<T>) {\n        self.set_items_and_focus(items, None);\n    }\n\n    fn set_items_and_focus(&mut self, items: Vec<T>, new_location: Option<usize>) {\n   
     self.items = items;\n        self.rows_to_items = Vec::with_capacity(self.items.len());\n\n        for i in 0..self.items.len() {\n            self.rows_to_items.push(i);\n        }\n\n        if let Some((column, order)) = self.order() {\n            // Preserve the selected column if possible.\n            let selected_column = self.columns.iter().find(|c| c.selected).map(|c| c.column);\n            self.sort_by(column, order);\n            if let Some(column) = selected_column {\n                for c in &mut self.columns {\n                    c.selected = c.column == column;\n                }\n            }\n        }\n\n        // Calculate content widths after items are set and sorted\n        self.calculate_content_widths();\n\n        if let Some(new_location) = new_location {\n            self.set_selected_item(new_location);\n        }\n        self.needs_relayout = true;\n    }\n\n    /// Calculate content widths for Min/MinMax columns from first 100 items.\n    /// This is called when items are updated to cache the widths for layout.\n    fn calculate_content_widths(&mut self) {\n        const SAMPLE_SIZE: usize = 100;\n        let sample_count = cmp::min(SAMPLE_SIZE, self.items.len());\n\n        self.content_widths.clear();\n        for (col_idx, column) in self.columns.iter().enumerate() {\n            if let Some(TableColumnWidth::Min(_) | TableColumnWidth::MinMax(_, _)) =\n                &column.requested_width\n            {\n                // Calculate max content width from first N items\n                // Title width includes 2 chars for sorting indicator: \" ▲\"\n                let mut max_width = column.title.len() + 2;\n                for i in 0..sample_count {\n                    let item_idx = self.rows_to_items[i];\n                    let content = self.items[item_idx].to_column(column.column);\n                    max_width = cmp::max(max_width, content.len());\n                }\n                
self.content_widths.insert(col_idx, max_width);\n            }\n        }\n    }\n\n    /// Sets the contained items of the table.\n    ///\n    /// The order of the items will be preserved even when the table is sorted.\n    ///\n    /// Chainable variant.\n    pub fn items(self, items: Vec<T>) -> Self {\n        self.with(|t| t.set_items(items))\n    }\n\n    /// Sets the title displayed above the table header (chainable).\n    pub fn title<S: Into<String>>(mut self, title: S) -> Self {\n        self.title = Some(title.into());\n        self\n    }\n\n    /// Sets the title displayed above the table header.\n    pub fn set_title<S: Into<String>>(&mut self, title: S) {\n        self.title = Some(title.into());\n    }\n\n    /// Returns an immutable reference to the item at the specified index\n    /// within the underlying storage vector.\n    pub fn borrow_item(&self, index: usize) -> Option<&T> {\n        self.items.get(index)\n    }\n\n    /// Returns a mutable reference to the item at the specified index within\n    /// the underlying storage vector.\n    pub fn borrow_item_mut(&mut self, index: usize) -> Option<&mut T> {\n        self.items.get_mut(index)\n    }\n\n    /// Returns an immutable reference to the items contained within the table.\n    pub fn borrow_items(&mut self) -> &[T] {\n        &self.items\n    }\n\n    /// Returns a mutable reference to the items contained within the table.\n    ///\n    /// Can be used to modify the items in place.\n    pub fn borrow_items_mut(&mut self) -> &mut [T] {\n        self.needs_relayout = true;\n        &mut self.items\n    }\n\n    /// Returns the index of the currently selected item within the underlying\n    /// storage vector.\n    pub fn item(&self) -> Option<usize> {\n        if let Some(focus) = self.focus {\n            self.rows_to_items.get(focus).copied()\n        } else {\n            None\n        }\n    }\n\n    /// Selects the item at the specified index within the underlying storage\n    /// 
vector.\n    pub fn set_selected_item(&mut self, item_index: usize) {\n        // TODO optimize the performance for very large item lists\n        if item_index < self.items.len() {\n            for (row, item) in self.rows_to_items.iter().enumerate() {\n                if *item == item_index {\n                    self.focus = Some(row);\n                    self.scroll_core.scroll_to_y(row);\n                    break;\n                }\n            }\n        }\n    }\n\n    /// Selects the item at the specified index within the underlying storage\n    /// vector.\n    ///\n    /// Chainable variant.\n    pub fn selected_item(self, item_index: usize) -> Self {\n        self.with(|t| t.set_selected_item(item_index))\n    }\n\n    /// Inserts a new item into the table.\n    ///\n    /// The currently active sort order is preserved and will be applied to the\n    /// newly inserted item.\n    ///\n    /// If no sort option is set, the item will be added to the end of the table.\n    pub fn insert_item(&mut self, item: T) {\n        self.insert_item_at(self.items.len(), item);\n    }\n\n    /// Inserts a new item into the table.\n    ///\n    /// The currently active sort order is preserved and will be applied to the\n    /// newly inserted item.\n    ///\n    /// If no sort option is set, the item will be inserted at the given index.\n    ///\n    /// # Panics\n    ///\n    /// If `index > self.len()`.\n    pub fn insert_item_at(&mut self, index: usize, item: T) {\n        self.items.push(item);\n\n        // Here we know self.items.len() > 0\n        self.rows_to_items.insert(index, self.items.len() - 1);\n\n        if let Some((column, order)) = self.order() {\n            self.sort_by(column, order);\n        }\n        self.needs_relayout = true;\n    }\n\n    /// Removes the item at the specified index within the underlying storage\n    /// vector and returns it.\n    pub fn remove_item(&mut self, item_index: usize) -> Option<T> {\n        if item_index < 
self.items.len() {\n            // Move the selection if the currently selected item gets removed\n            if let Some(selected_index) = self.item()\n                && selected_index == item_index\n            {\n                self.focus_up(1);\n            }\n\n            // Remove the sorted reference to the item\n            self.rows_to_items.retain(|i| *i != item_index);\n\n            // Adjust remaining references\n            for ref_index in &mut self.rows_to_items {\n                if *ref_index > item_index {\n                    *ref_index -= 1;\n                }\n            }\n            self.needs_relayout = true;\n\n            // Remove actual item from the underlying storage\n            Some(self.items.remove(item_index))\n        } else {\n            None\n        }\n    }\n\n    /// Removes all items from the underlying storage and returns them.\n    pub fn take_items(&mut self) -> Vec<T> {\n        self.set_selected_row(0);\n        self.rows_to_items.clear();\n        self.needs_relayout = true;\n        self.items.drain(0..).collect()\n    }\n}\n\nimpl<T, H> TableView<T, H>\nwhere\n    T: TableViewItem<H>,\n    H: Eq + Hash + Copy + Clone + Send + Sync + 'static,\n{\n    fn title_height(&self) -> usize {\n        if self.title.is_some() { 1 } else { 0 }\n    }\n\n    fn draw_columns<C: Fn(&Printer<'_, '_>, &TableColumn<H>)>(\n        &self,\n        printer: &Printer<'_, '_>,\n        callback: C,\n    ) {\n        let mut column_offset = 0;\n        let column_count = self.columns.len();\n        for (index, column) in self.columns.iter().enumerate() {\n            // Crop to column width (+1 for trailing space) so content\n            // can't overflow into adjacent columns or past the last column\n            let col_printer = printer\n                .offset((column_offset, 0))\n                .cropped((column.width + 1, 1))\n                .focused(true);\n\n            callback(&col_printer, column);\n\n            if 1 + 
index < column_count {\n                printer.print((column_offset + column.width + 1, 0), \" \");\n            }\n\n            column_offset += column.width + 2;\n        }\n    }\n\n    fn sort_items(&mut self, column: H, order: Ordering) {\n        if !self.is_empty() {\n            let old_item = self.item();\n\n            let mut rows_to_items = self.rows_to_items.clone();\n            rows_to_items.sort_by(|a, b| {\n                if order == Ordering::Less {\n                    self.items[*a].cmp(&self.items[*b], column)\n                } else {\n                    self.items[*b].cmp(&self.items[*a], column)\n                }\n            });\n            self.rows_to_items = rows_to_items;\n\n            if let Some(old_item) = old_item {\n                self.set_selected_item(old_item);\n            }\n        }\n    }\n\n    fn draw_item(&self, printer: &Printer<'_, '_>, i: usize) {\n        self.draw_columns(printer, |printer, column| {\n            let value = self.items[self.rows_to_items[i]].to_column_styled(column.column);\n            column.draw_row(printer, &value);\n        });\n    }\n\n    fn on_focus_change(&self) -> EventResult {\n        let row = self.row();\n        let index = self.item();\n        EventResult::Consumed(\n            self.on_select\n                .clone()\n                .map(|cb| Callback::from_fn(move |s| cb(s, row, index))),\n        )\n    }\n\n    fn focus_up(&mut self, n: usize) {\n        self.focus = Some(self.focus.map_or(0, |x| x - cmp::min(x, n)));\n    }\n\n    fn focus_down(&mut self, n: usize) {\n        let items = self.items.len().saturating_sub(1);\n        self.focus = Some(self.focus.map_or(0, |x| cmp::min(x + n, items)));\n    }\n\n    fn active_column(&self) -> usize {\n        self.columns.iter().position(|c| c.selected).unwrap_or(0)\n    }\n\n    fn column_cancel(&mut self) {\n        self.column_select = false;\n        for column in &mut self.columns {\n            column.selected = 
column.order != Ordering::Equal;\n        }\n    }\n\n    fn column_next(&mut self) -> bool {\n        let column = self.active_column();\n        if 1 + column < self.columns.len() {\n            self.columns[column].selected = false;\n            self.columns[column + 1].selected = true;\n            true\n        } else {\n            false\n        }\n    }\n\n    fn column_prev(&mut self) -> bool {\n        let column = self.active_column();\n        if column > 0 {\n            self.columns[column].selected = false;\n            self.columns[column - 1].selected = true;\n            true\n        } else {\n            false\n        }\n    }\n\n    fn column_select(&mut self) -> EventResult {\n        let next = self.active_column();\n        let column = self.columns[next].column;\n        let current = self\n            .columns\n            .iter()\n            .position(|c| c.order != Ordering::Equal)\n            .unwrap_or(0);\n\n        let order = if current != next {\n            self.columns[next].default_order\n        } else if self.columns[current].order == Ordering::Less {\n            Ordering::Greater\n        } else {\n            Ordering::Less\n        };\n\n        self.sort_by(column, order);\n\n        if let Some(on_sort) = &self.on_sort {\n            let c = &self.columns[self.active_column()];\n            let column = c.column;\n            let order = c.order;\n\n            let cb = on_sort.clone();\n            EventResult::with_cb(move |s| cb(s, column, order))\n        } else {\n            EventResult::Consumed(None)\n        }\n    }\n\n    fn column_for_x(&self, mut x: usize) -> Option<usize> {\n        for (i, col) in self.columns.iter().enumerate() {\n            x = match x.checked_sub(col.width) {\n                None => return Some(i),\n                Some(x) => x.checked_sub(2)?,\n            };\n        }\n\n        None\n    }\n\n    /// Returns the column index and edge position if mouse is near a column boundary 
(resize handle)\n    fn column_boundary_at(&self, x: usize) -> Option<(usize, usize)> {\n        let mut offset = 0;\n        for (i, col) in self.columns.iter().enumerate() {\n            // Match draw_columns: separator at column.width + 1\n            let separator_pos = offset + col.width + 1;\n            // Check if within 2 characters of the separator\n            if x >= separator_pos.saturating_sub(1)\n                && x <= separator_pos + 1\n                && i + 1 < self.columns.len()\n            {\n                return Some((i, offset));\n            }\n            // Match draw_columns: next column at column.width + 2\n            offset += col.width + 2;\n        }\n        None\n    }\n\n    fn draw_content(&self, printer: &Printer<'_, '_>) {\n        let start = printer.content_offset.y;\n        let end = cmp::min(start + printer.output_size.y, self.rows_to_items.len());\n\n        for i in start..end {\n            let printer = printer.offset((0, i));\n            let color = if Some(i) == self.focus && self.enabled {\n                if !self.column_select && self.enabled && printer.focused {\n                    theme::ColorStyle::highlight()\n                } else {\n                    theme::ColorStyle::highlight_inactive()\n                }\n            } else {\n                theme::ColorStyle::primary()\n            };\n\n            if i < self.items.len() {\n                printer.with_color(color, |printer| {\n                    self.draw_item(printer, i);\n                });\n            }\n        }\n    }\n\n    fn layout_content(&mut self, size: Vec2) {\n        let column_count = self.columns.len();\n\n        // Use cached content widths calculated when items were set\n        // Collect column indices with their requested widths\n        let mut sized_indices: Vec<usize> = Vec::new();\n        let mut unsized_indices: Vec<usize> = Vec::new();\n\n        for (idx, column) in self.columns.iter().enumerate() {\n        
    if column.requested_width.is_some() {\n                sized_indices.push(idx);\n            } else {\n                unsized_indices.push(idx);\n            }\n        }\n\n        // Subtract one for the separators between our columns (that's column_count - 1)\n        let available_width = size.x.saturating_sub(column_count.saturating_sub(1) * 2);\n\n        // Calculate widths for all requested columns\n        let mut remaining_width = available_width;\n\n        // Find all columns with Min (no max constraint) - they will share remaining space\n        let min_cols: Vec<usize> = sized_indices\n            .iter()\n            .filter(|&&idx| {\n                matches!(\n                    self.columns[idx].requested_width.as_ref().unwrap(),\n                    TableColumnWidth::Min(_)\n                )\n            })\n            .copied()\n            .collect();\n\n        // Process all columns except Min columns first\n        for &col_idx in &sized_indices {\n            if min_cols.contains(&col_idx) && unsized_indices.is_empty() {\n                // Skip Min columns for now - we'll process them at the end\n                continue;\n            }\n\n            let column = &mut self.columns[col_idx];\n            column.width = match *column.requested_width.as_ref().unwrap() {\n                TableColumnWidth::Percent(width) => cmp::min(\n                    (size.x as f32 / 100.0 * width as f32).ceil() as usize,\n                    remaining_width,\n                ),\n                TableColumnWidth::Absolute(width) => width,\n                TableColumnWidth::Min(min) => {\n                    let content_width = self.content_widths.get(&col_idx).copied().unwrap_or(min);\n                    cmp::max(min, content_width)\n                }\n                TableColumnWidth::MinMax(min, max) => {\n                    let content_width = self.content_widths.get(&col_idx).copied().unwrap_or(min);\n                    cmp::min(max, 
cmp::max(min, content_width))\n                }\n            };\n            remaining_width = remaining_width.saturating_sub(self.columns[col_idx].width);\n        }\n\n        // Now distribute remaining width among all Min columns\n        if !min_cols.is_empty() && unsized_indices.is_empty() {\n            let width_per_min_col = remaining_width / min_cols.len();\n            for &col_idx in &min_cols {\n                let column = &mut self.columns[col_idx];\n                if let TableColumnWidth::Min(min) = *column.requested_width.as_ref().unwrap() {\n                    column.width = cmp::max(min, width_per_min_col);\n                    remaining_width = remaining_width.saturating_sub(column.width);\n                }\n            }\n        }\n\n        // Spread the remaining width across the unsized columns\n        let remaining_columns = unsized_indices.len();\n        if remaining_columns > 0 {\n            let width_per_column =\n                (remaining_width as f32 / remaining_columns as f32).floor() as usize;\n            for &col_idx in &unsized_indices {\n                self.columns[col_idx].width = width_per_column;\n            }\n        }\n\n        self.needs_relayout = false;\n    }\n\n    fn content_required_size(&mut self, req: Vec2) -> Vec2 {\n        Vec2::new(req.x, self.rows_to_items.len())\n    }\n\n    fn on_inner_event(&mut self, event: Event) -> EventResult {\n        let last_focus = self.focus;\n        match event {\n            Event::Key(Key::Right) => {\n                if self.column_select {\n                    if !self.column_next() {\n                        return EventResult::Ignored;\n                    }\n                } else {\n                    self.column_select = true;\n                }\n            }\n            Event::Key(Key::Left) => {\n                if self.column_select {\n                    if !self.column_prev() {\n                        return EventResult::Ignored;\n                   
 }\n                } else {\n                    self.column_select = true;\n                }\n            }\n            Event::Key(Key::Up) => {\n                if self.column_select {\n                    self.column_cancel();\n                } else {\n                    self.focus_up(1);\n                }\n            }\n            Event::Key(Key::Down) => {\n                if self.column_select {\n                    self.column_cancel();\n                } else {\n                    self.focus_down(1);\n                }\n            }\n            Event::Key(Key::PageUp) => {\n                self.column_cancel();\n                self.focus_up(10);\n            }\n            Event::Key(Key::PageDown) => {\n                self.column_cancel();\n                self.focus_down(10);\n            }\n            Event::Key(Key::Home) => {\n                self.column_cancel();\n                self.focus = None;\n            }\n            Event::Key(Key::End) => {\n                self.column_cancel();\n                self.focus = Some(self.items.len().saturating_sub(1));\n            }\n            Event::Key(Key::Enter) => {\n                if self.column_select {\n                    return self.column_select();\n                } else if !self.is_empty() && self.on_submit.is_some() {\n                    return self.on_submit_event();\n                }\n            }\n            Event::Mouse {\n                position,\n                offset,\n                event: MouseEvent::Press(MouseButton::Left),\n            } if !self.is_empty()\n                && position\n                    .checked_sub(offset)\n                    .is_some_and(|p| Some(p.y) == self.focus) =>\n            {\n                self.column_cancel();\n                return self.on_submit_event();\n            }\n            Event::Mouse {\n                position,\n                offset,\n                event: MouseEvent::Press(_),\n            } if 
!self.is_empty() => match position.checked_sub(offset) {\n                Some(position) if position.y < self.rows_to_items.len() => {\n                    self.column_cancel();\n                    self.focus = Some(position.y);\n                }\n                _ => return EventResult::Ignored,\n            },\n            _ => return EventResult::Ignored,\n        }\n\n        let focus = self.focus;\n\n        if self.column_select {\n            EventResult::Consumed(None)\n        } else if !self.is_empty() && last_focus != focus {\n            self.on_focus_change()\n        } else {\n            EventResult::Ignored\n        }\n    }\n\n    fn inner_important_area(&self, size: Vec2) -> Rect {\n        Rect::from_size((0, self.focus.unwrap_or_default()), (size.x, 1))\n    }\n\n    fn on_submit_event(&mut self) -> EventResult {\n        if let Some(cb) = &self.on_submit {\n            let cb = Arc::clone(cb);\n            let row = self.row();\n            let index = self.item();\n            return EventResult::Consumed(Some(Callback::from_fn(move |s| cb(s, row, index))));\n        }\n        EventResult::Ignored\n    }\n}\n\nimpl<T, H> View for TableView<T, H>\nwhere\n    T: TableViewItem<H> + Send + Sync + 'static,\n    H: Eq + Hash + Copy + Clone + Send + Sync + 'static,\n{\n    fn draw(&self, printer: &Printer<'_, '_>) {\n        let title_height = self.title_height();\n\n        if let Some(title) = &self.title {\n            let mut styled = StyledString::new();\n            styled.append_plain(\"\\u{2500}\\u{2500}\\u{2500} \");\n            styled.append_styled(\n                title,\n                Style::from(Color::Dark(BaseColor::Cyan)).combine(Effect::Bold),\n            );\n            styled.append_styled(\n                format!(\" ({})\", self.items.len()),\n                Style::from(Color::Dark(BaseColor::Cyan)),\n            );\n            styled.append_plain(\" \\u{2500}\\u{2500}\\u{2500}\");\n            let width = 
printer.size.x;\n            let text_width = styled.width();\n            let offset = width.saturating_sub(text_width) / 2;\n            printer.print_styled((offset, 0), &styled);\n        }\n\n        let printer = &printer.offset((0, title_height));\n        self.draw_columns(printer, |printer, column| {\n            let color = if self.enabled && (column.order != Ordering::Equal || column.selected) {\n                if self.column_select && column.selected && self.enabled && printer.focused {\n                    theme::ColorStyle::highlight()\n                } else {\n                    theme::ColorStyle::highlight_inactive()\n                }\n            } else {\n                theme::ColorStyle::primary()\n            };\n\n            printer.with_color(color, |printer| {\n                column.draw_header(printer);\n            });\n        });\n\n        let printer = &printer.offset((0, 2)).focused(true);\n        scroll::draw(self, printer, Self::draw_content);\n    }\n\n    fn layout(&mut self, size: Vec2) {\n        let header = self.title_height() + 2;\n        *self.last_size.lock().unwrap() = size.saturating_sub((0, header));\n        scroll::layout(\n            self,\n            size.saturating_sub((0, header)),\n            self.needs_relayout,\n            Self::layout_content,\n            Self::content_required_size,\n        );\n    }\n\n    fn take_focus(&mut self, _: Direction) -> Result<EventResult, CannotFocus> {\n        self.enabled.then(EventResult::consumed).ok_or(CannotFocus)\n    }\n\n    fn on_event(&mut self, event: Event) -> EventResult {\n        if !self.enabled {\n            return EventResult::Ignored;\n        }\n\n        match event {\n            // Handle j/k navigation\n            Event::Char('k') => {\n                return self.on_event(Event::Key(Key::Up));\n            }\n            Event::Char('j') => {\n                return self.on_event(Event::Key(Key::Down));\n            }\n            // 
Handle page up/down navigation\n            Event::Key(Key::PageUp) => {\n                let new_row = self\n                    .row()\n                    .map(|r| {\n                        let height = self.last_size.lock().unwrap().y;\n                        if r > height { r - height + 1 } else { 0 }\n                    })\n                    .unwrap_or_default();\n                self.set_selected_row(new_row);\n                return EventResult::consumed();\n            }\n            Event::Key(Key::PageDown) => {\n                let new_row = self\n                    .row()\n                    .map(|r| {\n                        let len = self.len();\n                        let height = self.last_size.lock().unwrap().y;\n\n                        if len > height + r {\n                            r + height - 1\n                        } else if len > 0 {\n                            len - 1\n                        } else {\n                            0\n                        }\n                    })\n                    .unwrap_or_default();\n                self.set_selected_row(new_row);\n                return EventResult::consumed();\n            }\n            // Handle column resize start\n            Event::Mouse {\n                position,\n                offset,\n                event: MouseEvent::Press(MouseButton::Left),\n            } if position\n                .checked_sub(offset)\n                .is_some_and(|p| p.y == 0 || p.y == 1) =>\n            {\n                if let Some(position) = position.checked_sub(offset) {\n                    // Check if clicking on a column boundary to start resize\n                    if let Some((col_idx, _)) = self.column_boundary_at(position.x) {\n                        self.resizing_column = Some(col_idx);\n                        self.resize_start_x = position.x;\n                        self.resize_start_width = self.columns[col_idx].width;\n                        return 
EventResult::Consumed(None);\n                    }\n                    // Otherwise handle column selection\n                    if position.y == 0\n                        && let Some(col) = self.column_for_x(position.x)\n                    {\n                        if self.column_select && self.columns[col].selected {\n                            return self.column_select();\n                        } else {\n                            let active = self.active_column();\n                            self.columns[active].selected = false;\n                            self.columns[col].selected = true;\n                            self.column_select = true;\n                        }\n                    }\n                }\n                EventResult::Ignored\n            }\n            // Handle column resize drag\n            Event::Mouse {\n                position,\n                offset,\n                event: MouseEvent::Hold(MouseButton::Left),\n            } if self.resizing_column.is_some() => {\n                if let Some(position) = position.checked_sub(offset)\n                    && let Some(col_idx) = self.resizing_column\n                {\n                    let delta = position.x as isize - self.resize_start_x as isize;\n                    let new_width = (self.resize_start_width as isize + delta).max(5) as usize;\n\n                    // Update the column width and mark as absolute width\n                    self.columns[col_idx].width = new_width;\n                    self.columns[col_idx].requested_width =\n                        Some(TableColumnWidth::Absolute(new_width));\n                    self.needs_relayout = true;\n                }\n                EventResult::Consumed(None)\n            }\n            // Handle column resize end\n            Event::Mouse {\n                event: MouseEvent::Release(MouseButton::Left),\n                ..\n            } if self.resizing_column.is_some() => {\n                
self.resizing_column = None;\n                EventResult::Consumed(None)\n            }\n            // Handle column removal on middle mouse press\n            Event::Mouse {\n                position,\n                offset,\n                event: MouseEvent::Press(MouseButton::Middle),\n            } if position\n                .checked_sub(offset)\n                .is_some_and(|p| p.y == 0 || p.y == 1) =>\n            {\n                if let Some(position) = position.checked_sub(offset)\n                    && let Some(col_idx) = self.column_for_x(position.x)\n                    && self.columns.len() > 1\n                {\n                    self.remove_column(col_idx);\n                    return EventResult::Consumed(None);\n                }\n                EventResult::Ignored\n            }\n            event => scroll::on_event(\n                self,\n                event.relativized((0, 2)),\n                Self::on_inner_event,\n                Self::inner_important_area,\n            ),\n        }\n    }\n\n    fn important_area(&self, size: Vec2) -> Rect {\n        let header = self.title_height() + 2;\n        self.inner_important_area(size.saturating_sub((0, header))) + (0, header)\n    }\n}\n\n/// A type used for the construction of columns in a\n/// [`TableView`](struct.TableView.html).\npub struct TableColumn<H> {\n    column: H,\n    title: String,\n    selected: bool,\n    alignment: HAlign,\n    order: Ordering,\n    width: usize,\n    default_order: Ordering,\n    requested_width: Option<TableColumnWidth>,\n}\n\nenum TableColumnWidth {\n    Percent(usize),\n    Absolute(usize),\n    /// Minimum width - will use content width but at least this value\n    Min(usize),\n    /// Minimum and maximum width - will use content width constrained to this range\n    MinMax(usize, usize),\n}\n\n#[allow(dead_code)]\nimpl<H: Copy + Clone + 'static> TableColumn<H> {\n    /// Sets the default ordering of the column.\n    pub fn ordering(mut self, 
order: Ordering) -> Self {\n        self.default_order = order;\n        self\n    }\n\n    /// Sets the horizontal text alignment of the column.\n    pub fn align(mut self, alignment: HAlign) -> Self {\n        self.alignment = alignment;\n        self\n    }\n\n    /// Sets how many characters of width this column will try to occupy.\n    pub fn width(mut self, width: usize) -> Self {\n        self.requested_width = Some(TableColumnWidth::Absolute(width));\n        self\n    }\n\n    /// Sets what percentage of the width of the entire table this column will\n    /// try to occupy.\n    pub fn width_percent(mut self, width: usize) -> Self {\n        self.requested_width = Some(TableColumnWidth::Percent(width));\n        self\n    }\n\n    /// Sets minimum width for the column - will calculate actual width from content\n    /// but use at least this value.\n    pub fn width_min(mut self, min: usize) -> Self {\n        self.requested_width = Some(TableColumnWidth::Min(min));\n        self\n    }\n\n    /// Sets minimum and maximum width for the column - will calculate actual width\n    /// from content but constrain it to this range.\n    pub fn width_min_max(mut self, min: usize, max: usize) -> Self {\n        self.requested_width = Some(TableColumnWidth::MinMax(min, max));\n        self\n    }\n\n    fn new(column: H, title: String) -> Self {\n        Self {\n            column,\n            title,\n            selected: false,\n            alignment: HAlign::Left,\n            order: Ordering::Equal,\n            width: 0,\n            default_order: Ordering::Less,\n            requested_width: None,\n        }\n    }\n\n    fn draw_header(&self, printer: &Printer<'_, '_>) {\n        let order = match self.order {\n            Ordering::Less => \"▲\",\n            Ordering::Greater => \"▼\",\n            Ordering::Equal => \" \",\n        };\n\n        let header = match self.alignment {\n            HAlign::Left => format!(\n                \"{:<width$} {}\",\n 
               self.title,\n                order,\n                width = self.width.saturating_sub(2)\n            ),\n            HAlign::Right => format!(\n                \"{:>width$} {}\",\n                self.title,\n                order,\n                width = self.width.saturating_sub(2)\n            ),\n            HAlign::Center => format!(\n                \"{:^width$} {}\",\n                self.title,\n                order,\n                width = self.width.saturating_sub(2)\n            ),\n        };\n\n        printer.print((0, 0), header.as_str());\n    }\n\n    fn draw_row(&self, printer: &Printer<'_, '_>, value: &StyledString) {\n        let plain_text = value.source();\n        let current_len = plain_text.width();\n        let target_width = self.width;\n\n        // Create a new styled string with proper alignment\n        let mut styled = StyledString::new();\n\n        if current_len < target_width {\n            let padding = target_width - current_len;\n            match self.alignment {\n                HAlign::Left => {\n                    styled.append(value.clone());\n                    styled.append_plain(\" \".repeat(padding + 1));\n                }\n                HAlign::Right => {\n                    styled.append_plain(\" \".repeat(padding));\n                    styled.append(value.clone());\n                    styled.append_plain(\" \");\n                }\n                HAlign::Center => {\n                    let left_padding = padding / 2;\n                    let right_padding = padding - left_padding;\n                    styled.append_plain(\" \".repeat(left_padding));\n                    styled.append(value.clone());\n                    styled.append_plain(\" \".repeat(right_padding + 1));\n                }\n            }\n        } else {\n            styled.append(value.clone());\n            styled.append_plain(\" \");\n        }\n\n        printer.print_styled((0, 0), &styled);\n    
}\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[derive(Copy, Clone, PartialEq, Eq, Hash)]\n    enum SimpleColumn {\n        Name,\n    }\n\n    #[allow(dead_code)]\n    impl SimpleColumn {\n        fn as_str(&self) -> &str {\n            match *self {\n                SimpleColumn::Name => \"Name\",\n            }\n        }\n    }\n\n    #[derive(Clone, Debug)]\n    struct SimpleItem {\n        name: String,\n    }\n\n    impl TableViewItem<SimpleColumn> for SimpleItem {\n        fn to_column(&self, column: SimpleColumn) -> String {\n            match column {\n                SimpleColumn::Name => self.name.to_string(),\n            }\n        }\n\n        fn cmp(&self, other: &Self, column: SimpleColumn) -> Ordering\n        where\n            Self: Sized,\n        {\n            match column {\n                SimpleColumn::Name => self.name.cmp(&other.name),\n            }\n        }\n    }\n\n    fn setup_test_table() -> TableView<SimpleItem, SimpleColumn> {\n        TableView::<SimpleItem, SimpleColumn>::new()\n            .column(SimpleColumn::Name, \"Name\", |c| c.width_percent(20))\n    }\n\n    #[test]\n    fn should_insert_into_existing_table() {\n        let mut simple_table = setup_test_table();\n\n        let mut simple_items = Vec::new();\n\n        for i in 1..=10 {\n            simple_items.push(SimpleItem {\n                name: format!(\"{} - Name\", i),\n            });\n        }\n\n        // Insert First Batch of Items\n        simple_table.set_items(simple_items);\n\n        // Test for Additional item insertion\n        simple_table.insert_item(SimpleItem {\n            name: format!(\"{} Name\", 11),\n        });\n\n        assert!(simple_table.len() == 11);\n    }\n\n    #[test]\n    fn should_insert_into_empty_table() {\n        let mut simple_table = setup_test_table();\n\n        // Test for First item insertion\n        simple_table.insert_item(SimpleItem {\n            name: format!(\"{} Name\", 1),\n        });\n\n   
     assert!(simple_table.len() == 1);\n    }\n}\n\n/// This is the same as cursive::wrap_impl(), but without into_inner() method, that moves out the\n/// value, since our views implements drop() and cannot be moved out.\n#[macro_export]\nmacro_rules! wrap_impl_no_move {\n    (self.$v:ident: $t:ty) => {\n        type V = $t;\n\n        fn with_view<F, R>(&self, f: F) -> ::std::option::Option<R>\n        where\n            F: ::std::ops::FnOnce(&Self::V) -> R,\n        {\n            ::std::option::Option::Some(f(&self.$v))\n        }\n\n        fn with_view_mut<F, R>(&mut self, f: F) -> ::std::option::Option<R>\n        where\n            F: ::std::ops::FnOnce(&mut Self::V) -> R,\n        {\n            ::std::option::Option::Some(f(&mut self.$v))\n        }\n    };\n}\n"
  },
  {
    "path": "src/view/text_log_view.rs",
    "content": "use anyhow::Result;\nuse std::sync::{Arc, Mutex};\n\nuse chrono::{DateTime, Duration, Local};\nuse chrono_tz::Tz;\nuse cursive::view::ViewWrapper;\n\nuse crate::common::RelativeDateTime;\nuse crate::interpreter::{\n    BackgroundRunner, ContextArc, TextLogArguments, WorkerEvent, clickhouse::Columns,\n};\nuse crate::view::{LogEntry, LogView};\nuse crate::wrap_impl_no_move;\n\npub type DateTime64 = DateTime<Local>;\npub type DateTimeArc = Arc<Mutex<DateTime64>>;\n\npub struct TextLogView {\n    inner_view: LogView,\n    last_event_time_microseconds: DateTimeArc,\n\n    #[allow(unused)]\n    bg_runner: Option<BackgroundRunner>,\n}\n\n// flush_interval_milliseconds for each *_log table from the config.xml/yml\nconst FLUSH_INTERVAL_MILLISECONDS: i64 = 7500;\n\nimpl TextLogView {\n    pub fn new(view_name: &'static str, context: ContextArc, args: TextLogArguments) -> Self {\n        let flush_interval_milliseconds =\n            Duration::try_milliseconds(FLUSH_INTERVAL_MILLISECONDS).unwrap();\n        let TextLogArguments {\n            query_ids,\n            logger_names,\n            hostname,\n            message_filter,\n            max_level,\n            start,\n            end,\n        } = args;\n        let last_event_time_microseconds = Arc::new(Mutex::new(start));\n\n        let (delay, is_cluster, wrap, no_strip_hostname_suffix, descending) = {\n            let ctx = context.lock().unwrap();\n            // Only show hostname in logs when in cluster mode AND no host filter is active\n            let show_hostname =\n                ctx.options.clickhouse.cluster.is_some() && ctx.selected_host.is_none();\n            (\n                ctx.options.view.delay_interval,\n                show_hostname,\n                ctx.options.view.wrap,\n                ctx.options.view.no_strip_hostname_suffix,\n                ctx.options.clickhouse.logs_order == crate::interpreter::options::LogsOrder::Desc,\n            )\n        };\n\n        let mut 
bg_runner = None;\n        // Start pulling only if the query did not finished, i.e. we don't know the end time.\n        // (but respect the FLUSH_INTERVAL_MILLISECONDS)\n        let now = Local::now();\n        if logger_names.is_none()\n            && let Some(mut end_date) = end.get_date_time()\n            && ((now - end_date) >= flush_interval_milliseconds || query_ids.is_none())\n        {\n            // It is possible to have messages in the system.text_log, whose\n            // event_time_microseconds > max(event_time_microseconds) from system.query_log\n            // But let's consider that 3 seconds is enough.\n            if query_ids.is_some() {\n                end_date += Duration::try_seconds(3).unwrap();\n            }\n            context.lock().unwrap().worker.send(\n                true,\n                WorkerEvent::TextLog(\n                    view_name,\n                    TextLogArguments {\n                        query_ids,\n                        logger_names: None,\n                        hostname,\n                        message_filter,\n                        max_level,\n                        start,\n                        end: RelativeDateTime::from(end_date),\n                    },\n                ),\n            );\n        } else {\n            let update_last_event_time_microseconds = last_event_time_microseconds.clone();\n            let update_callback_context = context.clone();\n\n            let is_first_invocation = Arc::new(Mutex::new(true));\n            let update_callback = move |force: bool| {\n                let effective_force = if *is_first_invocation.lock().unwrap() {\n                    *is_first_invocation.lock().unwrap() = false;\n                    true\n                } else {\n                    force\n                };\n\n                update_callback_context.lock().unwrap().worker.send(\n                    effective_force,\n                    WorkerEvent::TextLog(\n                     
   view_name,\n                        TextLogArguments {\n                            query_ids: query_ids.clone(),\n                            logger_names: logger_names.clone(),\n                            hostname: hostname.clone(),\n                            message_filter: message_filter.clone(),\n                            max_level: max_level.clone(),\n                            start: *update_last_event_time_microseconds.lock().unwrap(),\n                            end: end.clone(),\n                        },\n                    ),\n                );\n            };\n\n            let (bg_runner_cv, bg_runner_force) = {\n                let ctx = context.lock().unwrap();\n                (\n                    ctx.background_runner_cv.clone(),\n                    ctx.background_runner_force.clone(),\n                )\n            };\n            let mut created_bg_runner = BackgroundRunner::new(delay, bg_runner_cv, bg_runner_force);\n            created_bg_runner.start(update_callback);\n            bg_runner = Some(created_bg_runner);\n        }\n\n        TextLogView {\n            inner_view: LogView::new(is_cluster, wrap, no_strip_hostname_suffix, descending),\n            last_event_time_microseconds,\n            bg_runner,\n        }\n    }\n\n    pub fn update(&mut self, logs_block: Columns) -> Result<()> {\n        let mut last_event_time_microseconds = self.last_event_time_microseconds.lock().unwrap();\n\n        let mut logs = Vec::<LogEntry>::new();\n        for i in 0..logs_block.row_count() {\n            let log_entry = LogEntry {\n                host_name: logs_block.get::<_, _>(i, \"host_name\")?,\n                display_host_name: None,\n                event_time_microseconds: logs_block\n                    .get::<DateTime<Tz>, _>(i, \"event_time_microseconds\")?\n                    .with_timezone(&Local),\n                thread_id: logs_block.get::<_, _>(i, \"thread_id\")?,\n                level: logs_block.get::<_, 
_>(i, \"level\")?,\n                message: logs_block.get::<_, _>(i, \"message\")?,\n                query_id: logs_block.get::<_, _>(i, \"query_id\").ok(),\n                logger_name: logs_block.get::<_, _>(i, \"logger_name\").ok(),\n            };\n\n            if *last_event_time_microseconds < log_entry.event_time_microseconds {\n                *last_event_time_microseconds = log_entry.event_time_microseconds;\n            }\n\n            logs.push(log_entry);\n        }\n\n        self.inner_view.push_logs(logs);\n\n        return Ok(());\n    }\n}\n\nimpl ViewWrapper for TextLogView {\n    wrap_impl_no_move!(self.inner_view: LogView);\n}\n"
  },
  {
    "path": "src/view/utils.rs",
    "content": "use crate::interpreter::ContextArc;\nuse cursive::event::Key;\nuse cursive::theme::{ColorStyle, PaletteColor};\nuse cursive::traits::Resizable;\nuse cursive::view::Offset;\nuse cursive::views::{EditView, LinearLayout, OnEventView, ResizedView, TextView};\nuse cursive::{Cursive, XY};\n\n/// Shows a less-style filter/options prompt at the bottom left of the screen\n///\n/// # Arguments\n/// * `on_submit` - Callback to execute when the user submits the filter (presses Enter)\n///\n/// The filter prompt appears at the bottom-left corner with a `prefix`.\n/// The callback receives the entered text (without the `prefix`).\n/// Supports Up/Down arrow keys to navigate through search history.\n///\n/// TODO: add a callback in case of view has been removed w/o any item selected\npub fn show_bottom_prompt<F>(siv: &mut Cursive, prefix: &'static str, on_submit: F)\nwhere\n    F: Fn(&mut Cursive, &str) + 'static + Send + Sync,\n{\n    // Get search history from context\n    let context = siv.user_data::<ContextArc>().unwrap().clone();\n    let search_history = context.lock().unwrap().search_history.clone();\n    let search_history_submit = search_history.clone();\n\n    search_history.reset_index();\n\n    let prompt = TextView::new(prefix).style(ColorStyle::new(\n        PaletteColor::Primary,\n        PaletteColor::Background,\n    ));\n\n    let search_history_up = search_history.clone();\n    let search_history_down = search_history.clone();\n\n    let edit_view = EditView::new()\n        .on_submit(move |siv: &mut Cursive, text: &str| {\n            // Add to history before calling the callback\n            search_history_submit.add_entry(text.to_string());\n            on_submit(siv, text);\n        })\n        .style(ColorStyle::new(\n            PaletteColor::Primary,\n            PaletteColor::Background,\n        ))\n        .full_width();\n\n    let edit_with_history = OnEventView::new(edit_view)\n        .on_pre_event_inner(Key::Up, move |v: &mut 
ResizedView<EditView>, _event| {\n            let edit = v.get_inner_mut();\n            let current = edit.get_content();\n            if let Some(prev) = search_history_up.navigate_up(&current) {\n                edit.set_content(prev);\n            }\n            Some(cursive::event::EventResult::Consumed(None))\n        })\n        .on_pre_event_inner(Key::Down, move |v: &mut ResizedView<EditView>, _event| {\n            let edit = v.get_inner_mut();\n            if let Some(next) = search_history_down.navigate_down() {\n                edit.set_content(next);\n            }\n            Some(cursive::event::EventResult::Consumed(None))\n        });\n\n    let filter_bar = LinearLayout::horizontal()\n        .child(prompt)\n        .child(edit_with_history)\n        .full_width()\n        .fixed_height(1);\n\n    // Position at bottom left using add_transparent_layer_at\n    let screen_size = siv.screen_size();\n    let position = XY::new(\n        Offset::Absolute(0),\n        Offset::Absolute(screen_size.y.saturating_sub(1)),\n    );\n\n    siv.screen_mut()\n        .add_transparent_layer_at(position, filter_bar);\n}\n"
  },
  {
    "path": "tests/configs/accept_invalid_certificate.yaml",
    "content": "accept-invalid-certificate: true\n"
  },
  {
    "path": "tests/configs/basic.xml",
    "content": "<clickhouse>\n  <user>foo</user>\n  <password>bar</password>\n</clickhouse>\n"
  },
  {
    "path": "tests/configs/basic.yaml",
    "content": "---\nuser: foo\npassword: bar\n"
  },
  {
    "path": "tests/configs/chdig_basic.yaml",
    "content": "clickhouse:\n  url: \"tcp://config-host:9000\"\n  host: \"config-host\"\n  port: 9440\n  user: \"config_user\"\n  password: \"config_pass\"\n  secure: true\n  cluster: \"my_cluster\"\n  history: true\n  internal_queries: true\n  limit: 50000\n  skip_unavailable_shards: true\n  connection: \"prod\"\n  config: \"/path/to/client/config.yaml\"\n\nview:\n  delay_interval: 5000\n  group_by: true\n  no_subqueries: true\n  start: \"2hours\"\n  end: \"30min\"\n  wrap: true\n  no_strip_hostname_suffix: true\n  queries_limit: 500\n\nservice:\n  log: \"/tmp/chdig.log\"\n  pastila_clickhouse_host: \"https://custom.host/\"\n  pastila_url: \"https://custom.pastila/\"\n\nperfetto:\n  opentelemetry_span_log: true\n  trace_log: true\n  query_metric_log: true\n  part_log: false\n  query_thread_log: true\n  text_log: false\n"
  },
  {
    "path": "tests/configs/chdig_empty.yaml",
    "content": ""
  },
  {
    "path": "tests/configs/chdig_partial.yaml",
    "content": "clickhouse:\n  host: \"partial-host\"\n  user: \"partial_user\"\n\nview:\n  delay_interval: 10000\n"
  },
  {
    "path": "tests/configs/connections.yaml",
    "content": "---\nconnections_credentials:\n  play:\n    name: play\n    hostname: play.clickhouse.com\n    secure: true\n\n  play-tls:\n    name: play-tls\n    hostname: play.clickhouse.com\n    secure: true\n    ca_certificate: ca\n    client_certificate: cert\n    client_private_key: key\n    skip_verify: true\n"
  },
  {
    "path": "tests/configs/empty.xml",
    "content": "<clickhouse>\n</clickhouse>\n"
  },
  {
    "path": "tests/configs/empty.yaml",
    "content": ""
  },
  {
    "path": "tests/configs/tls.xml",
    "content": "<clickhouse>\n  <secure>true</secure>\n  <openSSL>\n    <client>\n      <verificationMode>strict</verificationMode>\n      <certificateFile>cert</certificateFile>\n      <privateKeyFile>key</privateKeyFile>\n      <caConfig>ca</caConfig>\n    </client>\n  </openSSL>\n</clickhouse>\n"
  },
  {
    "path": "tests/configs/tls.yaml",
    "content": "---\nsecure: true\nopenSSL:\n  client:\n    verificationMode: strict\n    certificateFile: cert\n    privateKeyFile: key\n    caConfig: ca\n"
  },
  {
    "path": "tests/configs/unknown_directives.xml",
    "content": "<clickhouse>\n  <foo>bar</foo>\n</clickhouse>\n"
  },
  {
    "path": "tests/configs/unknown_directives.yaml",
    "content": "---\nfoo: bar\n"
  },
  {
    "path": "typos.toml",
    "content": "# typos.toml\n\n[default.extend-identifiers]\nratatui = \"ratatui\"\nthr = \"thr\"\n\n[default.extend-words]\n# Used in imported code to reduce changes with upstream\ncolum = \"colum\"\nvizualization = \"vizualization\"\nindicies = \"indicies\"\nseperators = \"seperators\"\n"
  }
]