Full Code of Vonng/pg_exporter for AI

main e303f2ad915c cached
158 files
1.5 MB
351.8k tokens
226 symbols
1 requests
Download .txt
Showing preview only (1,590K chars total). Download the full file or copy to clipboard to get everything.
Repository: Vonng/pg_exporter
Branch: main
Commit: e303f2ad915c
Files: 158
Total size: 1.5 MB

Directory structure:
gitextract_340wwud1/

├── .github/
│   └── workflows/
│       ├── release.yaml
│       └── test-release.yaml
├── .gitignore
├── .goreleaser.yml
├── Dockerfile
├── Dockerfile.goreleaser
├── LICENSE
├── Makefile
├── README.md
├── config/
│   ├── 0000-doc.yml
│   ├── 0110-pg.yml
│   ├── 0120-pg_meta.yml
│   ├── 0130-pg_setting.yml
│   ├── 0210-pg_repl.yml
│   ├── 0220-pg_sync_standby.yml
│   ├── 0230-pg_downstream.yml
│   ├── 0240-pg_slot.yml
│   ├── 0250-pg_recv.yml
│   ├── 0260-pg_sub.yml
│   ├── 0270-pg_origin.yml
│   ├── 0300-pg_io.yml
│   ├── 0310-pg_size.yml
│   ├── 0320-pg_archiver.yml
│   ├── 0330-pg_bgwriter.yml
│   ├── 0331-pg_checkpointer.yml
│   ├── 0340-pg_ssl.yml
│   ├── 0350-pg_checkpoint.yml
│   ├── 0355-pg_timeline.yml
│   ├── 0360-pg_recovery.yml
│   ├── 0370-pg_slru.yml
│   ├── 0380-pg_shmem.yml
│   ├── 0390-pg_wal.yml
│   ├── 0410-pg_activity.yml
│   ├── 0420-pg_wait.yml
│   ├── 0430-pg_backend.yml
│   ├── 0440-pg_xact.yml
│   ├── 0450-pg_lock.yml
│   ├── 0460-pg_query.yml
│   ├── 0510-pg_vacuuming.yml
│   ├── 0520-pg_indexing.yml
│   ├── 0530-pg_clustering.yml
│   ├── 0540-pg_backup.yml
│   ├── 0610-pg_db.yml
│   ├── 0620-pg_db_confl.yml
│   ├── 0640-pg_pubrel.yml
│   ├── 0650-pg_subrel.yml
│   ├── 0700-pg_table.yml
│   ├── 0710-pg_index.yml
│   ├── 0720-pg_func.yml
│   ├── 0730-pg_seq.yml
│   ├── 0740-pg_relkind.yml
│   ├── 0750-pg_defpart.yml
│   ├── 0810-pg_table_size.yml
│   ├── 0820-pg_table_bloat.yml
│   ├── 0830-pg_index_bloat.yml
│   ├── 0910-pgbouncer_list.yml
│   ├── 0920-pgbouncer_database.yml
│   ├── 0930-pgbouncer_stat.yml
│   ├── 0940-pgbouncer_pool.yml
│   ├── 1000-pg_wait_event.yml
│   ├── 1800-pg_tsdb_hypertable.yml
│   ├── 1900-pg_citus.yml
│   └── 2000-pg_heartbeat.yml
├── docker/
│   ├── .dockerignore
│   ├── README.md
│   ├── build.sh
│   └── release.sh
├── exporter/
│   ├── arg.go
│   ├── args_normalize.go
│   ├── args_normalize_test.go
│   ├── collector.go
│   ├── column.go
│   ├── concurrency_test.go
│   ├── config.go
│   ├── config_coverage_pg9_test.go
│   ├── config_coverage_test.go
│   ├── config_merged_test.go
│   ├── config_style_test.go
│   ├── config_test.go
│   ├── exporter.go
│   ├── exporter_handlers_opts_test.go
│   ├── global.go
│   ├── health_state_test.go
│   ├── main.go
│   ├── metrics_lifecycle_test.go
│   ├── pgurl.go
│   ├── pgurl_test.go
│   ├── predicate_cache_test.go
│   ├── probehealth_pgbouncer_test.go
│   ├── prom_validate.go
│   ├── query.go
│   ├── query_column_test.go
│   ├── reload_signals_unix.go
│   ├── reload_signals_windows.go
│   ├── reload_test.go
│   ├── server.go
│   ├── server_exporter_test.go
│   ├── testmain_test.go
│   ├── utils.go
│   ├── utils_test.go
│   ├── validate_labels.go
│   └── validate_labels_test.go
├── go.mod
├── go.sum
├── hugo.yaml
├── legacy/
│   ├── README.md
│   ├── config/
│   │   ├── 0000-doc.yml
│   │   ├── 0110-pg.yml
│   │   ├── 0120-pg_meta.yml
│   │   ├── 0130-pg_setting.yml
│   │   ├── 0210-pg_repl.yml
│   │   ├── 0220-pg_sync_standby.yml
│   │   ├── 0230-pg_downstream.yml
│   │   ├── 0240-pg_slot.yml
│   │   ├── 0250-pg_recv.yml
│   │   ├── 0270-pg_origin.yml
│   │   ├── 0310-pg_size.yml
│   │   ├── 0320-pg_archiver.yml
│   │   ├── 0330-pg_bgwriter.yml
│   │   ├── 0331-pg_checkpointer.yml
│   │   ├── 0340-pg_ssl.yml
│   │   ├── 0350-pg_checkpoint.yml
│   │   ├── 0355-pg_timeline.yml
│   │   ├── 0360-pg_recovery.yml
│   │   ├── 0410-pg_activity.yml
│   │   ├── 0420-pg_wait.yml
│   │   ├── 0440-pg_xact.yml
│   │   ├── 0450-pg_lock.yml
│   │   ├── 0460-pg_query.yml
│   │   ├── 0610-pg_db.yml
│   │   ├── 0620-pg_db_confl.yml
│   │   ├── 0700-pg_table.yml
│   │   ├── 0710-pg_index.yml
│   │   ├── 0720-pg_func.yml
│   │   ├── 0740-pg_relkind.yml
│   │   ├── 0810-pg_table_size.yml
│   │   ├── 0820-pg_table_bloat.yml
│   │   ├── 0830-pg_index_bloat.yml
│   │   ├── 0910-pgbouncer_list.yml
│   │   ├── 0920-pgbouncer_database.yml
│   │   ├── 0930-pgbouncer_stat.yml
│   │   ├── 0940-pgbouncer_pool.yml
│   │   ├── 1800-pg_tsdb_hypertable.yml
│   │   ├── 1900-pg_citus.yml
│   │   └── 2000-pg_heartbeat.yml
│   └── pg_exporter.yml
├── main.go
├── monitor/
│   ├── initdb.sh
│   ├── pgrds-instance.json
│   └── pgsql-exporter.json
├── package/
│   ├── nfpm-amd64-deb.yaml
│   ├── nfpm-amd64-rpm.yaml
│   ├── nfpm-arm64-deb.yaml
│   ├── nfpm-arm64-rpm.yaml
│   ├── pg_exporter.default
│   ├── pg_exporter.service
│   └── preinstall.sh
└── pg_exporter.yml

================================================
FILE CONTENTS
================================================

================================================
FILE: .github/workflows/release.yaml
================================================
# Release pipeline: on every semver tag push, run the test suite, then build
# and publish all release artifacts (archives, rpm/deb, Docker images) via GoReleaser.
name: Release

on:
  push:
    tags:
      - 'v*.*.*'  # semantic-version tags only (e.g. v1.2.3)

permissions:
  contents: write  # required so GoReleaser can create the GitHub release

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0  # full history: GoReleaser derives the changelog from git log

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'  # keep the CI Go version in lockstep with go.mod
          cache: true

      # Gate the release on tests and vet — a red build must not publish.
      - name: Run unit tests
        run: go test ./...

      - name: Run go vet
        run: go vet ./...

      # QEMU + Buildx enable the multi-arch (amd64/arm64) Docker image builds.
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # Keep dist/ as a workflow artifact even if the release step fails,
      # so partial builds can be inspected.
      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: dist
          path: dist/


================================================
FILE: .github/workflows/test-release.yaml
================================================
# Dry-run of the release pipeline: validates the GoReleaser config and builds
# a snapshot (nothing is published) whenever release tooling changes.
name: Test Release

on:
  workflow_dispatch:  # allow manual trigger
  pull_request:
    paths:  # only run when release tooling itself changes
      - '.goreleaser.yml'
      - '.github/workflows/release.yaml'
      - '.github/workflows/test-release.yaml'

permissions:
  contents: read  # read-only: this workflow never publishes anything

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0  # full history so GoReleaser can compute versions/changelog

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
          cache: true

      # QEMU + Buildx mirror the real release environment for multi-arch builds.
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Run unit tests
        run: go test ./...

      - name: Run go vet
        run: go vet ./...

      # Static validation of .goreleaser.yml.
      - name: Test GoReleaser config
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser
          version: latest
          args: check

      # Full snapshot build without publishing or Docker pushes.
      - name: Build snapshot
        uses: goreleaser/goreleaser-action@v6
        with:
          distribution: goreleaser
          version: latest
          args: release --snapshot --clean --skip=publish,docker

      # Best-effort listing (|| true) — purely informational.
      - name: List artifacts
        run: |
          echo "Generated artifacts:"
          ls -lh dist/*.tar.gz || true
          echo ""
          echo "Checksums:"
          cat dist/checksums.txt || true


================================================
FILE: .gitignore
================================================
# binary files
pg_exporter

# tmp files
test/
deploy/
upload.sh
temp/
dist/
.DS_Store

# IDE files
.vscode/
.idea/
.code/
.claude
.codex/
.codex_tmp/
pg_exporter.iml
CLAUDE.md

# hugo documentation site build output (see hugo.yaml)
.hugo_build.lock
public/
resources/
tmp/


================================================
FILE: .goreleaser.yml
================================================
version: 2

env:
  - CGO_ENABLED=0  # fully static binaries, no libc dependency

before:
  hooks:
    - go mod download
    - go mod tidy

builds:
  - id: pg_exporter
    main: ./main.go
    binary: pg_exporter
    goos:
      - linux
      - darwin
      - windows
    goarch:
      - amd64
      - arm64
      - ppc64le
    # Removed the dead `goarm: [6, 7]` section and the `windows/arm` ignore
    # rule: GOARM only applies when "arm" is listed under goarch, which it
    # is not, so neither entry ever affected the build matrix.
    goamd64:
      - v1  # baseline x86-64 for maximum hardware compatibility
    ignore:
      # Darwin only supports amd64 and arm64
      - goos: darwin
        goarch: ppc64le
      # Windows builds are amd64-only
      - goos: windows
        goarch: arm64
      - goos: windows
        goarch: ppc64le
    ldflags:
      - -s -w
      - -extldflags "-static"
      # Version metadata injected into the exporter package at build time.
      - -X 'pg_exporter/exporter.Version={{.Version}}'
      - -X 'pg_exporter/exporter.Branch={{.Branch}}'
      - -X 'pg_exporter/exporter.Revision={{.ShortCommit}}'
      - -X 'pg_exporter/exporter.BuildDate={{.Date}}'
    flags:
      - -a

archives:
  - id: pg_exporter
    # Archive name, e.g. pg_exporter-1.2.3.linux-amd64.tar.gz; the {{- ...}}
    # whitespace trimming collapses the folded template to one line.
    # NOTE(review): the "386" and "arm" branches are unreachable with the
    # current builds.goarch list (amd64/arm64/ppc64le) — confirm intent.
    name_template: >-
      {{ .ProjectName }}-{{ .Version }}.{{ .Os }}-
      {{- if eq .Arch "amd64" }}amd64
      {{- else if eq .Arch "386" }}386
      {{- else if eq .Arch "arm64" }}arm64
      {{- else if eq .Arch "arm" }}armv{{ .Arm }}
      {{- else if eq .Arch "ppc64le" }}ppc64le
      {{- else }}{{ .Arch }}{{ end }}
    # Extra files shipped alongside the binary in every archive.
    files:
      - pg_exporter.yml
      - LICENSE
      - package/pg_exporter.default
      - package/pg_exporter.service

# Native OS packages. Note the naming split: the rpm package is named
# `pg_exporter` while the deb is `pg-exporter` (Debian policy forbids
# underscores in package names).
nfpms:
  - id: pg_exporter_rpm
    package_name: pg_exporter
    # e.g. pg_exporter-1.2.3-1.x86_64.rpm (rpm uses x86_64/aarch64 names)
    file_name_template: >-
      {{ .PackageName }}-{{ .Version }}-{{ .Release }}.
      {{- if eq .Arch "amd64" }}x86_64
      {{- else if eq .Arch "arm64" }}aarch64
      {{- else }}{{ .Arch }}{{ end }}
    vendor: PGSTY
    homepage: https://pigsty.io/docs/pg_exporter
    maintainer: Ruohang Feng <rh@vonng.com>
    description: |
      Prometheus exporter for PostgreSQL / Pgbouncer server metrics.
      Supported version: Postgres 9.4 - 17+ & Pgbouncer 1.8 - 1.24+
      Part of Project Pigsty -- Battery Included PostgreSQL Distribution
      with ultimate observability support: https://pigsty.io/docs
    license: Apache-2.0
    formats:
      - rpm
    bindir: /usr/bin
    release: "1"
    section: database
    priority: optional
    contents:
      # config|noreplace preserves locally-edited configs across upgrades.
      # NOTE(review): mode 0700 on non-executable config files is unusual
      # (0600 would be conventional) — confirm this is intentional.
      - src: pg_exporter.yml
        dst: /etc/pg_exporter.yml
        type: config|noreplace
        file_info:
          mode: 0700
          owner: prometheus
          group: prometheus
      - src: package/pg_exporter.default
        dst: /etc/default/pg_exporter
        type: config|noreplace
        file_info:
          mode: 0700
          owner: prometheus
          group: prometheus
      - src: package/pg_exporter.service
        dst: /usr/lib/systemd/system/pg_exporter.service
        type: config
      - src: LICENSE
        dst: /usr/share/doc/pg_exporter/LICENSE
        file_info:
          mode: 0644
    scripts:
      # creates the prometheus user/group before files are installed
      preinstall: package/preinstall.sh
    rpm:
      compression: gzip
      prefixes:
        - /usr/bin

  - id: pg_exporter_deb
    package_name: pg-exporter
    # e.g. pg-exporter_1.2.3-1_amd64.deb
    file_name_template: >-
      {{ .PackageName }}_{{ .Version }}-{{ .Release }}_
      {{- if eq .Arch "amd64" }}amd64
      {{- else if eq .Arch "arm64" }}arm64
      {{- else }}{{ .Arch }}{{ end }}
    vendor: PGSTY
    homepage: https://pigsty.io/docs/pg_exporter
    maintainer: Ruohang Feng <rh@vonng.com>
    description: |
      Prometheus exporter for PostgreSQL / Pgbouncer server metrics.
      Supported version: Postgres 9.4 - 17+ & Pgbouncer 1.8 - 1.24+
      Part of Project Pigsty -- Battery Included PostgreSQL Distribution
      with ultimate observability support: https://pigsty.io/docs
    license: Apache-2.0
    formats:
      - deb
    bindir: /usr/bin
    release: "1"
    section: database
    priority: optional
    contents:
      - src: pg_exporter.yml
        dst: /etc/pg_exporter.yml
        type: config|noreplace
        file_info:
          mode: 0700
          owner: prometheus
          group: prometheus
      - src: package/pg_exporter.default
        dst: /etc/default/pg_exporter
        type: config|noreplace
        file_info:
          mode: 0700
          owner: prometheus
          group: prometheus
      # Debian uses /lib/systemd (vs /usr/lib/systemd on rpm systems).
      - src: package/pg_exporter.service
        dst: /lib/systemd/system/pg_exporter.service
        type: config
      - src: LICENSE
        dst: /usr/share/doc/pg_exporter/LICENSE
        file_info:
          mode: 0644
    scripts:
      preinstall: package/preinstall.sh

checksum:
  name_template: 'checksums.txt'
  algorithm: sha256

snapshot:
  # snapshot builds (test-release workflow) are versioned "<tag>-next"
  version_template: "{{ .Tag }}-next"

changelog:
  sort: asc
  filters:
    exclude:  # drop noise commits from the generated changelog
      - '^docs:'
      - '^test:'
      - '^chore:'
      - 'Merge pull request'
      - 'Merge branch'

release:
  # NOTE(review): releases publish to the pgsty org while this dump is of
  # Vonng/pg_exporter — presumably a repo mirror/transfer; confirm.
  github:
    owner: pgsty
    name: pg_exporter
  draft: false
  prerelease: false
  mode: replace     # Replace existing release with same tag
  replace_existing_artifacts: true  # Replace existing artifacts
  name_template: "{{.ProjectName}}-v{{.Version}}"
  disable: false
  discussion_category_name: ""  # Skip discussion creation

announce:
  skip: true  # Skip all announcements

# Docker configuration for multi-arch images: one single-arch image is built
# per platform, then docker_manifests stitches them into multi-arch tags.
dockers:
  - id: pg_exporter_amd64
    ids: 
      - pg_exporter
    goos: linux
    goarch: amd64
    # arch-suffixed tags; the un-suffixed tags come from docker_manifests below
    image_templates:
      - "pgsty/pg_exporter:{{ .Version }}-amd64"
      - "pgsty/pg_exporter:latest-amd64"
    dockerfile: Dockerfile.goreleaser
    use: buildx
    build_flag_templates:
      - "--platform=linux/amd64"
      - "--label=org.opencontainers.image.version={{.Version}}"
      - "--label=org.opencontainers.image.created={{.Date}}"
      - "--label=org.opencontainers.image.revision={{.FullCommit}}"
    # files copied into the build context alongside the pre-built binary
    extra_files:
      - pg_exporter.yml
      - LICENSE

  - id: pg_exporter_arm64
    ids:
      - pg_exporter
    goos: linux
    goarch: arm64
    image_templates:
      - "pgsty/pg_exporter:{{ .Version }}-arm64"
      - "pgsty/pg_exporter:latest-arm64"
    dockerfile: Dockerfile.goreleaser
    use: buildx
    build_flag_templates:
      - "--platform=linux/arm64"
      - "--label=org.opencontainers.image.version={{.Version}}"
      - "--label=org.opencontainers.image.created={{.Date}}"
      - "--label=org.opencontainers.image.revision={{.FullCommit}}"
    extra_files:
      - pg_exporter.yml
      - LICENSE

# Multi-arch manifest tags that reference the per-arch images above.
docker_manifests:
  - name_template: "pgsty/pg_exporter:{{ .Version }}"
    image_templates:
      - "pgsty/pg_exporter:{{ .Version }}-amd64"
      - "pgsty/pg_exporter:{{ .Version }}-arm64"
  - name_template: "pgsty/pg_exporter:latest"
    image_templates:
      - "pgsty/pg_exporter:latest-amd64"
      - "pgsty/pg_exporter:latest-arm64"

================================================
FILE: Dockerfile
================================================
# syntax=docker/dockerfile:1
#
# Build a self-contained pg_exporter container with a clean environment and no
# dependencies.
#
# build with
#
#   docker buildx build -f Dockerfile --tag pg_exporter .
#
FROM golang:1.26.2-alpine AS builder-env

# Module proxy/sumdb overridable at build time (e.g. for air-gapped builds).
ARG GOPROXY=https://proxy.golang.org,direct
ARG GOSUMDB=sum.golang.org
ENV GOPROXY=${GOPROXY}
ENV GOSUMDB=${GOSUMDB}

WORKDIR /build

# Download modules first so this layer caches independently of source edits.
COPY go.mod go.sum ./
RUN \
  --mount=type=cache,target=/go/pkg/mod \
  --mount=type=cache,target=/root/.cache/go-build \
  CGO_ENABLED=0 GOOS=linux go mod download

# Static build. The '-a' flag was removed: it forces rebuilding every package
# from scratch, which defeats the go-build cache mount above and slows every
# image build with no effect on the resulting binary.
COPY . /build
RUN \
  --mount=type=cache,target=/go/pkg/mod \
  --mount=type=cache,target=/root/.cache/go-build \
  CGO_ENABLED=0 GOOS=linux go build -o /pg_exporter .

# Final stage: scratch image with just the binary and its default config.
FROM scratch
LABEL org.opencontainers.image.authors="Ruohang Feng <rh@vonng.com>, Craig Ringer <craig.ringer@enterprisedb.com>" \
      org.opencontainers.image.url="https://github.com/pgsty/pg_exporter" \
      org.opencontainers.image.source="https://github.com/pgsty/pg_exporter" \
      org.opencontainers.image.licenses="Apache-2.0" \
      org.opencontainers.image.title="pg_exporter" \
      org.opencontainers.image.description="PostgreSQL/Pgbouncer metrics exporter for Prometheus"

WORKDIR /bin
COPY --from=builder-env /pg_exporter /bin/pg_exporter
COPY pg_exporter.yml /etc/pg_exporter.yml
EXPOSE 9630/tcp
ENTRYPOINT ["/bin/pg_exporter"]


================================================
FILE: Dockerfile.goreleaser
================================================
# Dockerfile for goreleaser
# This uses pre-built binaries from goreleaser instead of building from source.
# The build context is assembled by the `dockers` section of .goreleaser.yml,
# which supplies the compiled pg_exporter binary plus the extra_files
# (pg_exporter.yml, LICENSE).
FROM scratch

LABEL org.opencontainers.image.authors="Ruohang Feng <rh@vonng.com>" \
      org.opencontainers.image.url="https://github.com/pgsty/pg_exporter" \
      org.opencontainers.image.source="https://github.com/pgsty/pg_exporter" \
      org.opencontainers.image.licenses="Apache-2.0" \
      org.opencontainers.image.title="pg_exporter" \
      org.opencontainers.image.description="PostgreSQL/Pgbouncer metrics exporter for Prometheus"

WORKDIR /bin
COPY pg_exporter /bin/pg_exporter
COPY pg_exporter.yml /etc/pg_exporter.yml
COPY LICENSE /LICENSE

# default exporter listen port
EXPOSE 9630/tcp
ENTRYPOINT ["/bin/pg_exporter"]

================================================
FILE: LICENSE
================================================

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2019-2025 Ruohang Feng <rh@vonng.com>

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.


================================================
FILE: Makefile
================================================
#==============================================================#
# File      :   Makefile
# Mtime     :   2025-08-14
# License   :   Apache-2.0 @ https://github.com/pgsty/pg_exporter
# Copyright :   2018-2026  Ruohang Feng / Vonng (rh@vonng.com)
#==============================================================#
# Release version; override with `make VERSION=v1.3.0 <target>`.
# (Comments stay on their own lines: a trailing `#` on an assignment would
# embed trailing whitespace into the value.)
VERSION      ?= v1.2.2
# Build metadata injected into pg_exporter/exporter via -X ldflags.
BUILD_DATE   := $(shell date '+%Y%m%d%H%M%S')
GIT_BRANCH   := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
GIT_REVISION := $(shell git rev-parse --short HEAD 2>/dev/null  || echo "HEAD")
LDFLAGS_META := -X 'pg_exporter/exporter.Version=$(VERSION)' \
                -X 'pg_exporter/exporter.Branch=$(GIT_BRANCH)' \
                -X 'pg_exporter/exporter.Revision=$(GIT_REVISION)' \
                -X 'pg_exporter/exporter.BuildDate=$(BUILD_DATE)'
# Static release flags: strip symbols (-s -w) and force static linking.
LDFLAGS_STATIC := -s -w -extldflags \"-static\" $(LDFLAGS_META)

# Release Dir — per-platform staging directories under dist/<version>/
LINUX_AMD_DIR:=dist/$(VERSION)/pg_exporter-$(VERSION).linux-amd64
LINUX_ARM_DIR:=dist/$(VERSION)/pg_exporter-$(VERSION).linux-arm64
DARWIN_AMD_DIR:=dist/$(VERSION)/pg_exporter-$(VERSION).darwin-amd64
DARWIN_ARM_DIR:=dist/$(VERSION)/pg_exporter-$(VERSION).darwin-arm64
WINDOWS_DIR:=dist/$(VERSION)/pg_exporter-$(VERSION).windows-amd64


###############################################################
#                        Shortcuts                            #
###############################################################
# These targets never create files named after themselves, so declare them
# phony: otherwise a stray file called `build` or `clean` would silently
# disable the target.
.PHONY: build clean r release release-linux release-darwin \
        build-darwin-amd64 build-darwin-arm64 build-linux-amd64 build-linux-arm64 \
        linux-amd64 linux-arm64 darwin-amd64 darwin-arm64

# local development build with version metadata embedded
build:
	go build -ldflags "$(LDFLAGS_META)" -o pg_exporter
clean:
	rm -f pg_exporter
# static cross-compilation builds (CGO disabled; -a forces a full rebuild)
build-darwin-amd64:
	CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -a -ldflags "$(LDFLAGS_STATIC)" -o pg_exporter
build-darwin-arm64:
	CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -a -ldflags "$(LDFLAGS_STATIC)" -o pg_exporter
build-linux-amd64:
	CGO_ENABLED=0 GOOS=linux  GOARCH=amd64 go build -a -ldflags "$(LDFLAGS_STATIC)" -o pg_exporter
build-linux-arm64:
	CGO_ENABLED=0 GOOS=linux  GOARCH=arm64 go build -a -ldflags "$(LDFLAGS_STATIC)" -o pg_exporter

r: release
release: release-linux release-darwin

# Linux releases additionally produce rpm/deb packages via nfpm.
release-linux: linux-amd64 linux-arm64
linux-amd64: clean build-linux-amd64
	rm -rf $(LINUX_AMD_DIR) && mkdir -p $(LINUX_AMD_DIR)
	nfpm package --packager rpm --config package/nfpm-amd64-rpm.yaml --target dist/$(VERSION)
	nfpm package --packager deb --config package/nfpm-amd64-deb.yaml --target dist/$(VERSION)
	cp pg_exporter $(LINUX_AMD_DIR)/pg_exporter
	cp pg_exporter.yml $(LINUX_AMD_DIR)/pg_exporter.yml
	cp LICENSE $(LINUX_AMD_DIR)/LICENSE
	tar -czf dist/$(VERSION)/pg_exporter-$(VERSION).linux-amd64.tar.gz -C dist/$(VERSION) pg_exporter-$(VERSION).linux-amd64
	rm -rf $(LINUX_AMD_DIR)

linux-arm64: clean build-linux-arm64
	rm -rf $(LINUX_ARM_DIR) && mkdir -p $(LINUX_ARM_DIR)
	nfpm package --packager rpm --config package/nfpm-arm64-rpm.yaml --target dist/$(VERSION)
	nfpm package --packager deb --config package/nfpm-arm64-deb.yaml --target dist/$(VERSION)
	cp pg_exporter $(LINUX_ARM_DIR)/pg_exporter
	cp pg_exporter.yml $(LINUX_ARM_DIR)/pg_exporter.yml
	cp LICENSE $(LINUX_ARM_DIR)/LICENSE
	tar -czf dist/$(VERSION)/pg_exporter-$(VERSION).linux-arm64.tar.gz -C dist/$(VERSION) pg_exporter-$(VERSION).linux-arm64
	rm -rf $(LINUX_ARM_DIR)

# Darwin releases are tarballs only (no native package formats).
release-darwin: darwin-amd64 darwin-arm64
darwin-amd64: clean build-darwin-amd64
	rm -rf $(DARWIN_AMD_DIR) && mkdir -p $(DARWIN_AMD_DIR)
	cp pg_exporter $(DARWIN_AMD_DIR)/pg_exporter
	cp pg_exporter.yml $(DARWIN_AMD_DIR)/pg_exporter.yml
	cp LICENSE $(DARWIN_AMD_DIR)/LICENSE
	tar -czf dist/$(VERSION)/pg_exporter-$(VERSION).darwin-amd64.tar.gz -C dist/$(VERSION) pg_exporter-$(VERSION).darwin-amd64
	rm -rf $(DARWIN_AMD_DIR)

darwin-arm64: clean build-darwin-arm64
	rm -rf $(DARWIN_ARM_DIR) && mkdir -p $(DARWIN_ARM_DIR)
	cp pg_exporter $(DARWIN_ARM_DIR)/pg_exporter
	cp pg_exporter.yml $(DARWIN_ARM_DIR)/pg_exporter.yml
	cp LICENSE $(DARWIN_ARM_DIR)/LICENSE
	tar -czf dist/$(VERSION)/pg_exporter-$(VERSION).darwin-arm64.tar.gz -C dist/$(VERSION) pg_exporter-$(VERSION).darwin-arm64
	rm -rf $(DARWIN_ARM_DIR)



###############################################################
#                      Configuration                          #
###############################################################
# generate merged config from separated configuration
# generate merged config from separated collector definitions;
# a single truncating `>` redirect replaces the old rm + `>>` append pair,
# so the output is always rebuilt from scratch in one step
conf:
	cat config/*.yml > pg_exporter.yml

# generate legacy merged config for PostgreSQL 9.1 - 9.6
conf9:
	cat legacy/config/*.yml > legacy/pg_exporter.yml

# Backward-compatible alias (deprecated)
conf-pg9: conf9


###############################################################
#                         Release                             #
###############################################################
# create the per-version staging directory used by the release targets
release-dir:
	mkdir -p dist/$(VERSION)

# remove the per-version staging directory and every artifact packaged into it
release-clean:
	rm -rf dist/$(VERSION)

###############################################################
#                      GoReleaser                            #
###############################################################
# Install goreleaser if not present
# Install goreleaser into GOPATH/bin if it is not already on PATH
goreleaser-install:
	@which goreleaser > /dev/null || (echo "Installing goreleaser..." && go install github.com/goreleaser/goreleaser/v2@latest)

# Build a snapshot release locally (no git tag required, nothing published)
goreleaser-snapshot: goreleaser-install
	goreleaser release --snapshot --clean --skip=publish

# Build binaries only (snapshot mode, no packaging/publishing)
goreleaser-build: goreleaser-install
	goreleaser build --snapshot --clean

# Build a full release locally without the snapshot suffix (requires clean git state)
goreleaser-local: goreleaser-install
	goreleaser release --clean --skip=publish

# Publish a release with goreleaser (requires a git tag)
goreleaser-release: goreleaser-install
	goreleaser release --clean

# Test release (creates prerelease, no notifications)
# NOTE(review): the command is identical to goreleaser-release; the prerelease /
# no-notification behavior presumably comes from .goreleaser.yml settings — confirm
goreleaser-test-release: goreleaser-install
	@echo "Creating test release (prerelease mode, no notifications)..."
	goreleaser release --clean

# Production release (set prerelease to false in .goreleaser.yml first)
# NOTE(review): also identical to goreleaser-release; differs only by config intent
goreleaser-prod-release: goreleaser-install
	@echo "Creating production release (will notify subscribers if announce.skip is false)..."
	goreleaser release --clean

# Validate the .goreleaser.yml configuration
goreleaser-check: goreleaser-install
	goreleaser check

# New main release entry point using goreleaser
release-new: goreleaser-release


# build docker image
docker: docker-build
docker-build:
	./docker/build.sh
docker-release:
	./docker/release.sh

###############################################################
#                         Develop                             #
###############################################################
# install the freshly built binary system-wide with standard executable perms
install: build
	sudo install -m 0755 pg_exporter /usr/bin/pg_exporter

# remove the installed binary; -f (not -rf): it is a single file, and -f
# keeps the target quiet when nothing is installed
uninstall:
	sudo rm -f /usr/bin/pg_exporter

# run the previously built binary against the local config
runb:
	./pg_exporter --log.level=info --config=pg_exporter.yml --auto-discovery

# run straight from source
run:
	go run main.go --log.level=info --config=pg_exporter.yml --auto-discovery

# run from source with debug logging
debug:
	go run main.go --log.level=debug --config=pg_exporter.yml --auto-discovery

# fetch pg_* metrics from a locally running exporter, dropping comment lines
curl:
	curl localhost:9630/metrics | grep -v '#' | grep pg_

upload:
	./upload.sh

# serve the documentation site locally; `d` is a shorthand alias
d: dev
dev:
	hugo serve

# Declare every non-file target phony so make never skips one because a
# same-named file exists. The previous list referenced nonexistent targets
# (build-darwin, build-linux, release-windows) and omitted several real ones.
.PHONY: build clean build-darwin-amd64 build-darwin-arm64 build-linux-amd64 build-linux-arm64 \
 r release release-darwin release-linux linux-amd64 linux-arm64 darwin-amd64 darwin-arm64 \
 conf conf9 conf-pg9 release-dir release-clean \
 docker docker-build docker-release \
 install uninstall run runb debug curl upload d dev \
 goreleaser-install goreleaser-snapshot goreleaser-build goreleaser-local goreleaser-release \
 goreleaser-test-release goreleaser-prod-release goreleaser-check release-new


================================================
FILE: README.md
================================================
<p align="center">
  <img src="static/logo.png" alt="PG Exporter Logo" height="128" align="middle">
</p>

# PG EXPORTER

[![Website: https://pigsty.io/docs/pg_exporter](https://img.shields.io/badge/website-pigsty.io/docs/pg_exporter-slategray?style=flat&logo=cilium&logoColor=white)](https://pigsty.io/docs/pg_exporter)
[![DockerHub: pgsty/pg_exporter](https://img.shields.io/badge/docker-pgsty/pg_exporter-slategray?style=flat&logo=docker&logoColor=white)](https://hub.docker.com/r/pgsty/pg_exporter)
[![Version: 1.2.2](https://img.shields.io/badge/version-1.2.2-slategray?style=flat&logo=cilium&logoColor=white)](https://github.com/pgsty/pg_exporter/releases/tag/v1.2.2)
[![License: Apache-2.0](https://img.shields.io/github/license/pgsty/pg_exporter?logo=opensourceinitiative&logoColor=green&color=slategray)](https://github.com/pgsty/pg_exporter/blob/main/LICENSE)
[![GitHub Stars](https://img.shields.io/github/stars/pgsty/pg_exporter?style=flat&logo=github&logoColor=black&color=slategray)](https://star-history.com/#pgsty/pg_exporter&Date)
[![Go Report Card](https://goreportcard.com/badge/github.com/pgsty/pg_exporter)](https://goreportcard.com/report/github.com/pgsty/pg_exporter)

> **Advanced [PostgreSQL](https://www.postgresql.org) & [pgBouncer](https://www.pgbouncer.org/) metrics [exporter](https://prometheus.io/docs/instrumenting/exporters/) for [Prometheus](https://prometheus.io/)**

PG Exporter brings ultimate monitoring experience to your PostgreSQL with **declarative config**, **dynamic planning**, and **customizable collectors**. 
It provides **600+** metrics and ~3K time series per instance, covers everything you'll need for PostgreSQL observability.

Check [**https://demo.pigsty.io**](https://demo.pigsty.io/ui/) for live demo, which is built upon this exporter by [**Pigsty**](https://pigsty.io).

<div align="center">
    <a href="https://pigsty.io/docs/pg_exporter">Docs</a> •    
    <a href="#quick-start">Quick Start</a> •
    <a href="#features">Features</a> •
    <a href="#usage">Usage</a> •
    <a href="#api">API</a> •
    <a href="#deployment">Deployment</a> •
    <a href="#collectors">Collectors</a> •
    <a href="https://demo.pigsty.io/ui/">Demo</a>
</div><br>

[![pigsty-dashboard](https://pigsty.io/img/pigsty/dashboard.jpg)](https://demo.pigsty.io)


--------

## Features

- **Highly Customizable**: Define almost all metrics through declarative YAML configs
- **Full Coverage**: Monitor PostgreSQL (10-18+) and pgBouncer (1.8-1.25+) in a single exporter
- **Fine-grained Control**: Configure timeout, caching, skip conditions, and fatality per collector
- **Dynamic Planning**: Define multiple query branches based on different conditions
- **Self-monitoring**: Rich metrics about pg_exporter [itself](https://demo.pigsty.io/d/pgsql-exporter) for complete observability
- **Production-Ready**: Battle-tested in real-world environments across 12K+ cores for 6+ years
- **Auto-discovery**: Automatically discover and monitor multiple databases within an instance
- **Health Check APIs**: Comprehensive HTTP endpoints for service health and traffic routing
- **Extension Support**: `timescaledb`, `citus`, `pg_stat_statements`, `pg_wait_sampling`,...
- **Local-first URL behavior**: Built for on-host deployment, with implicit local target fallback and automatic `sslmode=disable` when omitted

> Also support PG 9.x with [legacy config bundle](legacy/).


--------

## Quick Start

RPM / DEB / Tarball available in the GitHub [release page](https://github.com/pgsty/pg_exporter/releases), and Pigsty's YUM / APT [Infra Repo](https://pigsty.io/docs/repo/infra).

To run this exporter, you need to pass the postgres/pgbouncer URL via env or arg:

```bash
PG_EXPORTER_URL='postgres://user:pass@host:port/postgres' pg_exporter
curl http://localhost:9630/metrics   # access metrics
```

There are built-in metrics such as `pg_up`, `pg_version`, `pg_in_recovery`, `pg_exporter_build_info`, and exporter self-metrics under `pg_exporter_*` (disable with `--disable-intro`).

**All other metrics are defined in the [`pg_exporter.yml`](pg_exporter.yml) config file**.

There are two monitoring dashboards in the [`monitor/`](monitor/) directory.

You can use [**Pigsty**](https://pigsty.io) to monitor existing PostgreSQL cluster or RDS, it will setup pg_exporter for you. 


--------

## Usage

```bash
usage: pg_exporter [<flags>]


Flags:
  -h, --[no-]help            Show context-sensitive help (also try --help-long and --help-man).
  -u, --url=URL              postgres target url
  -c, --config=CONFIG        path to config dir or file
      --web.listen-address=:9630 ...  
                             Addresses on which to expose metrics and web interface. 
      --web.config.file=""   Path to configuration file that can enable TLS or authentication. 
  -l, --label=""             constant lables:comma separated list of label=value pair ($PG_EXPORTER_LABEL)
  -t, --tag=""               tags,comma separated list of server tag ($PG_EXPORTER_TAG)
  -C, --[no-]disable-cache   force not using cache ($PG_EXPORTER_DISABLE_CACHE)
  -m, --[no-]disable-intro   disable internal/exporter self metrics ($PG_EXPORTER_DISABLE_INTRO)
  -a, --[no-]auto-discovery  automatically scrape all database for given server ($PG_EXPORTER_AUTO_DISCOVERY)
  -x, --exclude-database="template0,template1,postgres"  
                             excluded databases when enabling auto-discovery ($PG_EXPORTER_EXCLUDE_DATABASE)
  -i, --include-database=""  included databases when enabling auto-discovery ($PG_EXPORTER_INCLUDE_DATABASE)
  -n, --namespace=""         prefix of built-in metrics, (pg|pgbouncer) by default ($PG_EXPORTER_NAMESPACE)
  -f, --[no-]fail-fast       fail fast instead of waiting during start-up ($PG_EXPORTER_FAIL_FAST)
  -T, --connect-timeout=100  connect timeout in ms, 100 by default ($PG_EXPORTER_CONNECT_TIMEOUT)
  -P, --web.telemetry-path="/metrics"  
                             URL path under which to expose metrics. ($PG_EXPORTER_TELEMETRY_PATH)
  -D, --[no-]dry-run         dry run and print raw configs
  -E, --[no-]explain         explain server planned queries
      --log.level="info"     log level: debug|info|warn|error]
      --log.format="logfmt"  log format: logfmt|json
      --[no-]version         Show application version.
```

Parameters could be given via command-line args or environment variables. 

| CLI Arg                | Environment Variable           | Default Value                    |
|------------------------|--------------------------------|----------------------------------|
| `--url`                | `PG_EXPORTER_URL`              | `postgresql:///?sslmode=disable` |
| `--config`             | `PG_EXPORTER_CONFIG`           | `pg_exporter.yml`                |
| `--label`              | `PG_EXPORTER_LABEL`            |                                  |
| `--tag`                | `PG_EXPORTER_TAG`              |                                  |
| `--auto-discovery`     | `PG_EXPORTER_AUTO_DISCOVERY`   | `true`                           |
| `--disable-cache`      | `PG_EXPORTER_DISABLE_CACHE`    | `false`                          |
| `--fail-fast`          | `PG_EXPORTER_FAIL_FAST`        | `false`                          |
| `--exclude-database`   | `PG_EXPORTER_EXCLUDE_DATABASE` |                                  |
| `--include-database`   | `PG_EXPORTER_INCLUDE_DATABASE` |                                  |
| `--namespace`          | `PG_EXPORTER_NAMESPACE`        | `pg\|pgbouncer`                  |
| `--connect-timeout`    | `PG_EXPORTER_CONNECT_TIMEOUT`  | `100`                            |
| `--dry-run`            |                                | `false`                          |
| `--explain`            |                                | `false`                          |
| `--log.level`          |                                | `info`                           |
| `--log.format`         |                                | `logfmt`                         |
| `--web.listen-address` |                                | `:9630`                          |
| `--web.config.file`    |                                | `""`                             |
| `--web.telemetry-path` | `PG_EXPORTER_TELEMETRY_PATH`   | `/metrics`                       |

### Connection URL Defaults

- If `--url` / `PG_EXPORTER_URL` is not provided, pg_exporter falls back to a local-first default URL: `postgresql:///?sslmode=disable`.
- If `sslmode` is not explicitly set in the URL, pg_exporter injects `sslmode=disable` by default.
- This is an intentional design choice for common on-host deployments (`pg_exporter` and PostgreSQL/PgBouncer on the same machine), where loopback TLS adds overhead with little practical gain.
- If you need TLS for remote targets, provide `sslmode` explicitly in the connection URL (for example: `sslmode=require`, `verify-ca`, `verify-full`).


------

## API

PG Exporter provides a rich set of HTTP endpoints:

Here are `pg_exporter` REST APIs

```bash
# Fetch metrics (customizable)
curl localhost:9630/metrics

# Reload configuration
curl -X POST localhost:9630/reload

# Explain configuration
curl localhost:9630/explain

# Print Statistics
curl localhost:9630/stat

# Aliveness health check (200 up, 503 down)
curl localhost:9630/up
curl localhost:9630/health
curl localhost:9630/liveness
curl localhost:9630/readiness

# traffic route health check

### 200 if not in recovery, 404 if in recovery, 503 if server is down
curl localhost:9630/primary
curl localhost:9630/leader
curl localhost:9630/master
curl localhost:9630/read-write
curl localhost:9630/rw

### 200 if in recovery, 404 if not in recovery, 503 if server is down
curl localhost:9630/replica
curl localhost:9630/standby
curl localhost:9630/read-only
curl localhost:9630/ro

### 200 if server is ready for read traffic (including primary), 503 if server is down
curl localhost:9630/read
```


--------

## Build

To build a static stand-alone binary for docker scratch

```bash
CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o pg_exporter
```

Or [download](https://github.com/pgsty/pg_exporter/releases) the latest prebuilt binaries from release pages.

We also have pre-packaged RPM / DEB packages in the [Pigsty Infra Repo](https://pigsty.io/docs/repo/infra/)


--------

## Docker

You can find pre-built amd64/arm64 docker images here: [pgsty/pg_exporter](https://hub.docker.com/r/pgsty/pg_exporter)


--------

## Deployment

Redhat rpm and Debian/Ubuntu deb packages are made with `nfpm` for `x86/arm64`:

* `/usr/bin/pg_exporter`: the pg_exporter binary.
* [`/etc/pg_exporter.yml`](pg_exporter.yml): the config file
* [`/usr/lib/systemd/system/pg_exporter.service`](package/pg_exporter.service): systemd service file
* [`/etc/default/pg_exporter`](package/pg_exporter.default): systemd service envs & options


Which is also available on Pigsty's [Infra Repo](https://pigsty.io/docs/repo/infra).


------

## Collectors

Configs lie in the core of `pg_exporter`. Actually, this project contains more lines of YAML than go.

* A monolith battery-included config file: [`pg_exporter.yml`](pg_exporter.yml)
* Separated metrics definition in [`config/collector`](config/)
* Example of how to write a config file:  [`doc.yml`](config/0000-doc.yml)
* Legacy config bundle for PostgreSQL 9.1 - 9.6: [`legacy/`](legacy/) ([`legacy/README.md`](legacy/README.md))

Current `pg_exporter` is shipped with the following metrics collector definition files

- [0000-doc.yml](config/0000-doc.yml)
- [0110-pg.yml](config/0110-pg.yml)
- [0120-pg_meta.yml](config/0120-pg_meta.yml)
- [0130-pg_setting.yml](config/0130-pg_setting.yml)
- [0210-pg_repl.yml](config/0210-pg_repl.yml)
- [0220-pg_sync_standby.yml](config/0220-pg_sync_standby.yml)
- [0230-pg_downstream.yml](config/0230-pg_downstream.yml)
- [0240-pg_slot.yml](config/0240-pg_slot.yml)
- [0250-pg_recv.yml](config/0250-pg_recv.yml)
- [0260-pg_sub.yml](config/0260-pg_sub.yml)
- [0270-pg_origin.yml](config/0270-pg_origin.yml)
- [0300-pg_io.yml](config/0300-pg_io.yml)
- [0310-pg_size.yml](config/0310-pg_size.yml)
- [0320-pg_archiver.yml](config/0320-pg_archiver.yml)
- [0330-pg_bgwriter.yml](config/0330-pg_bgwriter.yml)
- [0331-pg_checkpointer.yml](config/0331-pg_checkpointer.yml)
- [0340-pg_ssl.yml](config/0340-pg_ssl.yml)
- [0350-pg_checkpoint.yml](config/0350-pg_checkpoint.yml)
- [0355-pg_timeline.yml](config/0355-pg_timeline.yml)
- [0360-pg_recovery.yml](config/0360-pg_recovery.yml)
- [0370-pg_slru.yml](config/0370-pg_slru.yml)
- [0380-pg_shmem.yml](config/0380-pg_shmem.yml)
- [0390-pg_wal.yml](config/0390-pg_wal.yml)
- [0410-pg_activity.yml](config/0410-pg_activity.yml)
- [0420-pg_wait.yml](config/0420-pg_wait.yml)
- [0430-pg_backend.yml](config/0430-pg_backend.yml)
- [0440-pg_xact.yml](config/0440-pg_xact.yml)
- [0450-pg_lock.yml](config/0450-pg_lock.yml)
- [0460-pg_query.yml](config/0460-pg_query.yml)
- [0510-pg_vacuuming.yml](config/0510-pg_vacuuming.yml)
- [0520-pg_indexing.yml](config/0520-pg_indexing.yml)
- [0530-pg_clustering.yml](config/0530-pg_clustering.yml)
- [0540-pg_backup.yml](config/0540-pg_backup.yml)
- [0610-pg_db.yml](config/0610-pg_db.yml)
- [0620-pg_db_confl.yml](config/0620-pg_db_confl.yml)
- [0640-pg_pubrel.yml](config/0640-pg_pubrel.yml)
- [0650-pg_subrel.yml](config/0650-pg_subrel.yml)
- [0700-pg_table.yml](config/0700-pg_table.yml)
- [0710-pg_index.yml](config/0710-pg_index.yml)
- [0720-pg_func.yml](config/0720-pg_func.yml)
- [0730-pg_seq.yml](config/0730-pg_seq.yml)
- [0740-pg_relkind.yml](config/0740-pg_relkind.yml)
- [0750-pg_defpart.yml](config/0750-pg_defpart.yml)
- [0810-pg_table_size.yml](config/0810-pg_table_size.yml)
- [0820-pg_table_bloat.yml](config/0820-pg_table_bloat.yml)
- [0830-pg_index_bloat.yml](config/0830-pg_index_bloat.yml)
- [0910-pgbouncer_list.yml](config/0910-pgbouncer_list.yml)
- [0920-pgbouncer_database.yml](config/0920-pgbouncer_database.yml)
- [0930-pgbouncer_stat.yml](config/0930-pgbouncer_stat.yml)
- [0940-pgbouncer_pool.yml](config/0940-pgbouncer_pool.yml)
- [1000-pg_wait_event.yml](config/1000-pg_wait_event.yml)
- [1800-pg_tsdb_hypertable.yml](config/1800-pg_tsdb_hypertable.yml)
- [1900-pg_citus.yml](config/1900-pg_citus.yml)
- [2000-pg_heartbeat.yml](config/2000-pg_heartbeat.yml)


> #### Note
>
> Supported version: PostgreSQL 10, 11, 12, 13, 14, 15, 16, 17, 18+
>
> But you can still get PostgreSQL 9.1 - 9.6 support by switching to the [`legacy/pg_exporter.yml`](legacy/pg_exporter.yml) config

`pg_exporter` will generate approximately 600 metrics for a completely new database cluster.
For a real-world database with 10 ~ 100 tables, it may generate several 1k ~ 10k metrics. 

You may need to modify or disable some database-level metrics on a database with several thousand or more tables to complete the scrape in time.

Config files are using YAML format, there are lots of examples in the [conf](https://github.com/pgsty/pg_exporter/tree/main/config/collector) dir. and here is a [sample](config/0000-doc.yml) config.

```
#==============================================================#
# 1. Config File
#==============================================================#
# The configuration file for pg_exporter is a YAML file.
# Default configurations are retrieved via following precedence:
#     1. command line args:      --config=<config path>
#     2. environment variables:  PG_EXPORTER_CONFIG=<config path>
#     3. pg_exporter.yml        (Current directory)
#     4. /etc/pg_exporter.yml   (config file)
#     5. /etc/pg_exporter       (config dir)

#==============================================================#
# 2. Config Format
#==============================================================#
# pg_exporter config could be a single YAML file, or a directory containing a series of separated YAML files.
# Each YAML config file consists of one or more metrics Collector definition, which are top-level objects.
# If a directory is provided, all YAML files in that directory (non-recursive; subdirectories are ignored) will be merged in alphabetic order.
# Collector definition examples are shown below.

#==============================================================#
# 3. Collector Example
#==============================================================#
#  # Here is an example of a metrics collector definition
#  pg_primary_only:       # Collector branch name. Must be UNIQUE among the entire configuration
#    name: pg             # Collector namespace, used as METRIC PREFIX, set to branch name by default, can be override
#                         # the same namespace may contain multiple collector branches. It`s the user`s responsibility
#                         # to make sure that AT MOST ONE collector is picked for each namespace.
#
#    desc: PostgreSQL basic information (on primary)                 # Collector description
#    query: |                                                        # Metrics Query SQL
#
#      SELECT extract(EPOCH FROM CURRENT_TIMESTAMP)                  AS timestamp,
#             pg_current_wal_lsn() - '0/0'                           AS lsn,
#             pg_current_wal_insert_lsn() - '0/0'                    AS insert_lsn,
#             pg_current_wal_lsn() - '0/0'                           AS write_lsn,
#             pg_current_wal_flush_lsn() - '0/0'                     AS flush_lsn,
#             extract(EPOCH FROM now() - pg_postmaster_start_time()) AS uptime,
#             extract(EPOCH FROM now() - pg_conf_load_time())        AS conf_reload_time,
#             pg_is_in_backup()                                      AS is_in_backup,
#             extract(EPOCH FROM now() - pg_backup_start_time())     AS backup_time;
#
#                             # [OPTIONAL] metadata fields, control collector behavior
#    ttl: 10                  # Cache TTL: in seconds, how long will pg_exporter cache this collector`s query result.
#    timeout: 0.1             # Query Timeout: in seconds, queries that exceed this limit will be canceled.
#    min_version: 100000      # minimal supported version, boundary IS included. In server version number format,
#    max_version: 130000      # maximal supported version, boundary NOT included, In server version number format
#    fatal: false             # Collector marked `fatal` fails, the entire scrape will abort immediately and marked as failed
#    skip: false              # Collector marked `skip` will not be installed during the planning procedure
#
#    tags: [cluster, primary] # Collector tags, used for planning and scheduling
#
#    # tags are list of strings, which could be:
#    #   * `cluster` marks this query as cluster level, so it will only execute once for the same PostgreSQL Server
#    #   * `primary` or `master`  mark this query can only run on a primary instance (WILL NOT execute if pg_is_in_recovery())
#    #   * `standby` or `replica` mark this query can only run on a replica instance (WILL execute if pg_is_in_recovery())
#    # some special tag prefix have special interpretation:
#    #   * `dbname:<dbname>` means this query will ONLY be executed on database with name `<dbname>`
#    #   * `username:<user>` means this query will only be executed when connect with user `<user>`
#    #   * `extension:<extname>` means this query will only be executed when extension `<extname>` is installed
#    #   * `schema:<nspname>` means this query will only by executed when schema `<nspname>` exist
#    #   * `not:<negtag>` means this query WILL NOT be executed when exporter is tagged with `<negtag>`
#    #   * `<tag>` means this query WILL be executed when exporter is tagged with `<tag>`
#    #   ( <tag> could not be cluster,primary,standby,master,replica,etc...)
#
#    # One or more "predicate queries" may be defined for a metric query. These
#    # are run before the main metric query (after any cache hit check). If all
#    # of them, when run sequentially, return a single row with a single column
#    # boolean true result, the main metric query is executed. If any of them
#    # return false or return zero rows, the main query is skipped. If any
#    # predicate query returns more than one row, a non-boolean result, or fails
#    # with an error, the whole query is marked failed. Predicate queries can be
#    # used to check for the presence of specific functions, tables, extensions,
#    # settings, and vendor-specific pg features before running the main query.
#
#    predicate_queries:
#      - name: predicate query name
#        predicate_query: |
#          SELECT EXISTS (SELECT 1 FROM information_schema.routines WHERE routine_schema = 'pg_catalog' AND routine_name = 'pg_backup_start_time');
#
#    metrics:                 # List of returned columns, each column must have a `name` and `usage`, `rename` and `description` are optional
#      - timestamp:           # Column name, should be exactly the same as returned column name
#          usage: GAUGE       # Metric type, `usage` could be
#                                  * DISCARD: completely ignoring this field
#                                  * LABEL:   use columnName=columnValue as a label in metric
#                                  * GAUGE:   Mark column as a gauge metric, full name will be `<query.name>_<column.name>`
#                                  * COUNTER: Same as above, except it is a counter rather than a gauge.
#          rename: ts         # [OPTIONAL] Alias, optional, the alias will be used instead of the column name
#          description: xxxx  # [OPTIONAL] Description of the column, will be used as a metric description
#          default: 0         # [OPTIONAL] Default value, will be used when column is NULL
#          scale:   1000      # [OPTIONAL] Scale the value by this factor
#      - lsn:
#          usage: COUNTER
#          description: log sequence number, current write location (on primary)
#      - insert_lsn:
#          usage: COUNTER
#          description: primary only, location of current wal inserting
#      - write_lsn:
#          usage: COUNTER
#          description: primary only, location of current wal writing
#      - flush_lsn:
#          usage: COUNTER
#          description: primary only, location of current wal syncing
#      - uptime:
#          usage: GAUGE
#          description: seconds since postmaster start
#      - conf_reload_time:
#          usage: GAUGE
#          description: seconds since last configuration reload
#      - is_in_backup:
#          usage: GAUGE
#          description: 1 if backup is in progress
#      - backup_time:
#          usage: GAUGE
#          description: seconds since the current backup start. null if don`t have one
#
#      .... # you can also use rename & scale to customize the metric name and value:
#      - checkpoint_write_time:
#          rename: write_time
#          usage: COUNTER
#          scale: 1e-3
#          description: Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in seconds

#==============================================================#
# 4. Collector Presets
#==============================================================#
# pg_exporter is shipped with a series of preset collectors (already numbered and ordered by filename)
#
# 1xx  Basic metrics:        basic info, metadata, settings
# 2xx  Replication metrics:  replication, walreceiver, downstream, sync standby, slots, subscription
# 3xx  Persist metrics:      size, wal, background writer, checkpointer, ssl, checkpoint, recovery, slru cache, shmem usage
# 4xx  Activity metrics:     backend count group by state, wait event, locks, xacts, queries
# 5xx  Progress metrics:     clustering, vacuuming, indexing, basebackup, copy
# 6xx  Database metrics:     pg_database, publication, subscription
# 7xx  Object metrics:       pg_class, table, index, function, sequence, default partition
# 8xx  Optional metrics:     optional metrics collector (disable by default, slow queries)
# 9xx  Pgbouncer metrics:    metrics from pgbouncer admin database `pgbouncer`
#
# 100-599 Metrics for entire database cluster  (scrape once)
# 600-899 Metrics for single database instance (scrape for each database, except for pg_db itself)

#==============================================================#
# 5. Cache TTL
#==============================================================#
# Cache can be used for reducing query overhead, it can be enabled by setting a non-zero value for `ttl`
# It is highly recommended to use cache to avoid duplicate scrapes. Especially when you got multiple Prometheus
# scraping the same instance with slow monitoring queries. Setting `ttl` to zero or leaving blank will disable
# result caching, which is the default behavior
#
# TTL has to be smaller than your scrape interval. 15s scrape interval and 10s TTL is a good start for
# production environment. Some expensive monitoring queries (such as size/bloat check) will have longer `ttl`
# which can also be used as a mechanism to achieve `different scrape frequency`

#==============================================================#
# 6. Query Timeout
#==============================================================#
# Collectors can be configured with an optional Timeout. If the collector's query executes more than that
# timeout, it will be canceled immediately. Setting the `timeout` to 0 or leaving blank will reset it to
# default timeout 0.1 (100ms). Setting it to any negative number will disable the query timeout feature.
# All queries have a default timeout of 100ms, if exceeded, the query will be canceled immediately to avoid
# avalanche. You can explicitly overwrite that option. but beware: in some extreme cases, if all your
# timeouts sum up greater your scrape/cache interval (usually 15s), the queries may still be jammed.
# or, you can just disable potential slow queries.

#==============================================================#
# 7. Version Compatibility
#==============================================================#
# Each collector has two optional version compatibility parameters: `min_version` and `max_version`.
# These two parameters specify the version compatibility of the collector. If target postgres/pgbouncer's
# version is less than `min_version`, or higher than `max_version`, the collector will not be installed.
# These two parameters are using PostgreSQL server version number format, which is a 6-digit integer
# format as <major:2 digit><minor:2 digit><release:2 digit>.
# For example, 090600 stands for 9.6, and 120100 stands for 12.1
# And beware that version compatibility range is left-inclusive right exclusive: [min, max), set to zero or
# leaving blank will affect as -inf or +inf

#==============================================================#
# 8. Fatality
#==============================================================#
# If a collector marked with `fatal` fails, the entire scrape operation will be marked as failed and key metrics
# `pg_up` / `pgbouncer_up` will be reset to 0. It is always a good practice to set up AT LEAST ONE fatal
# collector for pg_exporter. `pg.pg_primary_only` and `pgbouncer_list` are the default fatal collector.
#
# If a collector without `fatal` flag fails, it will increase global fail counters. But the scrape operation
# will carry on. The entire scrape result will not be marked as failed, thus will not affect the `<xx>_up` metric.

#==============================================================#
# 9. Skip
#==============================================================#
# Collector with `skip` flag set to true will NOT be installed.
# This could be a handy option to disable collectors

#==============================================================#
# 10. Tags and Planning
#==============================================================#
# Tags are designed for collector planning & schedule. It can be handy to customize which queries run
# on which instances. And thus you can use one-single monolith config for multiple environments
#
#  Tags are a list of strings, each string could be:
#  Pre-defined special tags
#    * `cluster` marks this collector as cluster level, so it will ONLY BE EXECUTED ONCE for the same PostgreSQL Server
#    * `primary` or `master` mark this collector as primary-only, so it WILL NOT work iff pg_is_in_recovery()
#    * `standby` or `replica` mark this collector as replica-only, so it WILL work iff pg_is_in_recovery()
#  Special tag prefix which have different interpretation:
#    * `dbname:<dbname>` means this collector will ONLY work on database with name `<dbname>`
#    * `username:<user>` means this collector will ONLY work when connect with user `<user>`
#    * `extension:<extname>` means this collector will ONLY work when extension `<extname>` is installed
#    * `schema:<nspname>` means this collector will only work when schema `<nspname>` exists
#  Customized positive tags (filter) and negative tags (taint)
#    * `not:<negtag>` means this collector WILL NOT work when exporter is tagged with `<negtag>`
#    * `<tag>` means this query WILL work if exporter is tagged with `<tag>` (special tags not included)
#
#  pg_exporter will trigger the planning procedure after connecting to the target. It will gather database facts
#  and match them against tags and other metadata (such as the supported version range). A collector will
#  be installed if and only if it is compatible with the target server.

```



--------------------

## About

Author: [Vonng](https://vonng.com/en) ([rh@vonng.com](mailto:rh@vonng.com))

Contributors: https://github.com/pgsty/pg_exporter/graphs/contributors

License: [Apache-2.0](LICENSE)

Copyright: 2018-2026 rh@vonng.com

<p align="center">
  <img src="static/logo.png" alt="PG Exporter Logo" height="128" align="middle">
</p>


================================================
FILE: config/0000-doc.yml
================================================
#==============================================================#
# Desc      :   pg_exporter metrics collector definition
# Ver       :   PostgreSQL 10 ~ 18+ and pgbouncer 1.9~1.25+
# Ctime     :   2019-12-09
# Mtime     :   2026-03-21
# Homepage  :   https://pigsty.io
# Author    :   Ruohang Feng (rh@vonng.com)
# License   :   Apache-2.0 @ https://github.com/pgsty/pg_exporter
# Copyright :   2018-2026  Ruohang Feng / Vonng (rh@vonng.com)
#==============================================================#


#==============================================================#
# 1. Config File
#==============================================================#
# The configuration file for pg_exporter is a YAML file.
# Default configurations are retrieved via following precedence:
#     1. command line args:      --config=<config path>
#     2. environment variables:  PG_EXPORTER_CONFIG=<config path>
#     3. pg_exporter.yml        (Current directory)
#     4. /etc/pg_exporter.yml   (config file)
#     5. /etc/pg_exporter       (config dir)

#==============================================================#
# 2. Config Format
#==============================================================#
# pg_exporter config could be a single YAML file, or a directory containing a series of separated YAML files.
# Each YAML config file consists of one or more metrics Collector definition, which are top-level objects.
# If a directory is provided, all YAML in that directory will be merged in alphabetic order.
# Collector definition examples are shown below.

#==============================================================#
# 3. Collector Example
#==============================================================#
#  # Here is an example of a metrics collector definition
#  pg_primary_only:       # Collector branch name. Must be UNIQUE among the entire configuration
#    name: pg             # Collector namespace, used as METRIC PREFIX, set to branch name by default, can be override
#                         # the same namespace may contain multiple collector branches. It`s the user`s responsibility
#                         # to make sure that AT MOST ONE collector is picked for each namespace.
#
#    desc: PostgreSQL basic information (on primary)                 # Collector description
#    query: |                                                        # Metrics Query SQL
#
#      SELECT extract(EPOCH FROM CURRENT_TIMESTAMP)                  AS timestamp,
#             pg_current_wal_lsn() - '0/0'                           AS lsn,
#             pg_current_wal_insert_lsn() - '0/0'                    AS insert_lsn,
#             pg_current_wal_lsn() - '0/0'                           AS write_lsn,
#             pg_current_wal_flush_lsn() - '0/0'                     AS flush_lsn,
#             extract(EPOCH FROM now() - pg_postmaster_start_time()) AS uptime,
#             extract(EPOCH FROM now() - pg_conf_load_time())        AS conf_reload_time,
#             pg_is_in_backup()                                      AS is_in_backup,
#             extract(EPOCH FROM now() - pg_backup_start_time())     AS backup_time;
#
#                             # [OPTIONAL] metadata fields, control collector behavior
#    ttl: 10                  # Cache TTL: in seconds, how long will pg_exporter cache this collector`s query result.
#    timeout: 0.1             # Query Timeout: in seconds, queries that exceed this limit will be canceled.
#    min_version: 100000      # minimal supported version, boundary IS included. In server version number format,
#    max_version: 130000      # maximal supported version, boundary NOT included, In server version number format
#    fatal: false             # Collector marked `fatal` fails, the entire scrape will abort immediately and marked as failed
#    skip: false              # Collector marked `skip` will not be installed during the planning procedure
#
#    tags: [cluster, primary] # Collector tags, used for planning and scheduling
#
#    # tags are list of strings, which could be:
#    #   * `cluster` marks this query as cluster level, so it will only execute once for the same PostgreSQL Server
#    #   * `primary` or `master`  mark this query can only run on a primary instance (WILL NOT execute if pg_is_in_recovery())
#    #   * `standby` or `replica` mark this query can only run on a replica instance (WILL execute if pg_is_in_recovery())
#    # some special tag prefix have special interpretation:
#    #   * `dbname:<dbname>` means this query will ONLY be executed on database with name `<dbname>`
#    #   * `username:<user>` means this query will only be executed when connect with user `<user>`
#    #   * `extension:<extname>` means this query will only be executed when extension `<extname>` is installed
#    #   * `schema:<nspname>` means this query will only by executed when schema `<nspname>` exist
#    #   * `not:<negtag>` means this query WILL NOT be executed when exporter is tagged with `<negtag>`
#    #   * `<tag>` means this query WILL be executed when exporter is tagged with `<tag>`
#    #   ( <tag> could not be cluster,primary,standby,master,replica,etc...)
#
#    # One or more "predicate queries" may be defined for a metric query. These
#    # are run before the main metric query (after any cache hit check). If all
#    # of them, when run sequentially, return a single row with a single column
#    # boolean true result, the main metric query is executed. If any of them
#    # return false or return zero rows, the main query is skipped. If any
#    # predicate query returns more than one row, a non-boolean result, or fails
#    # with an error, the whole query is marked failed. Predicate queries can be
#    # used to check for the presence of specific functions, tables, extensions,
#    # settings, and vendor-specific pg features before running the main query.
#
#    predicate_queries:
#      - name: predicate query name
#        predicate_query: |
#          SELECT EXISTS (SELECT 1 FROM information_schema.routines WHERE routine_schema = 'pg_catalog' AND routine_name = 'pg_backup_start_time');
#
#    metrics:                 # List of returned columns, each column must have a `name` and `usage`, `rename` and `description` are optional
#      - timestamp:           # Column name, should be exactly the same as returned column name
#          usage: GAUGE       # Metric type, `usage` could be
#                                  * DISCARD: completely ignoring this field
#                                  * LABEL:   use columnName=columnValue as a label in metric
#                                  * GAUGE:   Mark column as a gauge metric, full name will be `<query.name>_<column.name>`
#                                  * COUNTER: Same as above, except it is a counter rather than a gauge.
#          rename: ts         # [OPTIONAL] Alias, optional, the alias will be used instead of the column name
#          description: xxxx  # [OPTIONAL] Description of the column, will be used as a metric description
#          default: 0         # [OPTIONAL] Default value, will be used when column is NULL
#          scale:   1000      # [OPTIONAL] Scale the value by this factor
#      - lsn:
#          usage: COUNTER
#          description: log sequence number, current write location (on primary)
#      - insert_lsn:
#          usage: COUNTER
#          description: primary only, location of current wal inserting
#      - write_lsn:
#          usage: COUNTER
#          description: primary only, location of current wal writing
#      - flush_lsn:
#          usage: COUNTER
#          description: primary only, location of current wal syncing
#      - uptime:
#          usage: GAUGE
#          description: seconds since postmaster start
#      - conf_reload_time:
#          usage: GAUGE
#          description: seconds since last configuration reload
#      - is_in_backup:
#          usage: GAUGE
#          description: 1 if backup is in progress
#      - backup_time:
#          usage: GAUGE
#          description: seconds since the current backup start. null if don`t have one
#
#      .... # you can also use rename & scale to customize the metric name and value:
#      - checkpoint_write_time:
#          rename: write_time
#          usage: COUNTER
#          scale: 1e-3
#          description: Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in seconds

#==============================================================#
# 4. Collector Presets
#==============================================================#
# pg_exporter is shipped with a series of preset collectors (already numbered and ordered by filename)
#
# 1xx  Basic metrics:        basic info, metadata, settings
# 2xx  Replication metrics:  replication, walreceiver, downstream, sync standby, slots, subscription
# 3xx  Persist metrics:      size, wal, background writer, checkpointer, ssl, checkpoint, recovery, slru cache, shmem usage
# 4xx  Activity metrics:     backend count group by state, wait event, locks, xacts, queries
# 5xx  Progress metrics:     clustering, vacuuming, indexing, basebackup, copy
# 6xx  Database metrics:     pg_database, publication, subscription
# 7xx  Object metrics:       pg_class, table, index, function, sequence, default partition
# 8xx  Optional metrics:     optional metrics collector (disable by default, slow queries)
# 9xx  Pgbouncer metrics:    metrics from pgbouncer admin database `pgbouncer`
#
# 100-599 Metrics for entire database cluster  (scrape once)
# 600-899 Metrics for single database instance (scrape for each database ,except for pg_db itself)

#==============================================================#
# 5. Cache TTL
#==============================================================#
# Cache can be used for reducing query overhead, it can be enabled by setting a non-zero value for `ttl`
# It is highly recommended to use cache to avoid duplicate scrapes. Especially when you got multiple Prometheus
# scraping the same instance with slow monitoring queries. Setting `ttl` to zero or leaving blank will disable
# result caching, which is the default behavior
#
# TTL has to be smaller than your scrape interval. 15s scrape interval and 10s TTL is a good start for
# production environment. Some expensive monitoring queries (such as size/bloat check) will have longer `ttl`
# which can also be used as a mechanism to achieve `different scrape frequency`

#==============================================================#
# 6. Query Timeout
#==============================================================#
# Collectors can be configured with an optional timeout. If the collector's query runs longer than that
# timeout, it will be canceled immediately. Setting the `timeout` to 0 or leaving it blank will reset it to
# the default timeout of 0.1s (100ms). Setting it to any negative number will disable the query timeout feature.
# All queries have a default timeout of 100ms; if exceeded, the query will be canceled immediately to avoid
# an avalanche. You can explicitly overwrite that option, but beware: in some extreme cases, if all your
# timeouts sum up to more than your scrape/cache interval (usually 15s), the queries may still be jammed.
# Alternatively, you can just disable potentially slow queries.

#==============================================================#
# 7. Version Compatibility
#==============================================================#
# Each collector has two optional version compatibility parameters: `min_version` and `max_version`.
# These two parameters specify the version compatibility of the collector. If target postgres/pgbouncer's
# version is less than `min_version`, or higher than `max_version`, the collector will not be installed.
# These two parameters use the PostgreSQL server version number format, which is a 6-digit integer
# formatted as <major: 2 digits><minor: 2 digits><release: 2 digits>.
# For example, 090600 stands for 9.6, and 120100 stands for 12.1
# And beware that the version compatibility range is left-inclusive, right-exclusive: [min, max); setting
# either bound to zero or leaving it blank is treated as -inf or +inf respectively

#==============================================================#
# 8. Fatality
#==============================================================#
# If a collector marked `fatal` fails, the entire scrape operation will be marked as failed and the key metrics
# `pg_up` / `pgbouncer_up` will be reset to 0. It is always a good practice to set up AT LEAST ONE fatal
# collector for pg_exporter. `pg.pg_primary_only` and `pgbouncer_list` are the default fatal collectors.
#
# If a collector without the `fatal` flag fails, it will increase the global failure counters, but the scrape
# operation will carry on. The entire scrape result will not be marked as failed, thus it will not affect the `<xx>_up` metric.

#==============================================================#
# 9. Skip
#==============================================================#
# Collector with `skip` flag set to true will NOT be installed.
# This could be a handy option to disable collectors

#==============================================================#
# 10. Tags and Planning
#==============================================================#
# Tags are designed for collector planning & schedule. It can be handy to customize which queries run
# on which instances. And thus you can use one-single monolith config for multiple environments
#
#  Tags are a list of strings, each string could be:
#  Pre-defined special tags
#    * `cluster` marks this collector as cluster level, so it will ONLY BE EXECUTED ONCE for the same PostgreSQL Server
#    * `primary` or `master` mark this collector as primary-only, so it WILL NOT work iff pg_is_in_recovery()
#    * `standby` or `replica` mark this collector as replica-only, so it WILL work iff pg_is_in_recovery()
#  Special tag prefix which have different interpretation:
#    * `dbname:<dbname>` means this collector will ONLY work on database with name `<dbname>`
#    * `username:<user>` means this collector will ONLY work when connect with user `<user>`
#    * `extension:<extname>` means this collector will ONLY work when extension `<extname>` is installed
#    * `schema:<nspname>` means this collector will only work when schema `<nspname>` exists
#  Customized positive tags (filter) and negative tags (taint)
#    * `not:<negtag>` means this collector WILL NOT work when exporter is tagged with `<negtag>`
#    * `<tag>` means this query WILL work if exporter is tagged with `<tag>` (special tags not included)
#
#  pg_exporter will trigger the planning procedure after connecting to the target. It will gather database facts
#  and match them against tags and other metadata (such as the supported version range). A collector will
#  be installed if and only if it is compatible with the target server.




================================================
FILE: config/0110-pg.yml
================================================
#==============================================================#
# 0110 pg
#==============================================================#
# Primary-side branch of the `pg` namespace. A sibling branch (pg_replica_only,
# below) serves replicas; the primary/replica tags make them mutually exclusive,
# so at most one branch is installed per server. Replica-only columns are padded
# with NULL / constant values so both branches expose the same metric set.
pg_primary_only:
  name: pg
  desc: PostgreSQL basic information (on primary)
  query: |-
    SELECT 
      extract(EPOCH FROM CURRENT_TIMESTAMP)                  AS timestamp,
      extract(EPOCH FROM now() - pg_postmaster_start_time()) AS uptime,
      extract(EPOCH FROM pg_postmaster_start_time())         AS boot_time,
      pg_current_wal_lsn() - '0/0'                           AS lsn,
      pg_current_wal_insert_lsn() - '0/0'                    AS insert_lsn,
      pg_current_wal_lsn() - '0/0'                           AS write_lsn,
      pg_current_wal_flush_lsn() - '0/0'                     AS flush_lsn,
      NULL::BIGINT                                           AS receive_lsn,
      NULL::BIGINT                                           AS replay_lsn,
      extract(EPOCH FROM pg_conf_load_time())                AS reload_time,
      extract(EPOCH FROM now() - pg_conf_load_time())        AS conf_reload_time,
      NULL::FLOAT                                            AS last_replay_time,
      0::FLOAT                                               AS lag,
      pg_is_in_recovery()                                    AS is_in_recovery,
      FALSE                                                  AS is_wal_replay_paused;
  tags: [ cluster, primary ]  # cluster-level: executed once per server; primary: skipped when pg_is_in_recovery()
  ttl: 1                      # cache the result for 1 second
  min_version: 100000         # PostgreSQL 10+
  fatal: true                 # if this collector fails, the whole scrape fails and pg_up drops to 0
  skip: false
  metrics:
    - timestamp:            { usage: GAUGE   ,description: "current database timestamp in unix epoch" }
    - uptime:               { usage: GAUGE   ,description: "seconds since postmaster start" }
    - boot_time:            { usage: GAUGE   ,description: "postmaster boot timestamp in unix epoch" }
    - lsn:                  { usage: COUNTER ,description: "log sequence number, current write location" }
    - insert_lsn:           { usage: COUNTER ,description: "primary only, location of current wal inserting" }
    - write_lsn:            { usage: COUNTER ,description: "primary only, location of current wal writing" }
    - flush_lsn:            { usage: COUNTER ,description: "primary only, location of current wal syncing" }
    - receive_lsn:          { usage: COUNTER ,description: "replica only, location of wal synced to disk" }
    - replay_lsn:           { usage: COUNTER ,description: "replica only, location of wal applied" }
    - reload_time:          { usage: GAUGE   ,description: "time when configuration was last reloaded" }
    - conf_reload_time:     { usage: GAUGE   ,description: "seconds since last configuration reload" }
    - last_replay_time:     { usage: GAUGE   ,description: "time when last transaction been replayed" }
    - lag:                  { usage: GAUGE   ,description: "replica only, replication lag in seconds" }
    - is_in_recovery:       { usage: GAUGE   ,description: "1 if in recovery mode" }
    - is_wal_replay_paused: { usage: GAUGE   ,description: "1 if wal play is paused" }

# Replica-side branch of the `pg` namespace (counterpart of pg_primary_only).
# Reads receive/replay positions via pg_last_wal_*_lsn(); primary-only columns
# are padded with NULLs so both branches expose the same metric set. `lag` is 0
# when receive and replay LSNs match, otherwise seconds since the last replayed
# transaction timestamp.
pg_replica_only:
  name: pg
  desc: PostgreSQL basic information (on replica)
  query: |-
    SELECT 
      extract(EPOCH FROM CURRENT_TIMESTAMP)                  AS timestamp,
      extract(EPOCH FROM now() - pg_postmaster_start_time()) AS uptime,
      extract(EPOCH FROM pg_postmaster_start_time())         AS boot_time,
      pg_last_wal_replay_lsn() - '0/0'                       AS lsn,
      NULL::BIGINT                                           AS insert_lsn,
      NULL::BIGINT                                           AS write_lsn,
      NULL::BIGINT                                           AS flush_lsn,
      pg_last_wal_receive_lsn() - '0/0'                      AS receive_lsn,
      pg_last_wal_replay_lsn() - '0/0'                       AS replay_lsn,
      extract(EPOCH FROM pg_conf_load_time())                AS reload_time,
      extract(EPOCH FROM now() - pg_conf_load_time())        AS conf_reload_time,
      extract(EPOCH FROM pg_last_xact_replay_timestamp())    AS last_replay_time,
      CASE WHEN pg_last_wal_receive_lsn() = pg_last_wal_replay_lsn() THEN 0
          ELSE EXTRACT(EPOCH FROM now() - pg_last_xact_replay_timestamp()) END AS lag,
      pg_is_in_recovery() AS is_in_recovery,
      pg_is_wal_replay_paused() AS is_wal_replay_paused;

  tags: [ cluster, replica ]  # cluster-level: executed once per server; replica: only when pg_is_in_recovery()
  ttl: 1                      # cache the result for 1 second
  min_version: 100000         # PostgreSQL 10+
  fatal: true                 # if this collector fails, the whole scrape fails and pg_up drops to 0
  skip: false
  metrics:
    - timestamp:            { usage: GAUGE   ,description: "current database timestamp in unix epoch" }
    - uptime:               { usage: GAUGE   ,description: "seconds since postmaster start" }
    - boot_time:            { usage: GAUGE   ,description: "postmaster boot timestamp in unix epoch" }
    - lsn:                  { usage: COUNTER ,description: "log sequence number, current write location" }
    - insert_lsn:           { usage: COUNTER ,description: "primary only, location of current wal inserting" }
    - write_lsn:            { usage: COUNTER ,description: "primary only, location of current wal writing" }
    - flush_lsn:            { usage: COUNTER ,description: "primary only, location of current wal syncing" }
    - receive_lsn:          { usage: COUNTER ,description: "replica only, location of wal synced to disk" }
    - replay_lsn:           { usage: COUNTER ,description: "replica only, location of wal applied" }
    - reload_time:          { usage: GAUGE   ,description: "time when configuration was last reloaded" }
    - conf_reload_time:     { usage: GAUGE   ,description: "seconds since last configuration reload" }
    - last_replay_time:     { usage: GAUGE   ,description: "time when last transaction been replayed" }
    - lag:                  { usage: GAUGE   ,description: "replica only, replication lag in seconds" }
    - is_in_recovery:       { usage: GAUGE   ,description: "1 if in recovery mode" }
    - is_wal_replay_paused: { usage: GAUGE   ,description: "1 if wal play is paused" }




================================================
FILE: config/0120-pg_meta.yml
================================================
#==============================================================#
# 0120 pg_meta
#==============================================================#
# pg_meta branch for PostgreSQL 13+. Differs from the pg_meta_10 branch below only
# in reading `primary_conninfo` via current_setting(..., true) instead of emitting
# the 'N/A' placeholder. The two branches' version ranges ([130000, +inf) here,
# [090600, 130000) below) do not overlap, so exactly one is installed.
pg_meta_13:
  name: pg_meta
  desc: PostgreSQL meta info for pg 13+, with extra primary conninfo
  query: |
    SELECT 
      (SELECT system_identifier FROM pg_control_system()) AS cluster_id,
      current_setting('cluster_name')                     AS cluster_name,
      current_setting('port')                             AS listen_port,
      current_setting('data_directory', true)             AS data_dir,
      current_setting('config_file', true)                AS conf_path,
      current_setting('hba_file', true)                   AS hba_path,
      current_setting('wal_level')                        AS wal_level,
      current_setting('server_encoding')                  AS encoding,
      current_setting('server_version')                   AS version,
      current_setting('server_version_num')               AS ver_num,
      version()                                           AS ver_str,
      current_setting('shared_preload_libraries', true)   AS extensions,
      current_setting('primary_conninfo', true)           AS primary_conninfo,
      1                                                   AS info
  ttl: 10                # metadata changes rarely; cache for 10 seconds
  min_version: 130000    # PostgreSQL 13+
  tags: [ cluster ]      # cluster-level: executed once per server
  metrics:
    - cluster_id:        { usage: LABEL ,description: "cluster system identifier" }
    - cluster_name:      { usage: LABEL ,description: "cluster name" }
    - listen_port:       { usage: LABEL ,description: "listen port" }
    - data_dir:          { usage: LABEL ,description: "path to data directory" }
    - conf_path:         { usage: LABEL ,description: "path to postgresql.conf" }
    - hba_path:          { usage: LABEL ,description: "path to pg_hba.conf" }
    - wal_level:         { usage: LABEL ,description: "wal level" }
    - encoding:          { usage: LABEL ,description: "server encoding" }
    - version:           { usage: LABEL ,description: "server version in human-readable format" }
    - ver_num:           { usage: LABEL ,description: "server version number in machine-readable format" }
    - ver_str:           { usage: LABEL ,description: "complete version string" }
    - extensions:        { usage: LABEL ,description: "server installed preload libraries" }
    - primary_conninfo:  { usage: LABEL ,description: "connection string to upstream (do not set password here)" }
    - info:              { usage: GAUGE ,description: "constant 1" }

# pg_meta branch for PostgreSQL 9.6 ~ 12 (max_version is exclusive, so PG13 uses
# the pg_meta_13 branch above). `primary_conninfo` is emitted as the literal
# 'N/A' placeholder here so both branches yield an identical label set.
pg_meta_10:
  name: pg_meta
  desc: PostgreSQL meta info
  query: |
    SELECT 
      (SELECT system_identifier FROM pg_control_system()) AS cluster_id,
      current_setting('cluster_name')                     AS cluster_name,
      current_setting('port')                             AS listen_port,
      current_setting('data_directory', true)             AS data_dir,
      current_setting('config_file', true)                AS conf_path,
      current_setting('hba_file', true)                   AS hba_path,
      current_setting('wal_level')                        AS wal_level,
      current_setting('server_encoding')                  AS encoding,
      current_setting('server_version')                   AS version,
      current_setting('server_version_num')               AS ver_num,
      version()                                           AS ver_str,
      current_setting('shared_preload_libraries', true)   AS extensions,
      'N/A'                                               AS primary_conninfo,
      1                                                   AS info
  ttl: 10                # metadata changes rarely; cache for 10 seconds
  min_version: 090600    # PostgreSQL 9.6+
  max_version: 130000    # exclusive upper bound: < PostgreSQL 13
  tags: [ cluster ]      # cluster-level: executed once per server
  metrics:
    - cluster_id:        { usage: LABEL ,description: "cluster system identifier" }
    - cluster_name:      { usage: LABEL ,description: "cluster name" }
    - listen_port:       { usage: LABEL ,description: "listen port" }
    - data_dir:          { usage: LABEL ,description: "path to data directory" }
    - conf_path:         { usage: LABEL ,description: "path to postgresql.conf" }
    - hba_path:          { usage: LABEL ,description: "path to pg_hba.conf" }
    - wal_level:         { usage: LABEL ,description: "wal level" }
    - encoding:          { usage: LABEL ,description: "server encoding" }
    - version:           { usage: LABEL ,description: "server version in human-readable format" }
    - ver_num:           { usage: LABEL ,description: "server version number in machine-readable format" }
    - ver_str:           { usage: LABEL ,description: "complete version string" }
    - extensions:        { usage: LABEL ,description: "server installed preload libraries" }
    - primary_conninfo:  { usage: LABEL ,description: "connection string to upstream (do not set password here)" }
    - info:              { usage: GAUGE ,description: "constant 1" }



================================================
FILE: config/0130-pg_setting.yml
================================================
#==============================================================#
# 0130 pg_setting
#==============================================================#
# Key PostgreSQL configuration parameters
# All parameters use current_setting(name, missing_ok) for version safety
# Parameters introduced after PG10 use missing_ok=true to return NULL on older versions
pg_setting:
  name: pg_setting
  desc: PostgreSQL shared configuration parameters (shared across all databases)
  query: |
    SELECT
      current_setting('max_connections')::int                          AS max_connections,
      current_setting('max_prepared_transactions')::int                AS max_prepared_transactions,
      current_setting('max_locks_per_transaction')::int                AS max_locks_per_transaction,
      current_setting('max_worker_processes')::int                     AS max_worker_processes,
      current_setting('max_parallel_workers')::int                     AS max_parallel_workers,
      current_setting('max_parallel_workers_per_gather')::int          AS max_parallel_workers_per_gather,
      current_setting('max_parallel_maintenance_workers', true)::int   AS max_parallel_maintenance_workers,
      current_setting('max_replication_slots')::int                    AS max_replication_slots,
      current_setting('max_wal_senders')::int                          AS max_wal_senders,
      current_setting('block_size')::int                               AS block_size,
      current_setting('wal_block_size')::int                           AS wal_block_size,
      pg_size_bytes(current_setting('segment_size'))                   AS segment_size,
      pg_size_bytes(current_setting('wal_segment_size'))               AS wal_segment_size,
      CASE current_setting('data_checksums') WHEN 'on' THEN 1 ELSE 0 END AS data_checksums,
      CASE current_setting('wal_log_hints') WHEN 'on' THEN 1 ELSE 0 END AS wal_log_hints,
      CASE current_setting('fsync') WHEN 'on' THEN 1 ELSE 0 END AS fsync,
      CASE current_setting('full_page_writes') WHEN 'on' THEN 1 ELSE 0 END AS full_page_writes,
      CASE current_setting('wal_level') WHEN 'logical' THEN 3 WHEN 'replica' THEN 2 WHEN 'minimal' THEN 1 ELSE 0 END AS wal_level,
      pg_size_bytes(current_setting('min_wal_size'))                   AS min_wal_size,
      pg_size_bytes(current_setting('max_wal_size'))                   AS max_wal_size,
      pg_size_bytes(current_setting('max_slot_wal_keep_size', true))   AS max_slot_wal_keep_size,
      pg_size_bytes(current_setting('shared_buffers'))                 AS shared_buffers,
      pg_size_bytes(current_setting('work_mem'))                       AS work_mem,
      pg_size_bytes(current_setting('maintenance_work_mem'))           AS maintenance_work_mem,
      pg_size_bytes(current_setting('effective_cache_size'))           AS effective_cache_size,
      pg_size_bytes(current_setting('shared_memory_size', true))       AS shared_memory_size,
      CASE current_setting('huge_pages_status', true) WHEN 'on' THEN 1 WHEN 'off' THEN 0 WHEN 'unknown' THEN -1 ELSE NULL END AS hugepage_status,
      current_setting('shared_memory_size_in_huge_pages', true)::int   AS hugepage_count,
      CASE current_setting('archive_mode') WHEN 'off' THEN 0 WHEN 'on' THEN 1 WHEN 'always' THEN 2 ELSE -1 END AS archive_mode,
      CASE current_setting('autovacuum') WHEN 'on' THEN 1 ELSE 0 END AS autovacuum,
      current_setting('autovacuum_max_workers')::int                   AS autovacuum_max_workers,
      extract(epoch from current_setting('checkpoint_timeout')::interval)::int AS checkpoint_timeout,
      current_setting('checkpoint_completion_target')::float           AS checkpoint_completion_target,
      CASE current_setting('hot_standby') WHEN 'on' THEN 1 ELSE 0 END AS hot_standby,
      CASE current_setting('synchronous_commit')
        WHEN 'off' THEN 0 WHEN 'local' THEN 1 WHEN 'remote_write' THEN 2
        WHEN 'on' THEN 3 WHEN 'remote_apply' THEN 4 ELSE -1 END AS synchronous_commit,
      CASE current_setting('io_method', true)
        WHEN 'sync' THEN 0 WHEN 'worker' THEN 1 WHEN 'io_uring' THEN 2 ELSE NULL END AS io_method;

  ttl: 10
  min_version: 100000
  tags: [ cluster ]
  metrics:
    - max_connections:                  { usage: GAUGE ,description: "maximum number of concurrent connections to the database server" }
    - max_prepared_transactions:        { usage: GAUGE ,description: "maximum number of transactions that can be in the prepared state simultaneously" }
    - max_locks_per_transaction:        { usage: GAUGE ,description: "maximum number of locks per transaction" }
    - max_worker_processes:             { usage: GAUGE ,description: "maximum number of background processes" }
    - max_parallel_workers:             { usage: GAUGE ,description: "maximum number of parallel workers that can be active at one time" }
    - max_parallel_workers_per_gather:  { usage: GAUGE ,description: "maximum number of parallel workers per Gather node" }
    - max_parallel_maintenance_workers: { usage: GAUGE ,description: "maximum number of parallel maintenance workers (PG11+, NULL on older)" }
    - max_replication_slots:            { usage: GAUGE ,description: "maximum number of replication slots" }
    - max_wal_senders:                  { usage: GAUGE ,description: "maximum number of concurrent WAL sender connections" }
    - block_size:                       { usage: GAUGE ,description: "database block size in bytes (default 8192)" }
    - wal_block_size:                   { usage: GAUGE ,description: "WAL block size in bytes" }
    - segment_size:                     { usage: GAUGE ,description: "database file segment size in bytes" }
    - wal_segment_size:                 { usage: GAUGE ,description: "WAL segment size in bytes" }
    - data_checksums:                   { usage: GAUGE ,description: "data checksums enabled, 1=on 0=off" }
    - wal_log_hints:                    { usage: GAUGE ,description: "WAL log hints enabled, 1=on 0=off" }
    - fsync:                            { usage: GAUGE ,description: "fsync enabled (CRITICAL for data safety), 1=on 0=off" }
    - full_page_writes:                 { usage: GAUGE ,description: "full page writes enabled, 1=on 0=off" }
    - wal_level:                        { usage: GAUGE ,description: "WAL level, 1=minimal 2=replica 3=logical" }
    - min_wal_size:                     { usage: GAUGE ,description: "minimum WAL size in bytes" }
    - max_wal_size:                     { usage: GAUGE ,description: "maximum WAL size in bytes" }
    - max_slot_wal_keep_size:           { usage: GAUGE ,description: "maximum WAL size retained by replication slots in bytes (PG13+, NULL on older)" }
    - shared_buffers:                   { usage: GAUGE ,description: "shared buffer size in bytes" }
    - work_mem:                         { usage: GAUGE ,description: "work memory size in bytes" }
    - maintenance_work_mem:             { usage: GAUGE ,description: "maintenance work memory size in bytes" }
    - effective_cache_size:             { usage: GAUGE ,description: "planner's assumption about effective OS cache size in bytes" }
    - shared_memory_size:               { usage: GAUGE ,description: "total shared memory size in bytes (PG13+, NULL on older)" }
    - hugepage_status:                  { usage: GAUGE ,description: "huge pages status, 1=on 0=off -1=unknown NULL=unavailable (PG14+)" }
    - hugepage_count:                   { usage: GAUGE ,description: "number of huge pages needed for shared memory (PG14+, NULL on older)" }
    - archive_mode:                     { usage: GAUGE ,description: "archive mode, 0=off 1=on 2=always" }
    - autovacuum:                       { usage: GAUGE ,description: "autovacuum enabled, 1=on 0=off" }
    - autovacuum_max_workers:           { usage: GAUGE ,description: "maximum number of autovacuum worker processes" }
    - checkpoint_timeout:               { usage: GAUGE ,description: "checkpoint timeout in seconds" }
    - checkpoint_completion_target:     { usage: GAUGE ,description: "checkpoint completion target (0.0-1.0)" }
    - hot_standby:                      { usage: GAUGE ,description: "hot standby mode enabled, 1=on 0=off" }
    - synchronous_commit:               { usage: GAUGE ,description: "synchronous commit level, 0=off 1=local 2=remote_write 3=on 4=remote_apply" }
    - io_method:                         { usage: GAUGE ,description: "I/O method (PG18+), 0=sync 1=worker 2=io_uring NULL=unavailable" }





================================================
FILE: config/0210-pg_repl.yml
================================================
#==============================================================#
# 0210 pg_repl
#==============================================================#
# pg_repl_12: per-WAL-sender replication statistics from pg_stat_replication.
# This variant requires PG 12+ (min_version 120000) and includes reply_time,
# which the v10/v11 variant below omits.
pg_repl_12:
  name: pg_repl
  desc: PostgreSQL replication stat metrics 12+
  # The `current` subquery chooses the reference LSN: last replay LSN when in
  # recovery (cascading standby), current WAL LSN on a primary. All *_diff
  # columns are byte lags of each standby position behind that reference, and
  # `- '0/0'` converts pg_lsn values to absolute byte offsets. Lag intervals
  # are coalesced to 0 so idle standbys still report a value.
  query: |
    SELECT application_name AS appname, usename, coalesce(client_addr::TEXT,'localhost') AS address, pid::TEXT, client_port,
           CASE state WHEN 'streaming' THEN 0 WHEN 'startup' THEN 1 WHEN 'catchup' THEN 2 WHEN 'backup' THEN 3 WHEN 'stopping' THEN 4 ELSE -1 END AS state,
           CASE sync_state WHEN 'async' THEN 0 WHEN 'potential' THEN 1 WHEN 'sync' THEN 2 WHEN 'quorum' THEN 3 ELSE -1 END AS sync_state,
           sync_priority, backend_xmin::TEXT::BIGINT AS backend_xmin, current.lsn - '0/0' AS lsn,
           current.lsn - sent_lsn AS sent_diff, current.lsn - write_lsn AS write_diff, current.lsn - flush_lsn AS flush_diff, current.lsn - replay_lsn AS replay_diff,
           sent_lsn - '0/0' AS sent_lsn, write_lsn - '0/0' AS write_lsn, flush_lsn - '0/0' AS flush_lsn, replay_lsn - '0/0' AS replay_lsn,
           coalesce(extract(EPOCH FROM write_lag), 0)  AS write_lag, coalesce(extract(EPOCH FROM flush_lag), 0)  AS flush_lag, coalesce(extract(EPOCH FROM replay_lag), 0) AS replay_lag,
           extract(EPOCH FROM current_timestamp) AS "time", extract(EPOCH FROM backend_start) AS launch_time, extract(EPOCH FROM reply_time) AS reply_time
    FROM pg_stat_replication, (SELECT CASE WHEN pg_is_in_recovery() THEN pg_last_wal_replay_lsn() ELSE pg_current_wal_lsn() END AS lsn) current;
  ttl: 10
  min_version: 120000
  tags: [ cluster ]
  metrics:
    - appname:           { usage: LABEL   ,description: "Name of the application that is connected to this WAL sender" }
    - usename:           { usage: LABEL   ,description: "Name of the user logged into this WAL sender process" }
    - address:           { usage: LABEL   ,description: "IP address of the client connected to this WAL sender, localhost for unix socket" }
    - pid:               { usage: LABEL   ,description: "Process ID of the WAL sender process" }
    - client_port:       { usage: GAUGE   ,description: "TCP port number that the client is using for communication with this WAL sender, or -1 if a Unix socket is used" }
    - state:             { usage: GAUGE   ,description: "Current WAL sender encoded state 0-4 for streaming|startup|catchup|backup|stopping" }
    - sync_state:        { usage: GAUGE   ,description: "Encoded synchronous state of this standby server, 0-3 for async|potential|sync|quorum" }
    - sync_priority:     { usage: GAUGE   ,description: "Priority of this standby server for being chosen as the synchronous standby" }
    - backend_xmin:      { usage: COUNTER ,description: "This standby's xmin horizon reported by hot_standby_feedback." }
    - lsn:               { usage: COUNTER ,description: "Current log position on this server" }
    - sent_diff:         { usage: GAUGE   ,description: "Last log position sent to this standby server diff with current lsn" }
    - write_diff:        { usage: GAUGE   ,description: "Last log position written to disk by this standby server diff with current lsn" }
    - flush_diff:        { usage: GAUGE   ,description: "Last log position flushed to disk by this standby server diff with current lsn" }
    - replay_diff:       { usage: GAUGE   ,description: "Last log position replayed into the database on this standby server diff with current lsn" }
    - sent_lsn:          { usage: COUNTER ,description: "Last write-ahead log location sent on this connection" }
    - write_lsn:         { usage: COUNTER ,description: "Last write-ahead log location written to disk by this standby server" }
    - flush_lsn:         { usage: COUNTER ,description: "Last write-ahead log location flushed to disk by this standby server" }
    - replay_lsn:        { usage: COUNTER ,description: "Last write-ahead log location replayed into the database on this standby server" }
    - write_lag:         { usage: GAUGE   ,description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" }
    - flush_lag:         { usage: GAUGE   ,description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" }
    - replay_lag:        { usage: GAUGE   ,description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" }
    - time:              { usage: COUNTER ,description: "Current timestamp in unix epoch" }
    - launch_time:       { usage: COUNTER ,description: "Time when this process was started, i.e., when the client connected to this WAL sender" }
    - reply_time:        { usage: GAUGE   ,description: "Send time of last reply message received from standby server" }

# pg_repl_10: same pg_repl collector for PG 10/11 (100000 <= version < 120000).
# Identical to pg_repl_12 except it omits the reply_time column, which that
# variant selects; both publish under the shared branch name `pg_repl`.
pg_repl_10:
  name: pg_repl
  desc: PostgreSQL replication stat metrics v10 v11
  # See pg_repl_12 for the query structure: `current.lsn` is replay LSN in
  # recovery / current WAL LSN otherwise, `- '0/0'` converts pg_lsn to bytes,
  # and lag intervals are coalesced to 0 for idle standbys.
  query: |
    SELECT application_name AS appname, usename, coalesce(client_addr::TEXT,'localhost') AS address, pid::TEXT, client_port,
           CASE state WHEN 'streaming' THEN 0 WHEN 'startup' THEN 1 WHEN 'catchup' THEN 2 WHEN 'backup' THEN 3 WHEN 'stopping' THEN 4 ELSE -1 END AS state,
           CASE sync_state WHEN 'async' THEN 0 WHEN 'potential' THEN 1 WHEN 'sync' THEN 2 WHEN 'quorum' THEN 3 ELSE -1 END AS sync_state,
           sync_priority, backend_xmin::TEXT::BIGINT AS backend_xmin, current.lsn - '0/0' AS lsn,
           current.lsn - sent_lsn AS sent_diff, current.lsn - write_lsn AS write_diff, current.lsn - flush_lsn AS flush_diff, current.lsn - replay_lsn AS replay_diff,
           sent_lsn - '0/0' AS sent_lsn, write_lsn - '0/0' AS write_lsn, flush_lsn - '0/0' AS flush_lsn, replay_lsn - '0/0' AS replay_lsn,
           coalesce(extract(EPOCH FROM write_lag), 0)  AS write_lag, coalesce(extract(EPOCH FROM flush_lag), 0)  AS flush_lag, coalesce(extract(EPOCH FROM replay_lag), 0) AS replay_lag,
           extract(EPOCH FROM current_timestamp) AS "time", extract(EPOCH FROM backend_start) AS launch_time
    FROM pg_stat_replication, (SELECT CASE WHEN pg_is_in_recovery() THEN pg_last_wal_replay_lsn() ELSE pg_current_wal_lsn() END AS lsn) current;
  ttl: 10
  min_version: 100000
  max_version: 120000
  tags: [ cluster ]
  metrics:
    - appname:           { usage: LABEL   ,description: "Name of the application that is connected to this WAL sender" }
    - usename:           { usage: LABEL   ,description: "Name of the user logged into this WAL sender process" }
    - address:           { usage: LABEL   ,description: "IP address of the client connected to this WAL sender, localhost for unix socket" }
    - pid:               { usage: LABEL   ,description: "Process ID of the WAL sender process" }
    - client_port:       { usage: GAUGE   ,description: "TCP port number that the client is using for communication with this WAL sender, or -1 if a Unix socket is used" }
    - state:             { usage: GAUGE   ,description: "Current WAL sender encoded state 0-4 for streaming|startup|catchup|backup|stopping" }
    - sync_state:        { usage: GAUGE   ,description: "Encoded synchronous state of this standby server, 0-3 for async|potential|sync|quorum" }
    - sync_priority:     { usage: GAUGE   ,description: "Priority of this standby server for being chosen as the synchronous standby" }
    - backend_xmin:      { usage: COUNTER ,description: "This standby's xmin horizon reported by hot_standby_feedback." }
    - lsn:               { usage: COUNTER ,description: "Current log position on this server" }
    - sent_diff:         { usage: GAUGE   ,description: "Last log position sent to this standby server diff with current lsn" }
    - write_diff:        { usage: GAUGE   ,description: "Last log position written to disk by this standby server diff with current lsn" }
    - flush_diff:        { usage: GAUGE   ,description: "Last log position flushed to disk by this standby server diff with current lsn" }
    - replay_diff:       { usage: GAUGE   ,description: "Last log position replayed into the database on this standby server diff with current lsn" }
    - sent_lsn:          { usage: COUNTER ,description: "Last write-ahead log location sent on this connection" }
    - write_lsn:         { usage: COUNTER ,description: "Last write-ahead log location written to disk by this standby server" }
    - flush_lsn:         { usage: COUNTER ,description: "Last write-ahead log location flushed to disk by this standby server" }
    - replay_lsn:        { usage: COUNTER ,description: "Last write-ahead log location replayed into the database on this standby server" }
    - write_lag:         { usage: GAUGE   ,description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" }
    - flush_lag:         { usage: GAUGE   ,description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" }
    - replay_lag:        { usage: GAUGE   ,description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" }
    - time:              { usage: COUNTER ,description: "Current timestamp in unix epoch" }
    - launch_time:       { usage: COUNTER ,description: "Time when this process was started, i.e., when the client connected to this WAL sender" }




================================================
FILE: config/0220-pg_sync_standby.yml
================================================
#==============================================================#
# 0220 pg_sync_standby
#==============================================================#
# pg_sync_standby: exposes synchronous_standby_names as a label plus an
# enabled flag. An empty setting is mapped to the literal '<null>' so the
# label always has a non-empty value.
pg_sync_standby:
  name: pg_sync_standby
  desc: PostgreSQL synchronous standby status and names
  query: |
    SELECT CASE WHEN names <> '' THEN names ELSE '<null>' END AS names, CASE WHEN names <> '' THEN 1 ELSE 0 END AS enabled FROM (SELECT current_setting('synchronous_standby_names') AS names) n;
  ttl: 10
  min_version: 090400
  tags: [ cluster ]
  metrics:
    - names:             { usage: LABEL ,description: "List of standby servers that can support synchronous replication, <null> if not enabled" }
    - enabled:           { usage: GAUGE ,description: "Synchronous commit enabled, 1 if enabled, 0 if disabled" }




================================================
FILE: config/0230-pg_downstream.yml
================================================
#==============================================================#
# 0230 pg_downstream
#==============================================================#
# pg_downstream: count of replication clients grouped by walsender state.
# The unnest(...) LEFT JOIN pattern zero-fills all five known states, so every
# state label is always emitted even when no client is in that state.
pg_downstream:
  name: pg_downstream
  desc: PostgreSQL replication client count group by state
  query: |
    SELECT l.state, coalesce(count, 0 ) AS count FROM unnest(ARRAY ['streaming','startup','catchup', 'backup', 'stopping']) l(state) LEFT JOIN (SELECT state, count(*) AS count FROM pg_stat_replication GROUP BY state)r ON l.state =  r.state;
  ttl: 10
  min_version: 090400
  tags: [ cluster ]
  metrics:
    - state:             { usage: LABEL ,description: "Replication client state, could be one of startup|catchup|streaming|backup|stopping" }
    - count:             { usage: GAUGE ,description: "Count of corresponding state" }




================================================
FILE: config/0240-pg_slot.yml
================================================
#==============================================================#
# 0240 pg_slot
#==============================================================#
# pg_slot_17: replication slot metrics for PG 17+. Compared with the v16
# variant below, this one additionally selects failover, synced,
# inactive_since and invalidation_reason. Tagged [cluster] only (no
# `primary`) because slots can also exist on standbys.
pg_slot_17:
  name: pg_slot
  desc: PostgreSQL replication slot metrics v17, slot also exists on standby
  # pg_replication_slots is LEFT OUTER JOINed with pg_stat_replication_slots
  # so physical slots (which have no decoding stats) still produce a row.
  # retained_bytes measures the current/replay LSN minus restart_lsn, i.e.
  # how much WAL this slot forces the server to keep.
  query: |-
    SELECT s.slot_name, s.slot_type, plugin, database AS datname,datoid,active_pid,
       active,temporary,two_phase,conflicting,failover,synced,
       xmin::TEXT::BIGINT AS xmin,catalog_xmin::TEXT::BIGINT  AS catalog_xmin,
       restart_lsn - '0/0' AS restart_lsn, confirmed_flush_lsn - '0/0' AS confirm_lsn,
       CASE WHEN pg_is_in_recovery() THEN pg_last_wal_replay_lsn() ELSE pg_current_wal_lsn() END - restart_lsn AS retained_bytes,
       safe_wal_size, CASE wal_status WHEN 'reserved' THEN 0 WHEN 'extended' THEN 1 WHEN 'unreserved' THEN 2 WHEN 'lost' THEN 3 ELSE -1 END AS wal_status,
       spill_txns,spill_count,spill_bytes,stream_txns,stream_count,stream_bytes,total_txns,total_bytes,extract(EPOCH FROM stats_reset) AS reset_time,
       extract(EPOCH FROM inactive_since) AS inactive_since, CASE invalidation_reason WHEN 'wal_removed' THEN 1 WHEN 'rows_removed' THEN 2 WHEN 'wal_level_insufficient' THEN 3 ELSE 0 END AS invalidation_reason 
    FROM pg_replication_slots s LEFT OUTER JOIN pg_stat_replication_slots ss ON s.slot_name = ss.slot_name;

  ttl: 10
  min_version: 170000
  tags: [ cluster ]
  metrics:
    - slot_name:           { usage: LABEL    ,description: "A unique, cluster-wide identifier for the replication slot" }
    - slot_type:           { usage: LABEL    ,description: "The slot type, physical or logical" }
    - plugin:              { usage: LABEL    ,description: "The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots." }
    - datname:             { usage: LABEL    ,description: "The name of the database this slot is associated with, logical slots only, null for physical slot" }
    - datoid:              { usage: GAUGE    ,description: "The OID of the database this slot is associated with, logical slots only, null for physical slot" }
    - active_pid:          { usage: GAUGE    ,description: "The process ID of the session streaming data for this slot. NULL if inactive." }
    - active:              { usage: GAUGE    ,description: "True(1) if this slot is currently actively being used" }
    - temporary:           { usage: GAUGE    ,description: "True(1) if this is a temporary replication slot." }
    - two_phase:           { usage: GAUGE    ,description: "True(1) if the slot is enabled for decoding prepared transactions. Always false for physical slots." }
    - conflicting:         { usage: GAUGE    ,description: "True(1) if this logical slot conflicted with recovery. Always NULL for physical slots." }
    - failover:            { usage: GAUGE    ,description: "True(1) if this is a logical slot enabled to be synced to the standbys" }
    - synced:              { usage: GAUGE    ,description: "True(1) if this is a logical slot that was synced from a primary server" }
    - xmin:                { usage: COUNTER  ,description: "The oldest transaction that this slot needs the database to retain." }
    - catalog_xmin:        { usage: COUNTER  ,description: "The oldest transaction affecting the system catalogs that this slot needs the database to retain." }
    - restart_lsn:         { usage: COUNTER  ,description: "The address (LSN) of oldest WAL which still might be required by the consumer of this slot" }
    - confirm_lsn:         { usage: COUNTER  ,description: "The address (LSN) up to which the logical slot's consumer has confirmed receiving data." }
    - retained_bytes:      { usage: GAUGE    ,description: "Size of bytes that retained for this slot" }
    - safe_wal_size:       { usage: GAUGE    ,description: "bytes that can be written to WAL which will not make slot into lost" }
    - wal_status:          { usage: GAUGE    ,description: "WAL reserve status 0-3 means reserved,extended,unreserved,lost, -1 means other" }
    - spill_txns:          { usage: COUNTER  ,description: "Xacts that spilled to disk due to logical decode mem exceeding (subtrans included)" }
    - spill_count:         { usage: COUNTER  ,description: "Xacts that spilled to disk due to logical decode mem exceeding" }
    - spill_bytes:         { usage: COUNTER  ,description: "Bytes that spilled to disk due to logical decode mem exceeding" }
    - stream_txns:         { usage: COUNTER  ,description: "Xacts that streamed to decoding output plugin after mem exceed" }
    - stream_count:        { usage: COUNTER  ,description: "Xacts that streamed to decoding output plugin after mem exceed" }
    - stream_bytes:        { usage: COUNTER  ,description: "Bytes that streamed to decoding output plugin after mem exceed" }
    - total_txns:          { usage: COUNTER  ,description: "Number of decoded xacts sent to the decoding output plugin for this slot" }
    - total_bytes:         { usage: COUNTER  ,description: "Number of decoded bytes sent to the decoding output plugin for this slot" }
    - reset_time:          { usage: GAUGE    ,description: "When statistics were last reset" }
    - invalidation_reason: { usage: GAUGE    ,description: "ok=0, wal_removed=1, rows_removed=2, wal_level_insufficient=3" }
    - inactive_since:      { usage: GAUGE    ,description: "The time when the slot became inactive" }

# pg_slot_16: replication slot metrics for PG 16 only (160000 <= v < 170000).
# Adds the `conflicting` column over the v14 variant; tagged [cluster] only
# (no `primary`) because from this version slots can also exist on standbys.
pg_slot_16:
  name: pg_slot
  desc: PostgreSQL replication slot metrics v16 with conflicting, now slot also exists on standby
  # LEFT OUTER JOIN keeps physical slots (no rows in pg_stat_replication_slots)
  # in the result; retained_bytes = current/replay LSN minus restart_lsn.
  query: |-
    SELECT s.slot_name, s.slot_type, plugin, database AS datname,datoid,active_pid,
      active,temporary,two_phase,conflicting,xmin::TEXT::BIGINT AS xmin,catalog_xmin::TEXT::BIGINT  AS catalog_xmin,
      restart_lsn - '0/0' AS restart_lsn, confirmed_flush_lsn - '0/0' AS confirm_lsn,
      CASE WHEN pg_is_in_recovery() THEN pg_last_wal_replay_lsn() ELSE pg_current_wal_lsn() END - restart_lsn AS retained_bytes,
      safe_wal_size, CASE wal_status WHEN 'reserved' THEN 0 WHEN 'extended' THEN 1 WHEN 'unreserved' THEN 2 WHEN 'lost' THEN 3 ELSE -1 END AS wal_status,
      spill_txns,spill_count,spill_bytes,stream_txns,stream_count,stream_bytes,total_txns,total_bytes,extract(EPOCH FROM stats_reset) AS reset_time
    FROM pg_replication_slots s LEFT OUTER JOIN pg_stat_replication_slots ss ON s.slot_name = ss.slot_name;

  ttl: 10
  min_version: 160000
  max_version: 170000
  tags: [ cluster ]
  metrics:
    - slot_name:           { usage: LABEL    ,description: "A unique, cluster-wide identifier for the replication slot" }
    - slot_type:           { usage: LABEL    ,description: "The slot type, physical or logical" }
    - plugin:              { usage: LABEL    ,description: "The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots." }
    - datname:             { usage: LABEL    ,description: "The name of the database this slot is associated with, logical slots only, null for physical slot" }
    - datoid:              { usage: GAUGE    ,description: "The OID of the database this slot is associated with, logical slots only, null for physical slot" }
    - active_pid:          { usage: GAUGE    ,description: "The process ID of the session streaming data for this slot. NULL if inactive." }
    - active:              { usage: GAUGE    ,description: "True(1) if this slot is currently actively being used" }
    - temporary:           { usage: GAUGE    ,description: "True(1) if this is a temporary replication slot." }
    - two_phase:           { usage: GAUGE    ,description: "True(1) if the slot is enabled for decoding prepared transactions. Always false for physical slots." }
    - conflicting:         { usage: GAUGE    ,description: "True if this logical slot conflicted with recovery. Always NULL for physical slots." }
    - xmin:                { usage: COUNTER  ,description: "The oldest transaction that this slot needs the database to retain." }
    - catalog_xmin:        { usage: COUNTER  ,description: "The oldest transaction affecting the system catalogs that this slot needs the database to retain." }
    - restart_lsn:         { usage: COUNTER  ,description: "The address (LSN) of oldest WAL which still might be required by the consumer of this slot" }
    - confirm_lsn:         { usage: COUNTER  ,description: "The address (LSN) up to which the logical slot's consumer has confirmed receiving data." }
    - retained_bytes:      { usage: GAUGE    ,description: "Size of bytes that retained for this slot" }
    - safe_wal_size:       { usage: GAUGE    ,description: "bytes that can be written to WAL which will not make slot into lost" }
    - wal_status:          { usage: GAUGE    ,description: "WAL reserve status 0-3 means reserved,extended,unreserved,lost, -1 means other" }
    - spill_txns:          { usage: COUNTER  ,description: "Xacts that spilled to disk due to logical decode mem exceeding (subtrans included)" }
    - spill_count:         { usage: COUNTER  ,description: "Xacts that spilled to disk due to logical decode mem exceeding" }
    - spill_bytes:         { usage: COUNTER  ,description: "Bytes that spilled to disk due to logical decode mem exceeding" }
    - stream_txns:         { usage: COUNTER  ,description: "Xacts that streamed to decoding output plugin after mem exceed" }
    - stream_count:        { usage: COUNTER  ,description: "Xacts that streamed to decoding output plugin after mem exceed" }
    - stream_bytes:        { usage: COUNTER  ,description: "Bytes that streamed to decoding output plugin after mem exceed" }
    - total_txns:          { usage: COUNTER  ,description: "Number of decoded xacts sent to the decoding output plugin for this slot" }
    - total_bytes:         { usage: COUNTER  ,description: "Number of decoded bytes sent to the decoding output plugin for this slot" }
    - reset_time:          { usage: GAUGE    ,description: "When statistics were last reset" }

# pg_slot_14: replication slot metrics for PG 14/15 (140000 <= v < 160000).
# Adds two_phase plus the pg_stat_replication_slots decoding statistics
# (spill/stream/total counters) over the v13 variant; tagged primary because
# this variant is not collected on standbys.
pg_slot_14:
  name: pg_slot
  desc: PostgreSQL replication slot metrics v14 with pg_stat_replication_slots metrics
  # LEFT OUTER JOIN keeps physical slots (no rows in pg_stat_replication_slots)
  # in the result; retained_bytes = current/replay LSN minus restart_lsn.
  query: |-
    SELECT s.slot_name, s.slot_type, plugin, database AS datname,datoid,active_pid,
      active,temporary,two_phase,xmin::TEXT::BIGINT AS xmin,catalog_xmin::TEXT::BIGINT  AS catalog_xmin,
      restart_lsn - '0/0' AS restart_lsn, confirmed_flush_lsn - '0/0' AS confirm_lsn,
      CASE WHEN pg_is_in_recovery() THEN pg_last_wal_replay_lsn() ELSE pg_current_wal_lsn() END - restart_lsn AS retained_bytes,
      safe_wal_size, CASE wal_status WHEN 'reserved' THEN 0 WHEN 'extended' THEN 1 WHEN 'unreserved' THEN 2 WHEN 'lost' THEN 3 ELSE -1 END AS wal_status,
      spill_txns,spill_count,spill_bytes,stream_txns,stream_count,stream_bytes,total_txns,total_bytes,extract(EPOCH FROM stats_reset) AS reset_time
    FROM pg_replication_slots s LEFT OUTER JOIN pg_stat_replication_slots ss ON s.slot_name = ss.slot_name;

  ttl: 10
  min_version: 140000
  max_version: 160000
  tags: [ cluster, primary ]
  metrics:
    - slot_name:           { usage: LABEL    ,description: "A unique, cluster-wide identifier for the replication slot" }
    - slot_type:           { usage: LABEL    ,description: "The slot type, physical or logical" }
    - plugin:              { usage: LABEL    ,description: "The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots." }
    - datname:             { usage: LABEL    ,description: "The name of the database this slot is associated with, logical slots only, null for physical slot" }
    - datoid:              { usage: GAUGE    ,description: "The OID of the database this slot is associated with, logical slots only, null for physical slot" }
    - active_pid:          { usage: GAUGE    ,description: "The process ID of the session streaming data for this slot. NULL if inactive." }
    - active:              { usage: GAUGE    ,description: "True(1) if this slot is currently actively being used" }
    - temporary:           { usage: GAUGE    ,description: "True(1) if this is a temporary replication slot." }
    - two_phase:           { usage: GAUGE    ,description: "True(1) if the slot is enabled for decoding prepared transactions. Always false for physical slots." }
    - xmin:                { usage: COUNTER  ,description: "The oldest transaction that this slot needs the database to retain." }
    - catalog_xmin:        { usage: COUNTER  ,description: "The oldest transaction affecting the system catalogs that this slot needs the database to retain." }
    - restart_lsn:         { usage: COUNTER  ,description: "The address (LSN) of oldest WAL which still might be required by the consumer of this slot" }
    - confirm_lsn:         { usage: COUNTER  ,description: "The address (LSN) up to which the logical slot's consumer has confirmed receiving data." }
    - retained_bytes:      { usage: GAUGE    ,description: "Size of bytes that retained for this slot" }
    - safe_wal_size:       { usage: GAUGE    ,description: "bytes that can be written to WAL which will not make slot into lost" }
    - wal_status:          { usage: GAUGE    ,description: "WAL reserve status 0-3 means reserved,extended,unreserved,lost, -1 means other" }
    - spill_txns:          { usage: COUNTER  ,description: "Xacts that spilled to disk due to logical decode mem exceeding (subtrans included)" }
    - spill_count:         { usage: COUNTER  ,description: "Xacts that spilled to disk due to logical decode mem exceeding" }
    - spill_bytes:         { usage: COUNTER  ,description: "Bytes that spilled to disk due to logical decode mem exceeding" }
    - stream_txns:         { usage: COUNTER  ,description: "Xacts that streamed to decoding output plugin after mem exceed" }
    - stream_count:        { usage: COUNTER  ,description: "Xacts that streamed to decoding output plugin after mem exceed" }
    - stream_bytes:        { usage: COUNTER  ,description: "Bytes that streamed to decoding output plugin after mem exceed" }
    - total_txns:          { usage: COUNTER  ,description: "Number of decoded xacts sent to the decoding output plugin for this slot" }
    - total_bytes:         { usage: COUNTER  ,description: "Number of decoded bytes sent to the decoding output plugin for this slot" }
    - reset_time:          { usage: GAUGE    ,description: "When statistics were last reset" }

# pg_slot_13: replication slot metrics for PG 13 only (130000 <= v < 140000).
# Adds safe_wal_size and wal_status over the v10-v12 variant; selects from
# pg_replication_slots only (pg_stat_replication_slots does not exist yet).
pg_slot_13:
  name: pg_slot
  desc: PostgreSQL replication slot metrics v13 (wal safe size and status)
  # retained_bytes = current/replay LSN minus restart_lsn, i.e. WAL held back
  # by this slot; `- '0/0'` converts pg_lsn values to byte offsets.
  query: |-
    SELECT slot_name, slot_type, plugin, database AS datname,datoid,active_pid,
      active,temporary,xmin::TEXT::BIGINT AS xmin,catalog_xmin::TEXT::BIGINT  AS catalog_xmin,
      restart_lsn - '0/0' AS restart_lsn, confirmed_flush_lsn - '0/0' AS confirm_lsn,
      CASE WHEN pg_is_in_recovery() THEN pg_last_wal_replay_lsn() ELSE pg_current_wal_lsn() END - restart_lsn AS retained_bytes,
      safe_wal_size, CASE wal_status WHEN 'reserved' THEN 0 WHEN 'extended' THEN 1 WHEN 'unreserved' THEN 2 WHEN 'lost' THEN 3 ELSE -1 END AS wal_status
    FROM pg_replication_slots;

  ttl: 10
  min_version: 130000
  max_version: 140000
  tags: [ cluster, primary ]
  metrics:
    - slot_name:           { usage: LABEL    ,description: "A unique, cluster-wide identifier for the replication slot" }
    - slot_type:           { usage: LABEL    ,description: "The slot type, physical or logical" }
    - plugin:              { usage: LABEL    ,description: "The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots." }
    - datname:             { usage: LABEL    ,description: "The name of the database this slot is associated with, logical slots only, null for physical slot" }
    - datoid:              { usage: GAUGE    ,description: "The OID of the database this slot is associated with, logical slots only, null for physical slot" }
    - active_pid:          { usage: GAUGE    ,description: "The process ID of the session streaming data for this slot. NULL if inactive." }
    - active:              { usage: GAUGE    ,description: "True(1) if this slot is currently actively being used" }
    - temporary:           { usage: GAUGE    ,description: "True(1) if this is a temporary replication slot." }
    - xmin:                { usage: COUNTER  ,description: "The oldest transaction that this slot needs the database to retain." }
    - catalog_xmin:        { usage: COUNTER  ,description: "The oldest transaction affecting the system catalogs that this slot needs the database to retain." }
    - restart_lsn:         { usage: COUNTER  ,description: "The address (LSN) of oldest WAL which still might be required by the consumer of this slot" }
    - confirm_lsn:         { usage: COUNTER  ,description: "The address (LSN) up to which the logical slot's consumer has confirmed receiving data." }
    - retained_bytes:      { usage: GAUGE    ,description: "Size of bytes that retained for this slot" }
    - safe_wal_size:       { usage: GAUGE    ,description: "bytes that can be written to WAL which will not make slot into lost" }
    - wal_status:          { usage: GAUGE    ,description: "WAL reserve status 0-3 means reserved,extended,unreserved,lost, -1 means other" }

# Replication slot status for PostgreSQL 10-12. Same shape as the v13 branch
# but without safe_wal_size / wal_status, which only exist from v13 onward.
pg_slot_10:
  name: pg_slot
  desc: PostgreSQL replication slot metrics 10 ~ 12
  # `lsn - '0/0'` turns a pg_lsn into an absolute byte offset; xmin/catalog_xmin
  # are cast through TEXT because xid has no direct cast to BIGINT.
  query: |-
    SELECT slot_name, slot_type, plugin, database AS datname,datoid,active_pid,
      active,temporary,xmin::TEXT::BIGINT AS xmin,catalog_xmin::TEXT::BIGINT  AS catalog_xmin,
      restart_lsn - '0/0' AS restart_lsn, confirmed_flush_lsn - '0/0' AS confirm_lsn,
      CASE WHEN pg_is_in_recovery() THEN pg_last_wal_replay_lsn() ELSE pg_current_wal_lsn() END - restart_lsn AS retained_bytes
    FROM pg_replication_slots;

  ttl: 10
  # version window: [10, 13) — v13+ handled by pg_slot_13 and newer branches
  min_version: 100000
  max_version: 130000
  tags: [ cluster, primary ]
  metrics:
    - slot_name:           { usage: LABEL    ,description: "A unique, cluster-wide identifier for the replication slot" }
    - slot_type:           { usage: LABEL    ,description: "The slot type, physical or logical" }
    - plugin:              { usage: LABEL    ,description: "The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots." }
    - datname:             { usage: LABEL    ,description: "The name of the database this slot is associated with, logical slots only, null for physical slot" }
    - datoid:              { usage: GAUGE    ,description: "The OID of the database this slot is associated with, logical slots only, null for physical slot" }
    - active_pid:          { usage: GAUGE    ,description: "The process ID of the session streaming data for this slot. NULL if inactive." }
    - active:              { usage: GAUGE    ,description: "True(1) if this slot is currently actively being used" }
    - temporary:           { usage: GAUGE    ,description: "True(1) if this is a temporary replication slot." }
    - xmin:                { usage: COUNTER  ,description: "The oldest transaction that this slot needs the database to retain." }
    - catalog_xmin:        { usage: COUNTER  ,description: "The oldest transaction affecting the system catalogs that this slot needs the database to retain." }
    - restart_lsn:         { usage: COUNTER  ,description: "The address (LSN) of oldest WAL which still might be required by the consumer of this slot" }
    - confirm_lsn:         { usage: COUNTER  ,description: "The address (LSN) up to which the logical slot's consumer has confirmed receiving data." }
    - retained_bytes:      { usage: GAUGE    ,description: "Size of bytes that retained for this slot" }




================================================
FILE: config/0250-pg_recv.yml
================================================
#==============================================================#
# 0250 pg_recv
#==============================================================#
# WAL receiver status for PostgreSQL 13+: v13 renamed received_lsn to
# flushed_lsn and added written_lsn, hence this dedicated branch.
pg_recv_13:
  name: pg_recv
  desc: PostgreSQL walreceiver metrics 13+
  # sender_host/sender_port can be NULL, so fall back to parsing conninfo.
  # NOTE(review): the status CASE lists walsender-style states
  # (startup/catchup/backup); pg_stat_wal_receiver.status reports
  # stopped/starting/streaming/waiting/restarting/stopping, so non-streaming
  # states mostly land in the -1 bucket — confirm this encoding is intended.
  query: |-
    SELECT 
      coalesce(sender_host, (regexp_match(conninfo, '.*host=(\S+).*'))[1]) AS sender_host, coalesce(sender_port::TEXT, (regexp_match(conninfo, '.*port=(\S+).*'))[1]) AS sender_port, coalesce(slot_name, 'NULL') AS slot_name,
      pid, CASE status WHEN 'streaming' THEN 0 WHEN 'startup' THEN 1 WHEN 'catchup' THEN 2 WHEN 'backup' THEN 3 WHEN 'stopping' THEN 4 ELSE -1 END AS state,
      receive_start_lsn - '0/0' AS init_lsn,receive_start_tli AS init_tli,
      flushed_lsn - '0/0' AS flush_lsn,written_lsn - '0/0' AS write_lsn, received_tli AS flush_tli, latest_end_lsn - '0/0' AS reported_lsn,
      last_msg_send_time AS msg_send_time,last_msg_receipt_time AS msg_recv_time,latest_end_time AS reported_time,now() AS time FROM pg_stat_wal_receiver;

  ttl: 10
  min_version: 130000
  # a walreceiver only exists on a streaming replica
  tags: [ cluster, replica ]
  metrics:
    - sender_host:         { usage: LABEL   ,description: "Host of the PostgreSQL instance this WAL receiver is connected to" }
    - sender_port:         { usage: LABEL   ,description: "Port number of the PostgreSQL instance this WAL receiver is connected to." }
    - slot_name:           { usage: LABEL   ,description: "Replication slot name used by this WAL receiver" }
    - pid:                 { usage: GAUGE   ,description: "Process ID of the WAL receiver process" }
    - state:               { usage: GAUGE   ,description: "Encoded activity status of the WAL receiver process 0-4 for streaming|startup|catchup|backup|stopping" }
    - init_lsn:            { usage: COUNTER ,description: "First write-ahead log location used when WAL receiver is started" }
    - init_tli:            { usage: COUNTER ,description: "First timeline number used when WAL receiver is started" }
    - flush_lsn:           { usage: COUNTER ,description: "Last write-ahead log location already received and flushed to disk" }
    - write_lsn:           { usage: COUNTER ,description: "Last write-ahead log location already received and written to disk, but not flushed." }
    - flush_tli:           { usage: COUNTER ,description: "Timeline number of last write-ahead log location received and flushed to disk" }
    - reported_lsn:        { usage: COUNTER ,description: "Last write-ahead log location reported to origin WAL sender" }
    - msg_send_time:       { usage: GAUGE   ,description: "Send time of last message received from origin WAL sender" }
    - msg_recv_time:       { usage: GAUGE   ,description: "Receipt time of last message received from origin WAL sender" }
    - reported_time:       { usage: GAUGE   ,description: "Time of last write-ahead log location reported to origin WAL sender" }
    - time:                { usage: GAUGE   ,description: "Time of current snapshot" }

# WAL receiver status for PostgreSQL 11-12: received_lsn is exposed here as
# flush_lsn (renamed to flushed_lsn in v13), and written_lsn does not exist yet.
pg_recv_11:
  name: pg_recv
  desc: PostgreSQL walreceiver metrics (11-12)
  # sender_host/sender_port can be NULL, so fall back to parsing conninfo.
  # NOTE(review): the status CASE lists walsender-style states
  # (startup/catchup/backup); pg_stat_wal_receiver.status reports
  # stopped/starting/streaming/waiting/restarting/stopping, so non-streaming
  # states mostly land in the -1 bucket — confirm this encoding is intended.
  query: |-
    SELECT 
      coalesce(sender_host, (regexp_match(conninfo, '.*host=(\S+).*'))[1]) AS sender_host, coalesce(sender_port::TEXT, (regexp_match(conninfo, '.*port=(\S+).*'))[1]) AS sender_port, coalesce(slot_name, 'NULL') AS slot_name,
      pid, CASE status WHEN 'streaming' THEN 0 WHEN 'startup' THEN 1 WHEN 'catchup' THEN 2 WHEN 'backup' THEN 3 WHEN 'stopping' THEN 4 ELSE -1 END AS state,
      receive_start_lsn - '0/0' AS init_lsn,receive_start_tli AS init_tli,
      received_lsn - '0/0' AS flush_lsn, received_tli AS flush_tli, latest_end_lsn - '0/0' AS reported_lsn,
      last_msg_send_time AS msg_send_time,last_msg_receipt_time AS msg_recv_time,latest_end_time AS reported_time,now() AS time FROM pg_stat_wal_receiver;

  ttl: 10
  # a walreceiver only exists on a streaming replica
  tags: [ cluster, replica ]
  # version window: [11, 13)
  min_version: 110000
  max_version: 130000
  metrics:
    - sender_host:         { usage: LABEL   ,description: "Host of the PostgreSQL instance this WAL receiver is connected to" }
    - sender_port:         { usage: LABEL   ,description: "Port number of the PostgreSQL instance this WAL receiver is connected to." }
    - slot_name:           { usage: LABEL   ,description: "Replication slot name used by this WAL receiver" }
    - pid:                 { usage: GAUGE   ,description: "Process ID of the WAL receiver process" }
    - state:               { usage: GAUGE   ,description: "Encoded activity status of the WAL receiver process 0-4 for streaming|startup|catchup|backup|stopping" }
    - init_lsn:            { usage: COUNTER ,description: "First write-ahead log location used when WAL receiver is started" }
    - init_tli:            { usage: COUNTER ,description: "First timeline number used when WAL receiver is started" }
    - flush_lsn:           { usage: COUNTER ,description: "Last write-ahead log location already received and flushed to disk" }
    - flush_tli:           { usage: COUNTER ,description: "Timeline number of last write-ahead log location received and flushed to disk" }
    - reported_lsn:        { usage: COUNTER ,description: "Last write-ahead log location reported to origin WAL sender" }
    - msg_send_time:       { usage: GAUGE   ,description: "Send time of last message received from origin WAL sender" }
    - msg_recv_time:       { usage: GAUGE   ,description: "Receipt time of last message received from origin WAL sender" }
    - reported_time:       { usage: GAUGE   ,description: "Time of last write-ahead log location reported to origin WAL sender" }
    - time:                { usage: GAUGE   ,description: "Time of current snapshot" }

# WAL receiver status for PostgreSQL 10: the sender_host/sender_port columns
# were only added in v11, so here both are extracted from conninfo via regexp.
pg_recv_10:
  name: pg_recv
  desc: PostgreSQL walreceiver metrics (10)
  # NOTE(review): the status CASE lists walsender-style states
  # (startup/catchup/backup); pg_stat_wal_receiver.status reports
  # stopped/starting/streaming/waiting/restarting/stopping, so non-streaming
  # states mostly land in the -1 bucket — confirm this encoding is intended.
  query: |-
    SELECT 
      (regexp_match(conninfo, '.*host=(\S+).*'))[1] AS sender_host, (regexp_match(conninfo, '.*port=(\S+).*'))[1] AS sender_port, coalesce(slot_name, 'NULL') AS slot_name,
      pid, CASE status WHEN 'streaming' THEN 0 WHEN 'startup' THEN 1 WHEN 'catchup' THEN 2 WHEN 'backup' THEN 3 WHEN 'stopping' THEN 4 ELSE -1 END AS state,
      receive_start_lsn - '0/0' AS init_lsn,receive_start_tli AS init_tli,
      received_lsn - '0/0' AS flush_lsn, received_tli AS flush_tli, latest_end_lsn - '0/0' AS reported_lsn,
      last_msg_send_time AS msg_send_time,last_msg_receipt_time AS msg_recv_time,latest_end_time AS reported_time,now() AS time FROM pg_stat_wal_receiver;

  ttl: 10
  # a walreceiver only exists on a streaming replica
  tags: [ cluster, replica ]
  # version window: [10, 11)
  min_version: 100000
  max_version: 110000
  metrics:
    - sender_host:         { usage: LABEL   ,description: "Host of the PostgreSQL instance this WAL receiver is connected to" }
    - sender_port:         { usage: LABEL   ,description: "Port number of the PostgreSQL instance this WAL receiver is connected to." }
    - slot_name:           { usage: LABEL   ,description: "Replication slot name used by this WAL receiver" }
    - pid:                 { usage: GAUGE   ,description: "Process ID of the WAL receiver process" }
    - state:               { usage: GAUGE   ,description: "Encoded activity status of the WAL receiver process 0-4 for streaming|startup|catchup|backup|stopping" }
    - init_lsn:            { usage: COUNTER ,description: "First write-ahead log location used when WAL receiver is started" }
    - init_tli:            { usage: COUNTER ,description: "First timeline number used when WAL receiver is started" }
    - flush_lsn:           { usage: COUNTER ,description: "Last write-ahead log location already received and flushed to disk" }
    - flush_tli:           { usage: COUNTER ,description: "Timeline number of last write-ahead log location received and flushed to disk" }
    - reported_lsn:        { usage: COUNTER ,description: "Last write-ahead log location reported to origin WAL sender" }
    - msg_send_time:       { usage: GAUGE   ,description: "Send time of last message received from origin WAL sender" }
    - msg_recv_time:       { usage: GAUGE   ,description: "Receipt time of last message received from origin WAL sender" }
    - reported_time:       { usage: GAUGE   ,description: "Time of last write-ahead log location reported to origin WAL sender" }
    - time:                { usage: GAUGE   ,description: "Time of current snapshot" }



================================================
FILE: config/0260-pg_sub.yml
================================================
#==============================================================#
# 0260 pg_sub
#==============================================================#
# Logical subscription progress and error counters for PostgreSQL 16+.
pg_sub_16:
  name: pg_sub
  desc: PostgreSQL subscription statistics (16+)
  # The subquery keeps only the leader apply worker per subscription:
  # `relid IS NULL` excludes table-synchronization workers and
  # `leader_pid IS NULL` excludes parallel apply workers (column new in v16).
  # Error counters come from pg_stat_subscription_stats via LEFT JOIN so a
  # subscription row survives even without a stats entry.
  query: |-
    SELECT 
      s1.subname, subid AS id, pid, received_lsn, reported_lsn,
      msg_send_time, msg_recv_time, reported_time,
      apply_error_count, sync_error_count
    FROM
      (SELECT
        subname, subid, pid,
        received_lsn - '0/0' AS received_lsn, latest_end_lsn - '0/0' AS reported_lsn,
        extract(epoch from last_msg_send_time) AS msg_send_time,
        extract(epoch from last_msg_receipt_time) AS msg_recv_time,
        extract(epoch from latest_end_time) AS reported_time
      FROM pg_stat_subscription 
      WHERE relid IS NULL AND leader_pid IS NULL) s1
    LEFT OUTER JOIN pg_stat_subscription_stats s2 USING(subid);

  ttl: 10
  min_version: 160000
  tags: [ cluster ]
  metrics:
    - subname:             { usage: LABEL   ,description: "Name of this subscription" }
    - id:                  { usage: GAUGE   ,description: "OID of the subscription" }
    - pid:                 { usage: GAUGE   ,description: "Process ID of the subscription leader apply worker" }
    - received_lsn:        { usage: COUNTER ,description: "Last write-ahead log location received" }
    - reported_lsn:        { usage: COUNTER ,description: "Last write-ahead log location reported to origin WAL sender" }
    - msg_send_time:       { usage: GAUGE   ,description: "Send time of last message received from origin WAL sender" }
    - msg_recv_time:       { usage: GAUGE   ,description: "Receipt time of last message received from origin WAL sender" }
    - reported_time:       { usage: GAUGE   ,description: "Time of last write-ahead log location reported to origin WAL sender" }
    - apply_error_count:   { usage: COUNTER ,description: "Number of times an error occurred while applying changes" }
    - sync_error_count:    { usage: COUNTER ,description: "Number of times an error occurred during the initial table synchronization" }

# Logical subscription progress and error counters for PostgreSQL 15 only:
# pg_stat_subscription_stats exists here, but leader_pid does not until v16.
pg_sub_15:
  name: pg_sub
  desc: PostgreSQL subscription statistics (15)
  # `relid ISNULL` keeps the main apply worker and drops table-sync workers;
  # LEFT JOIN keeps the row even without a pg_stat_subscription_stats entry.
  query: |-
    SELECT 
      s1.subname, subid AS id, pid, received_lsn, reported_lsn,
      msg_send_time, msg_recv_time, reported_time,
      apply_error_count, sync_error_count
    FROM
      (SELECT
        subname, subid, pid,
        received_lsn - '0/0' AS received_lsn, latest_end_lsn - '0/0' AS reported_lsn,
        extract(epoch from last_msg_send_time) AS msg_send_time,
        extract(epoch from last_msg_receipt_time) AS msg_recv_time,
        extract(epoch from latest_end_time) AS reported_time
      FROM pg_stat_subscription WHERE relid ISNULL) s1
    LEFT OUTER JOIN pg_stat_subscription_stats s2 USING(subid);

  ttl: 10
  # version window: [15, 16)
  min_version: 150000
  max_version: 160000
  tags: [ cluster ]
  metrics:
    - subname:             { usage: LABEL   ,description: "Name of this subscription" }
    - id:                  { usage: GAUGE   ,description: "OID of the subscription" }
    - pid:                 { usage: GAUGE   ,description: "Process ID of the subscription main apply worker process" }
    - received_lsn:        { usage: COUNTER ,description: "Last write-ahead log location received" }
    - reported_lsn:        { usage: COUNTER ,description: "Last write-ahead log location reported to origin WAL sender" }
    - msg_send_time:       { usage: GAUGE   ,description: "Send time of last message received from origin WAL sender" }
    - msg_recv_time:       { usage: GAUGE   ,description: "Receipt time of last message received from origin WAL sender" }
    - reported_time:       { usage: GAUGE   ,description: "Time of last write-ahead log location reported to origin WAL sender" }
    - apply_error_count:   { usage: COUNTER ,description: "Number of times an error occurred while applying changes." }
    - sync_error_count:    { usage: COUNTER ,description: "Number of times an error occurred during the initial table synchronization" }

# Logical subscription progress for PostgreSQL 10-14: no error counters,
# since pg_stat_subscription_stats only exists from v15 onward.
pg_sub_10:
  name: pg_sub
  desc: PostgreSQL subscription statistics (10-14)
  # `relid ISNULL` keeps the main apply worker and drops table-sync workers
  query: |-
    SELECT 
      subname, subid AS id, pid,
      received_lsn - '0/0' AS received_lsn, latest_end_lsn - '0/0' AS reported_lsn,
      extract(epoch from last_msg_send_time) AS msg_send_time,
      extract(epoch from last_msg_receipt_time) AS msg_recv_time,
      extract(epoch from latest_end_time) AS reported_time
    FROM pg_stat_subscription WHERE relid ISNULL;

  ttl: 10
  # version window: [10, 15)
  min_version: 100000
  max_version: 150000
  tags: [ cluster ]
  metrics:
    - subname:             { usage: LABEL   ,description: "Name of this subscription" }
    - id:                  { usage: GAUGE   ,description: "OID of the subscription" }
    - pid:                 { usage: GAUGE   ,description: "Process ID of the subscription main apply worker process" }
    - received_lsn:        { usage: COUNTER ,description: "Last write-ahead log location received" }
    - reported_lsn:        { usage: COUNTER ,description: "Last write-ahead log location reported to origin WAL sender" }
    - msg_send_time:       { usage: GAUGE   ,description: "Send time of last message received from origin WAL sender" }
    - msg_recv_time:       { usage: GAUGE   ,description: "Receipt time of last message received from origin WAL sender" }
    - reported_time:       { usage: GAUGE   ,description: "Time of last write-ahead log location reported to origin WAL sender" }




================================================
FILE: config/0270-pg_origin.yml
================================================
#==============================================================#
# 0270 pg_origin
#==============================================================#
# skipped by default: requires additional privilege setup, e.g.
# GRANT SELECT ON pg_replication_origin, pg_replication_origin_status TO pg_monitor;
# Replication origin replay progress. LEFT JOIN keeps every defined origin
# even when it has no status row; `lsn - '0/0'` converts pg_lsn to bytes.
pg_origin:
  name: pg_origin
  desc: PostgreSQL replay state (approximate) for a certain origin
  query: SELECT roname, remote_lsn - '0/0' AS remote_lsn, local_lsn - '0/0' AS local_lsn FROM pg_replication_origin o LEFT JOIN pg_replication_origin_status os ON o.roident = os.local_id;
  ttl: 10
  min_version: 090500
  # disabled unless explicitly enabled: needs the GRANTs documented above
  skip: true
  tags: [ cluster ]
  metrics:
    - roname:              { usage: LABEL     ,description: "The external, user defined, name of a replication origin." }
    - remote_lsn:          { usage: COUNTER   ,description: "The origin node's LSN up to which data has been replicated." }
    - local_lsn:           { usage: COUNTER   ,description: "This node's LSN at which remote_lsn has been replicated." }




================================================
FILE: config/0300-pg_io.yml
================================================
#==============================================================#
# 0300 pg_io
#==============================================================#
# Cluster-wide I/O statistics for PostgreSQL 18+: this branch adds the
# read_bytes / write_bytes / extend_bytes columns introduced in v18.
pg_io_18:
  name: pg_io
  desc: PostgreSQL I/O stats since v18
  query: |-
    SELECT backend_type AS "type",object,context,reads,read_bytes,read_time,writes,write_bytes,write_time,writebacks,writeback_time,
    extends,extend_bytes,extend_time,hits,evictions,reuses,fsyncs,fsync_time,extract(EPOCH FROM stats_reset) AS reset_time FROM pg_stat_io;

  ttl: 10
  timeout: 1
  min_version: 180000
  tags: [ cluster ]
  # *_time columns are reported in milliseconds by pg_stat_io;
  # scale 1e-3 converts them to seconds for Prometheus conventions.
  metrics:
    - type:               { usage: LABEL                              ,description: "Type of backend" }
    - object:             { usage: LABEL                              ,description: "Target object of an I/O operation, relation or temp" }
    - context:            { usage: LABEL                              ,description: "The context of an I/O operation. normal,vacuum,bulkread,bulkwrite" }
    - reads:              { usage: COUNTER ,default: 0                ,description: "Number of read operations, each of the size specified in op_bytes." }
    - read_bytes:         { usage: COUNTER ,default: 0                ,description: "Number of read bytes" }
    - read_time:          { usage: COUNTER ,default: 0  ,scale: 1e-3  ,description: "Time spent in read operations in seconds" }
    - writes:             { usage: COUNTER ,default: 0                ,description: "Number of write operations, each of the size specified in op_bytes." }
    - write_time:         { usage: COUNTER ,default: 0  ,scale: 1e-3  ,description: "Time spent in write operations in seconds" }
    - write_bytes:        { usage: COUNTER ,default: 0                ,description: "Number of write bytes" }
    - writebacks:         { usage: COUNTER ,default: 0                ,description: "Number of units of size op_bytes which the process requested the kernel write out to permanent storage." }
    - writeback_time:     { usage: COUNTER ,default: 0  ,scale: 1e-3  ,description: "Time spent in writeback operations in seconds" }
    - extends:            { usage: COUNTER ,default: 0                ,description: "Number of relation extend operations, each of the size specified in op_bytes." }
    - extend_bytes:       { usage: COUNTER ,default: 0                ,description: "Number of extend bytes" }
    - extend_time:        { usage: COUNTER ,default: 0  ,scale: 1e-3  ,description: "Time spent in extend operations in seconds" }
    - hits:               { usage: COUNTER ,default: 0                ,description: "The number of times a desired block was found in a shared buffer." }
    - evictions:          { usage: COUNTER ,default: 0                ,description: "Number of times a block has been written out from a shared or local buffer" }
    - reuses:             { usage: COUNTER ,default: 0                ,description: "The number of times an existing buffer is reused" }
    - fsyncs:             { usage: COUNTER ,default: 0                ,description: "Number of fsync calls. These are only tracked in context normal" }
    - fsync_time:         { usage: COUNTER ,default: 0  ,scale: 1e-3  ,description: "Time spent in fsync operations in seconds" }
    - reset_time:         { usage: GAUGE                              ,description: "Timestamp at which these statistics were last reset" }

# Cluster-wide I/O statistics for PostgreSQL 16-17 (pg_stat_io was introduced
# in v16; byte-count columns only exist in the v18 branch above).
pg_io_16:
  name: pg_io
  desc: PostgreSQL I/O stats
  query: |-
    SELECT backend_type AS "type", object, context, reads, read_time,writes,write_time,writebacks,writeback_time,extends,
      extend_time,hits,evictions,reuses,fsyncs,fsync_time,extract(EPOCH FROM stats_reset) AS reset_time FROM pg_stat_io;

  ttl: 10
  timeout: 1
  # version window: [16, 18)
  min_version: 160000
  max_version: 180000
  tags: [ cluster ]
  # *_time columns are reported in milliseconds by pg_stat_io;
  # scale 1e-3 converts them to seconds for Prometheus conventions.
  metrics:
    - type:               { usage: LABEL                              ,description: "Type of backend" }
    - object:             { usage: LABEL                              ,description: "Target object of an I/O operation, relation or temp" }
    - context:            { usage: LABEL                              ,description: "The context of an I/O operation. normal,vacuum,bulkread,bulkwrite" }
    - reads:              { usage: COUNTER ,default: 0                ,description: "Number of read operations, each of the size specified in op_bytes." }
    - read_time:          { usage: COUNTER ,default: 0  ,scale: 1e-3  ,description: "Time spent in read operations in seconds" }
    - writes:             { usage: COUNTER ,default: 0                ,description: "Number of write operations, each of the size specified in op_bytes." }
    - write_time:         { usage: COUNTER ,default: 0  ,scale: 1e-3  ,description: "Time spent in write operations in seconds" }
    - writebacks:         { usage: COUNTER ,default: 0                ,description: "Number of units of size op_bytes which the process requested the kernel write out to permanent storage." }
    - writeback_time:     { usage: COUNTER ,default: 0  ,scale: 1e-3  ,description: "Time spent in writeback operations in seconds" }
    - extends:            { usage: COUNTER ,default: 0                ,description: "Number of relation extend operations, each of the size specified in op_bytes." }
    - extend_time:        { usage: COUNTER ,default: 0  ,scale: 1e-3  ,description: "Time spent in extend operations in seconds" }
    - hits:               { usage: COUNTER ,default: 0                ,description: "The number of times a desired block was found in a shared buffer." }
    - evictions:          { usage: COUNTER ,default: 0                ,description: "Number of times a block has been written out from a shared or local buffer" }
    - reuses:             { usage: COUNTER ,default: 0                ,description: "The number of times an existing buffer is reused" }
    - fsyncs:             { usage: COUNTER ,default: 0                ,description: "Number of fsync calls. These are only tracked in context normal" }
    - fsync_time:         { usage: COUNTER ,default: 0  ,scale: 1e-3  ,description: "Time spent in fsync operations in seconds" }
    - reset_time:         { usage: GAUGE                              ,description: "Timestamp at which these statistics were last reset" }




================================================
FILE: config/0310-pg_size.yml
================================================
#==============================================================#
# 0310 pg_size
#==============================================================#
# Disk usage overview: one row per database plus two synthetic rows labelled
# 'wal' and 'log'. Log size is 0 unless logging_collector is on; COALESCE
# guards against empty directory listings.
# NOTE(review): pg_ls_waldir()/pg_ls_logdir() require elevated privileges
# (e.g. pg_monitor membership) — confirm the monitoring role has them.
pg_size:
  name: pg_size
  desc: PostgreSQL Database, WAL, Log size since v10
  query: |-
    SELECT datname, pg_database_size(oid) AS bytes FROM pg_database
    UNION ALL SELECT 'log', CASE WHEN current_setting('logging_collector') = 'on' THEN COALESCE((SELECT SUM(size) FROM pg_catalog.pg_ls_logdir()), 0) ELSE 0 END
    UNION ALL SELECT 'wal', COALESCE((SELECT SUM(size) FROM pg_catalog.pg_ls_waldir()), 0);

  # directory scans are relatively expensive, hence the longer TTL and timeout
  ttl: 60
  timeout: 1
  min_version: 100000
  tags: [ cluster ]
  metrics:
    - datname:             { usage: LABEL   ,description: "Database name, or special category wal, or log" }
    - bytes:               { usage: GAUGE   ,description: "File size in bytes" }




================================================
FILE: config/0320-pg_archiver.yml
================================================
#==============================================================#
# 0320 pg_archiver
#==============================================================#
# WAL archiver progress: counters and last success/failure timestamps from
# pg_stat_archiver, with timestamps converted to epoch seconds.
pg_archiver:
  name: pg_archiver
  desc: PostgreSQL archiver process statistics
  query: |-
    SELECT archived_count AS finish_count,failed_count,
      extract(epoch FROM last_archived_time) AS finish_time,
      extract(epoch FROM last_failed_time) AS failed_time,
      extract(epoch FROM stats_reset) AS reset_time
    FROM pg_stat_archiver;

  ttl: 60
  min_version: 090400
  tags: [ cluster ]
  metrics:
    - finish_count:        { usage: COUNTER ,description: "Number of WAL files that have been successfully archived" }
    - failed_count:        { usage: COUNTER ,description: "Number of failed attempts for archiving WAL files" }
    - finish_time:         { usage: GAUGE   ,description: "Time of the last successful archive operation" }
    - failed_time:         { usage: GAUGE   ,description: "Time of the last failed archival operation" }
    - reset_time:          { usage: GAUGE   ,description: "Time at which archive statistics were last reset" }




================================================
FILE: config/0330-pg_bgwriter.yml
================================================
#==============================================================#
# 0330 pg_bgwriter
#==============================================================#
# https://pgpedia.info/p/pg_stat_bgwriter.html
# Background writer stats for PostgreSQL 17+: v17 moved the checkpoint- and
# backend-write columns out of pg_stat_bgwriter (see pg_checkpointer / pg_io),
# leaving only the cleaning-scan counters collected here.
pg_bgwriter_17:
  name: pg_bgwriter
  desc: "PostgreSQL background writer metrics PG 17+"
  query: SELECT buffers_clean, maxwritten_clean, buffers_alloc, extract(EPOCH FROM stats_reset) AS reset_time FROM pg_stat_bgwriter;
  ttl: 10
  min_version: 170000
  tags: [ cluster ]
  metrics:
    - buffers_clean:       { usage: COUNTER ,description: "Number of buffers written by the background writer" }
    - maxwritten_clean:    { usage: COUNTER ,description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" }
    - buffers_alloc:       { usage: COUNTER ,description: "Number of buffers allocated" }
    - reset_time:          { usage: GAUGE   ,description: "Time at which bgwriter statistics were last reset" }

# Background writer stats for PostgreSQL 9.4-16, where pg_stat_bgwriter still
# carries checkpoint and backend-write counters (split out in v17).
pg_bgwriter_10:
  name: pg_bgwriter
  desc: "PostgreSQL background writer metrics (PG 9.4-16)"
  # checkpoint_*_time are in milliseconds; scale 1e-3 converts them to seconds
  query: SELECT checkpoints_timed, checkpoints_req, checkpoint_write_time, checkpoint_sync_time, buffers_checkpoint, buffers_clean, buffers_backend, maxwritten_clean, buffers_backend_fsync, buffers_alloc, extract(EPOCH FROM stats_reset) AS reset_time FROM pg_stat_bgwriter;
  ttl: 10
  # version window: [9.4, 17)
  min_version: 090400
  max_version: 170000
  tags: [ cluster ]
  metrics:
    - checkpoints_timed:     { usage: COUNTER              ,description: "Number of scheduled checkpoints that have been performed" }
    - checkpoints_req:       { usage: COUNTER              ,description: "Number of requested checkpoints that have been performed" }
    - checkpoint_write_time: { usage: COUNTER ,scale: 1e-3 ,description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in seconds" }
    - checkpoint_sync_time:  { usage: COUNTER ,scale: 1e-3 ,description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in seconds" }
    - buffers_checkpoint:    { usage: COUNTER              ,description: "Number of buffers written during checkpoints" }
    - buffers_clean:         { usage: COUNTER              ,description: "Number of buffers written by the background writer" }
    - buffers_backend:       { usage: COUNTER              ,description: "Number of buffers written directly by a backend" }
    - maxwritten_clean:      { usage: COUNTER              ,description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" }
    - buffers_backend_fsync: { usage: COUNTER              ,description: "Number of times a backend had to execute its own fsync call" }
    - buffers_alloc:         { usage: COUNTER              ,description: "Number of buffers allocated" }
    - reset_time:            { usage: GAUGE                ,description: "Time at which bgwriter statistics were last reset" }



================================================
FILE: config/0331-pg_checkpointer.yml
================================================
#==============================================================#
# 0331 pg_checkpointer
#==============================================================#
# Checkpointer stats for PostgreSQL 18+: this branch adds num_done and
# slru_written, which are not selected by the v17 branch below.
pg_checkpointer_18:
  name: pg_checkpointer
  desc: "PostgreSQL checkpointer stat metrics for pg 18+"
  # write_time/sync_time are in milliseconds; scale 1e-3 converts to seconds
  query: SELECT num_timed, num_requested, num_done, restartpoints_timed, restartpoints_req, restartpoints_done, write_time, sync_time, buffers_written, slru_written, extract(EPOCH FROM stats_reset) AS reset_time FROM pg_stat_checkpointer;
  ttl: 10
  min_version: 180000
  tags: [ cluster ]
  # rename shortens the exported metric names (e.g. pg_checkpointer_timed)
  metrics:
    - num_timed:             { usage: COUNTER ,rename: timed ,description: "Number of scheduled checkpoints that have been performed" }
    - num_requested:         { usage: COUNTER ,rename: req   ,description: "Number of requested checkpoints that have been performed" }
    - num_done:              { usage: COUNTER ,rename: done  ,description: "Number of checkpoints that have been performed" }
    - restartpoints_timed:   { usage: COUNTER                ,description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" }
    - restartpoints_req:     { usage: COUNTER                ,description: "Number of requested restartpoints" }
    - restartpoints_done:    { usage: COUNTER                ,description: "Number of restartpoints that have been performed" }
    - write_time:            { usage: COUNTER ,scale: 1e-3   ,description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in seconds" }
    - sync_time:             { usage: COUNTER ,scale: 1e-3   ,description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in seconds" }
    - buffers_written:       { usage: COUNTER                ,description: "Number of buffers written during checkpoints and restartpoints" }
    - slru_written:          { usage: COUNTER                ,description: "Number of SLRU buffers written during checkpoints and restartpoints" }
    - reset_time:            { usage: GAUGE                  ,description: "Time at which checkpointer statistics were last reset" }

# pg 17-only branch: pg_stat_checkpointer without the num_done / slru_written
# columns that appear in the pg 18+ branch.
pg_checkpointer_17:
  name: pg_checkpointer
  desc: "PostgreSQL checkpointer stat metrics for pg 17"
  query: SELECT num_timed, num_requested, restartpoints_timed, restartpoints_req, restartpoints_done, write_time, sync_time, buffers_written, extract(EPOCH FROM stats_reset) AS reset_time FROM pg_stat_checkpointer;
  ttl: 10
  min_version: 170000
  max_version: 180000
  tags: [ cluster ]
  metrics:
    - num_timed:             { usage: COUNTER ,rename: timed ,description: "Number of scheduled checkpoints that have been performed" }
    - num_requested:         { usage: COUNTER ,rename: req   ,description: "Number of requested checkpoints that have been performed" }
    - restartpoints_timed:   { usage: COUNTER                ,description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" }
    - restartpoints_req:     { usage: COUNTER                ,description: "Number of requested restartpoints" }
    - restartpoints_done:    { usage: COUNTER                ,description: "Number of restartpoints that have been performed" }
    # write_time / sync_time are scaled by 1e-3 (milliseconds -> seconds per description)
    - write_time:            { usage: COUNTER ,scale: 1e-3   ,description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in seconds" }
    - sync_time:             { usage: COUNTER ,scale: 1e-3   ,description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in seconds" }
    - buffers_written:       { usage: COUNTER                ,description: "Number of buffers written during checkpoints and restartpoints" }
    - reset_time:            { usage: GAUGE                  ,description: "Time at which checkpointer statistics were last reset" }

# pre-17 branch: checkpointer counters are read from pg_stat_bgwriter and
# renamed to match the metric names emitted by the pg 17+ branches above.
pg_checkpointer_10:
  name: pg_checkpointer
  desc: "PostgreSQL checkpointer stat metrics for pg 9.4-16"
  query: SELECT checkpoints_timed, checkpoints_req, checkpoint_write_time, checkpoint_sync_time, buffers_checkpoint, extract(EPOCH FROM stats_reset) AS reset_time FROM pg_stat_bgwriter;
  ttl: 10
  min_version: 090400
  max_version: 170000
  tags: [ cluster ]
  metrics:
    - checkpoints_timed:     { usage: COUNTER ,rename: timed                   ,description: "Number of scheduled checkpoints that have been performed" }
    - checkpoints_req:       { usage: COUNTER ,rename: req                     ,description: "Number of requested checkpoints that have been performed" }
    - checkpoint_write_time: { usage: COUNTER ,rename: write_time ,scale: 1e-3 ,description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in seconds" }
    - checkpoint_sync_time:  { usage: COUNTER ,rename: sync_time  ,scale: 1e-3 ,description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in seconds" }
    - buffers_checkpoint:    { usage: COUNTER ,rename: buffers_written         ,description: "Number of buffers written during checkpoints and restartpoints" }
    - reset_time:            { usage: GAUGE                                    ,description: "Time at which checkpointer statistics were last reset" }



================================================
FILE: config/0340-pg_ssl.yml
================================================
#==============================================================#
# 0340 pg_ssl
#==============================================================#
# Client connection census split by SSL usage, from pg_stat_ssl (pg 9.5+).
pg_ssl:
  name: pg_ssl
  desc: PostgreSQL SSL client connection count
  query: |
    SELECT count(*) FILTER (WHERE ssl) AS enabled, count(*) FILTER ( WHERE NOT ssl) AS disabled FROM pg_stat_ssl;
  ttl: 10
  min_version: 090500
  tags: [ cluster ]
  metrics:
    - enabled:            { usage: GAUGE   ,description: "Number of client connections that use ssl" }
    - disabled:           { usage: GAUGE   ,description: "Number of client connections that do not use ssl" }




================================================
FILE: config/0350-pg_checkpoint.yml
================================================
#==============================================================#
# 0350 pg_checkpoint
#==============================================================#
# Control-file checkpoint information via pg_control_checkpoint() (pg 10+).
# LSN columns are converted to numeric byte offsets by subtracting '0/0';
# xid/multixact columns are cast through text because they have no direct BIGINT cast.
pg_checkpoint:
  name: pg_checkpoint
  desc: checkpoint information from pg_control_checkpoint since 10
  query: |-
    SELECT 
      checkpoint_lsn - '0/0' AS checkpoint_lsn,
      redo_lsn - '0/0' AS redo_lsn,
      timeline_id AS tli,
      prev_timeline_id AS prev_tli,
      full_page_writes,
      split_part(next_xid, ':', 1) AS next_xid_epoch,
      split_part(next_xid, ':', 2) AS next_xid,
      next_oid::BIGINT,
      next_multixact_id::text::BIGINT,
      next_multi_offset::text::BIGINT,
      oldest_xid::text::BIGINT,
      oldest_xid_dbid::text::BIGINT,
      oldest_active_xid::text::BIGINT,
      oldest_multi_xid::text::BIGINT,
      oldest_multi_dbid::BIGINT,
      oldest_commit_ts_xid::text::BIGINT,
      newest_commit_ts_xid::text::BIGINT,
      checkpoint_time                             AS time,
      extract(epoch from now() - checkpoint_time) AS elapse
    FROM pg_control_checkpoint();

  ttl: 60
  min_version: 100000
  tags: [ cluster ]
  metrics:
    - checkpoint_lsn:       { usage: COUNTER ,description: "Latest checkpoint location" }
    - redo_lsn:             { usage: COUNTER ,description: "Latest checkpoint's REDO location" }
    - tli:                  { usage: COUNTER ,description: "Latest checkpoint's TimeLineID" }
    - prev_tli:             { usage: COUNTER ,description: "Latest checkpoint's PrevTimeLineID" }
    - full_page_writes:     { usage: GAUGE   ,description: "Latest checkpoint's full_page_writes enabled" }
    - next_xid_epoch:       { usage: COUNTER ,description: "Latest checkpoint's NextXID epoch" }
    - next_xid:             { usage: COUNTER ,description: "Latest checkpoint's NextXID xid" }
    - next_oid:             { usage: COUNTER ,description: "Latest checkpoint's NextOID" }
    - next_multixact_id:    { usage: COUNTER ,description: "Latest checkpoint's NextMultiXactId" }
    - next_multi_offset:    { usage: COUNTER ,description: "Latest checkpoint's NextMultiOffset" }
    - oldest_xid:           { usage: COUNTER ,description: "Latest checkpoint's oldestXID" }
    - oldest_xid_dbid:      { usage: GAUGE   ,description: "Latest checkpoint's oldestXID's DB OID" }
    - oldest_active_xid:    { usage: COUNTER ,description: "Latest checkpoint's oldestActiveXID" }
    - oldest_multi_xid:     { usage: COUNTER ,description: "Latest checkpoint's oldestMultiXid" }
    - oldest_multi_dbid:    { usage: GAUGE   ,description: "Latest checkpoint's oldestMulti's DB OID" }
    - oldest_commit_ts_xid: { usage: COUNTER ,description: "Latest checkpoint's oldestCommitTsXid" }
    - newest_commit_ts_xid: { usage: COUNTER ,description: "Latest checkpoint's newestCommitTsXid" }
    # NOTE(review): `time` is selected as a raw timestamp, unlike reset_time columns
    # elsewhere that use extract(EPOCH ...) — presumably the exporter converts
    # timestamps to epoch seconds when scanning; verify against the collector code.
    - time:                 { usage: COUNTER ,description: "Time of latest checkpoint" }
    - elapse:               { usage: GAUGE   ,description: "Seconds elapsed since latest checkpoint in seconds" }




================================================
FILE: config/0355-pg_timeline.yml
================================================
#==============================================================#
# 0355 pg_timeline
#==============================================================#
# Current timeline: prefer received_tli from pg_stat_wal_receiver (populated when
# streaming as a replica); otherwise fall back to the control file's timeline_id.
pg_timeline:
  name: pg_timeline
  desc: Current timeline ID from primary or replica
  query: |
    SELECT COALESCE(
      (SELECT received_tli FROM pg_stat_wal_receiver),
      (SELECT timeline_id FROM pg_control_checkpoint())
    ) AS id;
  ttl: 10
  min_version: 100000
  tags: [ cluster ]
  metrics:
    - id: { usage: GAUGE ,description: "Current timeline ID" }




================================================
FILE: config/0360-pg_recovery.yml
================================================
#==============================================================#
# 0360 pg_recovery
#==============================================================#
# Control-file recovery info via pg_control_recovery(); LSNs converted to byte
# offsets via '- 0/0'. The 'replica' tag presumably restricts this collector to
# standby instances — verify against the exporter's tag semantics.
pg_recovery:
  name: pg_recovery
  desc: PostgreSQL control recovery metrics (9.6+)
  query: |
    SELECT min_recovery_end_timeline    AS min_timeline,
      min_recovery_end_lsn - '0/0' AS min_lsn,
      backup_start_lsn - '0/0'     AS backup_start_lsn,
      backup_end_lsn - '0/0'       AS backup_end_lsn,
      end_of_backup_record_required AS require_record
    FROM pg_control_recovery();
  ttl: 10
  min_version: 090600
  tags: [ cluster, replica ]
  metrics:
    - min_timeline:      { usage: COUNTER ,description: "Min recovery ending loc's timeline" }
    - min_lsn:           { usage: COUNTER ,description: "Minimum recovery ending location" }
    - backup_start_lsn:  { usage: COUNTER ,description: "Backup start location" }
    - backup_end_lsn:    { usage: COUNTER ,description: "Backup end location" }
    - require_record:    { usage: GAUGE   ,description: "End-of-backup record required" }

# WAL recovery prefetch statistics from pg_stat_recovery_prefetch (pg 15+).
pg_recovery_prefetch:
  name: pg_recovery_prefetch
  desc: PostgreSQL recovery prefetch metrics (15+)
  query: SELECT prefetch,hit,skip_init,skip_new,skip_fpw,skip_rep,wal_distance,block_distance,io_depth,extract(EPOCH FROM stats_reset) AS reset_time FROM pg_stat_recovery_prefetch;
  ttl: 10
  min_version: 150000
  tags: [ cluster, replica ]
  metrics:
    - prefetch:       { usage: COUNTER ,description: "Number of blocks prefetched because they were not in the buffer pool" }
    - hit:            { usage: COUNTER ,description: "Number of blocks not prefetched because they were already in the buffer pool" }
    - skip_init:      { usage: COUNTER ,description: "Number of blocks not prefetched because they would be zero-initialized" }
    - skip_new:       { usage: COUNTER ,description: "Number of blocks not prefetched because they didn't exist yet" }
    - skip_fpw:       { usage: COUNTER ,description: "Number of blocks not prefetched because a full page image was included in the WAL" }
    - skip_rep:       { usage: COUNTER ,description: "Number of blocks not prefetched because they were already recently prefetched" }
    - wal_distance:   { usage: GAUGE   ,description: "How many bytes ahead the prefetcher is looking" }
    - block_distance: { usage: GAUGE   ,description: "How many blocks ahead the prefetcher is looking" }
    - io_depth:       { usage: GAUGE   ,description: "How many prefetches have been initiated but are not yet known to have completed" }
    - reset_time:     { usage: GAUGE   ,description: "Time at which these recovery prefetch statistics were last reset" }




================================================
FILE: config/0370-pg_slru.yml
================================================
#==============================================================#
# 0370 pg_slru
#==============================================================#
# Per-SLRU cache statistics from pg_stat_slru (pg 13+); one series per SLRU name.
pg_slru_13:
  name: pg_slru
  desc: PostgreSQL simple-least-recently-used (SLRU) cache statistics v13
  query: SELECT name, blks_zeroed, blks_hit, blks_read, blks_written, blks_exists, flushes, truncates, extract(EPOCH FROM stats_reset) AS reset_time FROM pg_stat_slru;
  ttl: 60
  min_version: 130000
  tags: [ cluster ]
  metrics:
    - name:         { usage: LABEL   ,description: "Name of the SLRU" }
    - blks_zeroed:  { usage: COUNTER ,description: "Number of blocks zeroed during initializations" }
    - blks_hit:     { usage: COUNTER ,description: "Number of times disk blocks were found already in the SLRU, so that a read was not necessary" }
    - blks_read:    { usage: COUNTER ,description: "Number of disk blocks read for this SLRU" }
    - blks_written: { usage: COUNTER ,description: "Number of disk blocks written for this SLRU" }
    - blks_exists:  { usage: COUNTER ,description: "Number of blocks checked for existence for this SLRU" }
    - flushes:      { usage: COUNTER ,description: "Number of flushes of dirty data for this SLRU" }
    - truncates:    { usage: COUNTER ,description: "Number of truncates for this SLRU" }
    - reset_time:   { usage: GAUGE   ,description: "Time at which these statistics were last reset" }




================================================
FILE: config/0380-pg_shmem.yml
================================================
#==============================================================#
# 0380 pg_shmem
#==============================================================#
# pg_shmem requires superuser privilege to work. Disable it, or create an auxiliary function as superuser before use:
# CREATE OR REPLACE FUNCTION monitor.pg_shmem() RETURNS SETOF pg_shmem_allocations AS $$ SELECT * FROM pg_shmem_allocations;$$ LANGUAGE SQL SECURITY DEFINER;
# Disabled by default (skip: true). Depends on a monitor.pg_shmem() wrapper
# function existing (see the setup comment above); the "schema:monitor" tag
# presumably gates this collector on that schema being present — verify.
pg_shmem:
  name: pg_shmem
  desc: Allocations made from the server's main shared memory segment
  query: SELECT coalesce(name, 'Free') AS name, off AS offset, size, allocated_size FROM monitor.pg_shmem();
  ttl: 60
  min_version: 130000
  skip: true            # disable it by default
  tags: [cluster, "schema:monitor" ]
  metrics:
    - name:            { usage: LABEL ,description: "Name of the shared memory allocation" }
    - offset:          { usage: GAUGE ,description: "The offset at which the allocation starts" }
    - size:            { usage: GAUGE ,description: "Size of the allocation" }
    - allocated_size:  { usage: GAUGE ,description: "Size of the allocation including padding" }




================================================
FILE: config/0390-pg_wal.yml
================================================
#==============================================================#
# 0390 pg_wal
#==============================================================#
# pg 18+ branch of pg_stat_wal: the write/sync counters and timings present in
# the pg 14-17 branch below are no longer selected here.
pg_wal_18:
  name: pg_wal
  desc: "PostgreSQL WAL statistics since v18 (write/sync columns removed)"
  query: SELECT wal_records AS records, wal_fpi AS fpi, wal_bytes AS bytes, wal_buffers_full AS buffers_full,extract(EPOCH FROM stats_reset) AS reset_time FROM pg_stat_wal;
  ttl: 10
  tags: [ cluster ]
  min_version: 180000
  metrics:
    - records:      { usage: COUNTER              ,description: "Total number of WAL records generated" }
    - fpi:          { usage: COUNTER              ,description: "Total number of WAL full page images generated" }
    - bytes:        { usage: COUNTER              ,description: "Total amount of WAL generated in bytes" }
    - buffers_full: { usage: COUNTER              ,description: "Number of times WAL data was written to disk because WAL buffers became full" }
    - reset_time:   { usage: GAUGE                ,description: "When statistics were last reset" }

# pg 14-17 branch: full pg_stat_wal column set including write/sync counters;
# write_time / sync_time are scaled by 1e-3 (milliseconds -> seconds per description).
pg_wal_14:
  name: pg_wal
  desc: PostgreSQL WAL statistics since v14
  query: SELECT wal_records AS records, wal_fpi AS fpi, wal_bytes AS bytes, wal_buffers_full AS buffers_full, wal_write AS write, wal_sync AS sync, wal_write_time AS write_time, wal_sync_time AS sync_time, extract(EPOCH FROM stats_reset) AS reset_time FROM pg_stat_wal;
  ttl: 10
  tags: [ cluster ]
  min_version: 140000
  max_version: 180000
  metrics:
    - records:      { usage: COUNTER              ,description: "Total number of WAL records generated" }
    - fpi:          { usage: COUNTER              ,description: "Total number of WAL full page images generated" }
    - bytes:        { usage: COUNTER              ,description: "Total amount of WAL generated in bytes" }
    - buffers_full: { usage: COUNTER              ,description: "Number of times WAL data was written to disk because WAL buffers became full" }
    - write:        { usage: COUNTER              ,description: "Number of times WAL buffers were written out to disk via XLogWrite request." }
    - sync:         { usage: COUNTER              ,description: "Number of times WAL files were synced to disk via issue_xlog_fsync request" }
    - write_time:   { usage: COUNTER ,scale: 1e-3 ,description: "Total amount of time spent writing WAL buffers to disk via XLogWrite request in seconds" }
    - sync_time:    { usage: COUNTER ,scale: 1e-3 ,description: "Total amount of time spent syncing WAL files to disk via issue_xlog_fsync request, in seconds" }
    - reset_time:   { usage: GAUGE                ,description: "When statistics were last reset" }




================================================
FILE: config/0410-pg_activity.yml
================================================
#==============================================================#
# 0410 pg_activity
#==============================================================#
# Backend activity per (database, state). The cross join of connectable,
# non-template databases with the six fixed states ensures every pair emits a
# zero-valued series even when no matching backend exists; the exporter's own
# connection is excluded via pid <> pg_backend_pid().
pg_activity:
  name: pg_activity
  desc: PostgreSQL backend activity group by database and state
  query: |-
    SELECT datname, state, coalesce(count, 0) AS count, coalesce(max_duration, 0) AS max_duration, coalesce(max_tx_duration, 0) AS max_tx_duration, coalesce(max_conn_duration, 0) AS max_conn_duration FROM
        (SELECT d.datname, a.state FROM pg_database d, unnest(ARRAY ['active','idle','idle in transaction','idle in transaction (aborted)','fastpath function call','disabled']) a(state) WHERE d.datallowconn AND NOT d.datistemplate) base
          LEFT JOIN (SELECT datname, state, count(*) AS count, max(extract(epoch from now() - state_change)) AS max_duration, max(extract(epoch from now() - xact_start))
          AS max_tx_duration, max(extract(epoch from now() - backend_start)) AS max_conn_duration FROM pg_stat_activity WHERE pid <> pg_backend_pid() GROUP BY 1,2) data USING (datname,state);
  ttl: 10
  min_version: 090400
  tags: [ cluster ]
  metrics:
    - datname:           { usage: LABEL ,description: "Name of the database this backend is connected to" }
    - state:             { usage: LABEL ,description: "Current overall state of this backend." }
    - count:             { usage: GAUGE ,description: "Count of connection among (datname,state)" }
    - max_duration:      { usage: GAUGE ,description: "Max duration since last state change among (datname, state)" }
    - max_tx_duration:   { usage: GAUGE ,description: "Max transaction duration since state change among (datname, state)" }
    - max_conn_duration: { usage: GAUGE ,description: "Max backend session duration since state change among (datname, state)" }




================================================
FILE: config/0420-pg_wait.yml
================================================
#==============================================================#
# 0420 pg_wait
#==============================================================#
# Backend count per (database, wait event type). NULL datname (background
# processes) is folded into '_system'; NULL wait_event_type becomes 'Running'.
pg_wait:
  name: pg_wait
  desc: PostgreSQL backend client count group by wait event type since 9.6
  query: |
    SELECT coalesce(datname, '_system') AS datname, coalesce(wait_event_type, 'Running') AS event, count(*) AS count FROM pg_stat_activity GROUP BY 1, 2;
  ttl: 10
  min_version: 090600
  tags: [ cluster ]
  metrics:
    - datname: { usage: LABEL ,description: "Name of the database, _system for global process" }
    - event:   { usage: LABEL ,description: "Wait event type" }
    - count:   { usage: GAUGE ,description: "Count of WaitEvent on target database" }




================================================
FILE: config/0430-pg_backend.yml
================================================
#==============================================================#
# 0430 pg_backend
#==============================================================#
# Backend process census grouped by backend_type (column available since pg 10).
pg_backend:
  name: pg_backend
  desc: PostgreSQL backend process count group by backend type since 10
  query: SELECT backend_type AS "type", count(*) AS count FROM pg_stat_activity GROUP BY backend_type;
  ttl: 10
  min_version: 100000
  tags: [ cluster ]
  metrics:
    - type:  { usage: LABEL ,description: "Database backend process type" }
    - count: { usage: GAUGE ,description: "Database backend process count by backend_type" }




================================================
FILE: config/0440-pg_xact.yml
================================================
#==============================================================#
# 0440 pg_xact
#==============================================================#
# Transaction-id horizon metrics derived from txid_current_snapshot():
# xmin/xmax come from txid_snapshot_xmin/xmax, xnum counts the in-progress
# xids unnested by txid_snapshot_xip.
pg_xact:
  name: pg_xact
  desc: PostgreSQL transaction identifier metrics
  query: WITH snap(v) AS (SELECT txid_current_snapshot()), xset(v) AS  (SELECT txid_snapshot_xip(v) FROM snap), xnum(v) AS (SELECT count(*) from xset), xmin(v) AS (SELECT txid_snapshot_xmin(v) FROM snap), xmax(v) AS (SELECT txid_snapshot_xmax(v) FROM snap) SELECT xmin.v AS xmin, xmax.v AS xmax, xnum.v AS xnum FROM xmin, xmax, xnum;
  ttl: 10
  min_version: 090400
  tags: [ cluster ]
  metrics:
    - xmin: { usage: COUNTER ,description: "Earliest txid that is still active" }
    - xmax: { usage: COUNTER ,description: "First as-yet-unassigned txid. txid >= this are invisible." }
    - xnum: { usage: GAUGE   ,description: "Current active transaction count" }




================================================
FILE: config/0450-pg_lock.yml
================================================
#==============================================================#
# 0450 pg_lock
#==============================================================#
# Lock count per (database, mode). Cross join of connectable, non-template
# databases with the eight fixed lock modes yields a zero-filled series for
# every pair; only pg_locks rows with a non-NULL database OID are counted.
pg_lock:
  name: pg_lock
  desc: PostgreSQL lock distribution by mode and database
  query: |
    SELECT datname, mode, coalesce(count, 0) AS count
      FROM (SELECT d.oid AS database, d.datname, l.mode FROM pg_database d, unnest(ARRAY ['AccessShareLock','RowShareLock','RowExclusiveLock','ShareUpdateExclusiveLock', 'ShareLock','ShareRowExclusiveLock','ExclusiveLock','AccessExclusiveLock']) l(mode) WHERE d.datallowconn AND NOT d.datistemplate) base
      LEFT JOIN (SELECT database, mode, count(*) AS count FROM pg_locks WHERE database IS NOT NULL GROUP BY 1, 2) cnt USING (database, mode);
  ttl: 10
  min_version: 090400
  tags: [ cluster ]
  metrics:
    - datname: { usage: LABEL ,description: "Name of the database this backend is connected to" }
    - mode:    { usage: LABEL ,description: "Name of the lock mode held or desired by this process" }
    - count:   { usage: GAUGE ,description: "Number of locks of corresponding mode and database" }




================================================
FILE: config/0460-pg_query.yml
================================================
#==============================================================#
# 0460 pg_query
#==============================================================#
# pg 17+ branch: pg_stat_statements renamed blk_read_time/blk_write_time to
# shared_blk_read_time/shared_blk_write_time. Filters: userid != 10 excludes
# OID 10 (presumably the bootstrap superuser — verify), calls > 4 drops rarely
# run statements, LIMIT 128 caps label cardinality; pg_stat_statements(false)
# omits query text for a cheaper scan.
pg_query_17:
  name: pg_query
  desc: PostgreSQL Query metrics, require pg_stat_statements installed, 17+
  query: |-
    SELECT datname, queryid AS query, sum(calls) AS calls, sum(rows) AS rows, sum(total_exec_time) AS exec_time, sum(shared_blk_read_time) + sum(shared_blk_write_time) AS io_time, sum(wal_bytes) AS wal_bytes
    ,sum(shared_blks_hit) AS sblk_hit, sum(shared_blks_read) AS sblk_read, sum(shared_blks_dirtied) AS sblk_dirtied, sum(shared_blks_written) AS sblk_written
      FROM pg_stat_statements(false) s JOIN pg_database d ON s.dbid = d.oid WHERE userid != 10 AND calls > 4 GROUP BY 1, 2 ORDER BY 3 DESC LIMIT 128;

  ttl: 10
  timeout: 2
  min_version: 170000
  tags: [ cluster, "extension:pg_stat_statements" ]
  metrics:
    - datname:      { usage: LABEL   ,description: "Name of database" }
    - query:        { usage: LABEL   ,description: "QueryID generated from internal hash code, computed from the statement's parse tree" }
    - calls:        { usage: COUNTER ,description: "Number of times the statement was executed" }
    - rows:         { usage: COUNTER ,description: "Total number of rows retrieved or affected by the statement" }
    - exec_time:    { usage: COUNTER ,scale: 1e-3 ,description: "Total time spent executing the statement, in seconds" }
    - io_time:      { usage: COUNTER ,scale: 1e-3 ,description: "Total time the statement spent reading and writing blocks, in seconds" }
    - wal_bytes:    { usage: COUNTER ,description: "Total amount of WAL bytes generated by the statement" }
    - sblk_hit:     { usage: COUNTER ,description: "Total number of shared block cache hits by the statement" }
    - sblk_read:    { usage: COUNTER ,description: "Total number of shared blocks read by the statement" }
    - sblk_dirtied: { usage: COUNTER ,description: "Total number of shared blocks dirtied by the statement" }
    - sblk_written: { usage: COUNTER ,description: "Total number of shared blocks written by the statement" }

# pg 13-16 branch: same shape as pg_query_17 but uses the pre-17 column names
# blk_read_time/blk_write_time for io_time.
pg_query_13:
  name: pg_query
  desc: PostgreSQL Query metrics, require pg_stat_statements installed, 13 - 16
  query: |-
    SELECT datname, queryid AS query, sum(calls) AS calls, sum(rows) AS rows, sum(total_exec_time) AS exec_time, sum(blk_read_time) + sum(blk_write_time) AS io_time, sum(wal_bytes) AS wal_bytes
    ,sum(shared_blks_hit) AS sblk_hit, sum(shared_blks_read) AS sblk_read, sum(shared_blks_dirtied) AS sblk_dirtied, sum(shared_blks_written) AS sblk_written
      FROM pg_stat_statements(false) s JOIN pg_database d ON s.dbid = d.oid WHERE userid != 10 AND calls > 4 GROUP BY 1, 2 ORDER BY 3 DESC LIMIT 128;

  ttl: 10
  timeout: 2
  min_version: 130000
  max_version: 170000
  tags: [ cluster, "extension:pg_stat_statements" ]
  metrics:
    - datname:      { usage: LABEL   ,description: "Name of database" }
    - query:        { usage: LABEL   ,description: "QueryID generated from internal hash code, computed from the statement's parse tree" }
    - calls:        { usage: COUNTER ,description: "Number of times the statement was executed" }
    - rows:         { usage: COUNTER ,description: "Total number of rows retrieved or affected by the statement" }
    - exec_time:    { usage: COUNTER ,scale: 1e-3 ,description: "Total time spent executing the statement, in seconds" }
    - io_time:      { usage: COUNTER ,scale: 1e-3 ,description: "Total time the statement spent reading and writing blocks, in seconds" }
    - wal_bytes:    { usage: COUNTER ,description: "Total amount of WAL bytes generated by the statement" }
    - sblk_hit:     { usage: COUNTER ,description: "Total number of shared block cache hits by the statement" }
    - sblk_read:    { usage: COUNTER ,description: "Total number of shared blocks read by the statement" }
    - sblk_dirtied: { usage: COUNTER ,description: "Total number of shared blocks dirtied by the statement" }
    - sblk_written: { usage: COUNTER ,description: "Total number of shared blocks written by the statement" }

# pg 9.4-12 branch: uses total_time (renamed total_exec_time in 13) and has no
# wal_bytes column, which first appears in the pg_query_13 branch above.
pg_query_10:
  name: pg_query
  desc: PostgreSQL query statement metrics, require pg_stat_statements installed, 9.4 ~ 12
  query: |-
    SELECT datname, queryid AS query, sum(calls) AS calls, sum(rows) AS rows, sum(total_time) AS exec_time, sum(blk_read_time) + sum(blk_write_time) AS io_time,
    sum(shared_blks_hit) AS sblk_hit, sum(shared_blks_read) AS sblk_read, sum(shared_blks_dirtied) AS sblk_dirtied, sum(shared_blks_written) AS sblk_written
    FROM pg_stat_statements(false) s JOIN pg_database d ON s.dbid = d.oid WHERE userid != 10 AND calls > 4 GROUP BY 1, 2 ORDER BY 3 DESC LIMIT 128;

  ttl: 10
  timeout: 2
  min_version: 090400
  max_version: 130000
  tags: [ cluster, "extension:pg_stat_statements" ]
  metrics:
    - datname:      { usage: LABEL   ,description: "Name of database" }
    - query:        { usage: LABEL   ,description: "QueryID generated from internal hash code, computed from the statement's parse tree" }
    - calls:        { usage: COUNTER ,description: "Number of times the statement was executed" }
    - rows:         { usage: COUNTER ,description: "Total number of rows retrieved or affected by the statement" }
    - exec_time:    { usage: COUNTER ,scale: 1e-3 ,description: "Total time spent executing the statement, in seconds" }
    - io_time:      { usage: COUNTER ,scale: 1e-3 ,description: "Total time the statement spent reading and writing blocks, in seconds" }
    - sblk_hit:     { usage: COUNTER ,description: "Total number of shared block cache hits by the statement" }
    - sblk_read:    { usage: COUNTER ,description: "Total number of shared blocks read by the statement" }
    - sblk_dirtied: { usage: COUNTER ,description: "Total number of shared blocks dirtied by the statement" }
    - sblk_written: { usage: COUNTER ,description: "Total number of shared blocks written by the statement" }




================================================
FILE: config/0510-pg_vacuuming.yml
================================================
#==============================================================#
# 0510 pg_vacuuming
#==============================================================#
# pg 18+ branch: adds delay_time on top of the pg 17 columns. The progress
# ratio is heap_blks_scanned (or _vacuumed) / heap_blks_total depending on
# phase, NULL outside the scanning/vacuuming phases.
pg_vacuuming_18:
  name: pg_vacuuming
  desc: PostgreSQL vacuum progress 18+
  query: |-
    SELECT datname, pid, relid::RegClass AS relname,
      CASE phase WHEN 'scanning heap' THEN (CASE WHEN heap_blks_total > 0 THEN 1.0 * heap_blks_scanned / heap_blks_total ELSE 0.0 END)
        WHEN 'vacuuming heap' THEN (CASE WHEN heap_blks_total > 0 THEN 1.0 * heap_blks_vacuumed / heap_blks_total ELSE 0 END) ELSE NULL END AS progress,
      indexes_total, indexes_processed, dead_tuple_bytes, delay_time
    FROM pg_stat_progress_vacuum;

  ttl: 10
  min_version: 180000
  tags: [ cluster, primary ]
  metrics:
    - datname:           { usage: LABEL   ,description: "database name" }
    - pid:               { usage: LABEL   ,description: "process id of vacuum worker" }
    - relname:           { usage: LABEL   ,description: "relation name of vacuuming table" }
    - progress:          { usage: GAUGE   ,description: "vacuum progress ratio (0-1) based on heap blocks scanned/vacuumed" }
    - indexes_total:     { usage: GAUGE   ,description: "total number of indexes that will be vacuumed or cleaned up" }
    - indexes_processed: { usage: GAUGE   ,description: "number of indexes that have been vacuumed or cleaned up" }
    - dead_tuple_bytes:  { usage: GAUGE   ,description: "total size of dead tuples collected since the beginning of vacuum in bytes" }
    - delay_time:        { usage: COUNTER ,scale: 1e-3 ,description: "total time spent sleeping due to cost-based delay in seconds" }

# pg 17-only branch: like pg 18+ but without delay_time. Progress ratio logic
# matches the other pg_vacuuming branches.
pg_vacuuming_17:
  name: pg_vacuuming
  desc: PostgreSQL vacuum progress 17 (with index progress tracking)
  query: |-
    SELECT datname, pid, relid::RegClass AS relname,
      CASE phase WHEN 'scanning heap' THEN (CASE WHEN heap_blks_total > 0 THEN 1.0 * heap_blks_scanned / heap_blks_total ELSE 0.0 END)
        WHEN 'vacuuming heap' THEN (CASE WHEN heap_blks_total > 0 THEN 1.0 * heap_blks_vacuumed / heap_blks_total ELSE 0 END) ELSE NULL END AS progress,
      indexes_total, indexes_processed, dead_tuple_bytes
    FROM pg_stat_progress_vacuum;

  ttl: 10
  min_version: 170000
  max_version: 180000
  tags: [ cluster, primary ]
  metrics:
    - datname:           { usage: LABEL ,description: "database name" }
    - pid:               { usage: LABEL ,description: "process id of vacuum worker" }
    - relname:           { usage: LABEL ,description: "relation name of vacuuming table" }
    - progress:          { usage: GAUGE ,description: "vacuum progress ratio (0-1) based on heap blocks scanned/vacuumed" }
    - indexes_total:     { usage: GAUGE ,description: "total number of indexes that will be vacuumed or cleaned up" }
    - indexes_processed: { usage: GAUGE ,description: "number of indexes that have been vacuumed or cleaned up" }
    - dead_tuple_bytes:  { usage: GAUGE ,description: "total size of dead tuples collected since the beginning of vacuum in bytes" }

# pg 12-16 branch: progress ratio only; the index/dead-tuple columns used by
# the 17+ branches are not selected here.
pg_vacuuming_12:
  name: pg_vacuuming
  desc: PostgreSQL vacuum progress 12-16
  query: |-
    SELECT datname, pid, relid::RegClass AS relname,
      CASE phase WHEN 'scanning heap' THEN (CASE WHEN heap_blks_total > 0 THEN 1.0 * heap_blks_scanned / heap_blks_total ELSE 0.0 END)
        WHEN 'vacuuming heap' THEN (CASE WHEN heap_blks_total > 0 THEN 1.0 * heap_blks_vacuumed / heap_blks_total ELSE 0 END) ELSE NULL END AS progress
    FROM pg_stat_progress_vacuum;

  ttl: 10
  min_version: 120000
  max_version: 170000
  tags: [ cluster, primary ]
  metrics:
    - datname:   { usage: LABEL ,description: "database name" }
    - pid:       { usage: LABEL ,description: "process id of vacuum worker" }
    - relname:   { usage: LABEL ,description: "relation name of vacuuming table" }
    - progress:  { usage: GAUGE ,description: "vacuum progress ratio (0-1) based on heap blocks scanned/vacuumed" }




================================================
FILE: config/0520-pg_indexing.yml
================================================
#==============================================================#
# 0520 pg_indexing
#==============================================================#
# CREATE INDEX / REINDEX progress, one row per backend currently building an
# index (pg_stat_progress_create_index).
pg_indexing:
  name: pg_indexing
  desc: PostgreSQL index creating progress (v12+)
  # Each derived column is a 0-1 completion ratio built from the matching
  # *_done / *_total counter pair; NULL while the corresponding total is 0.
  query: |-
    SELECT datname, pid, relid::RegClass AS relname,
      (CASE WHEN blocks_total > 0 THEN 1.0 * blocks_done / blocks_total ELSE NULL END) AS blocks,
      (CASE WHEN tuples_total > 0 THEN 1.0 * tuples_done / tuples_total ELSE NULL END) AS tuples,
      (CASE WHEN partitions_total > 0 THEN 1.0 * partitions_done / partitions_total ELSE NULL END) AS partitions,
      (CASE WHEN lockers_total > 0 THEN 1.0 * lockers_done / lockers_total ELSE NULL END) AS lockers
    FROM pg_stat_progress_create_index pspci;

  ttl: 10
  min_version: 120000 # pg_stat_progress_create_index first appeared in PG 12
  tags: [ cluster, primary ]
  metrics:
    # NOTE(review): the gauge descriptions below say "Percent ... been proceeded",
    # but the query emits 0-1 fractions of items processed. Consider rewording to
    # "Fraction of ... processed" — must be changed together with the merged
    # pg_exporter.yml copy to keep the split/merged configs in sync.
    - datname:     { usage: LABEL ,description: "Name of the database" }
    - pid:         { usage: LABEL ,description: "Process id of indexing table" }
    - relname:     { usage: LABEL ,description: "Relation name of indexed table" }
    - blocks:      { usage: GAUGE ,description: "Percent of blocks been proceeded" }
    - tuples:      { usage: GAUGE ,description: "Percent of tuples been proceeded" }
    - partitions:  { usage: GAUGE ,description: "Percent of partitions been proceeded" }
    - lockers:     { usage: GAUGE ,description: "Percent of lockers been proceeded" }




================================================
FILE: config/0530-pg_clustering.yml
================================================
#==============================================================#
# 0530 pg_clustering
#==============================================================#
# CLUSTER / VACUUM FULL progress. Reads the raw progress parameters via
# pg_stat_get_progress_info('cluster') instead of the pg_stat_progress_cluster
# view; the LEFT JOIN to pg_database resolves datid into datname.
pg_clustering:
  name: pg_clustering
  desc: PostgreSQL cluster or vacuum full progress (v12+)
  # Assumed param mapping, matching the pg_stat_progress_cluster view definition:
  #   param4 = heap_tuples_scanned, param6 = heap_blks_total, param7 = heap_blks_scanned
  # so progress = heap_blks_scanned / heap_blks_total (0 when total is 0).
  # NOTE(review): verify this mapping against the target server's system_views.sql.
  query: SELECT datname, pid, relid::RegClass AS relname, param4 AS tup_scan, CASE WHEN param6 > 0 THEN 1.0 * param7 / param6 ELSE 0 END AS progress FROM pg_stat_get_progress_info('cluster') s LEFT JOIN pg_database d ON s.datid = d.oid;
  ttl: 10
  min_version: 120000 # pg_stat_progress_cluster reporting arrived in PG 12
  tags: [ cluster, primary ]
  metrics:
    - datname:     { usage: LABEL ,description: "Name of database been clustering" }
    - pid:         { usage: LABEL ,description: "Process id of indexing table" }
    - relname:     { usage: LABEL ,description: "Relation name of indexed table" }
    - tup_scan:    { usage: GAUGE ,description: "How much tuple been scanned" }
    - progress:    { usage: GAUGE ,description: "Progress of heap been processed" }




================================================
FILE: config/0540-pg_backup.yml
================================================
#==============================================================#
# 0540 pg_backup
#==============================================================#
# Streaming basebackup progress, one row per walsender running a base backup.
# Reads raw params of pg_stat_get_progress_info('BASEBACKUP') rather than the
# pg_stat_progress_basebackup view.
pg_backup:
  name: pg_backup
  desc: PostgreSQL basebackup progress since 13
  # Assumed param mapping, matching the pg_stat_progress_basebackup view:
  #   param1 = phase, param2 = backup_total (-1 when size estimation is skipped,
  #   reported here as NULL), param3 = backup_streamed.
  # NOTE(review): confirm the mapping against the target server version.
  query: SELECT pid, param1 AS phase, CASE param2 WHEN -1::integer THEN NULL::bigint ELSE param2 END AS total_bytes, param3 AS sent_bytes FROM pg_stat_get_progress_info('BASEBACKUP');
  ttl: 10
  min_version: 130000 # pg_stat_progress_basebackup first appeared in PG 13
  tags: [ cluster ]
  metrics:
    - pid:           { usage: LABEL ,description: "process id of basebackup sender" }
    - phase:         { usage: GAUGE ,description: "Phase encoded in 0~5 initial, wait checkpoint, estimate, streaming, waiting archive, transfer archive" }
    - total_bytes:   { usage: GAUGE ,description: "Total amount of data that will be streamed" }
    - sent_bytes:    { usage: GAUGE ,description: "Amount of data streamed" }




================================================
FILE: config/0610-pg_db.yml
================================================
#==============================================================#
# 0610 pg_db
#==============================================================#
# PG 18+ branch of pg_db: one row per database, joining static attributes from
# pg_catalog.pg_database with runtime counters from pg_stat_database.
# PG 18 added parallel_workers_to_launch / parallel_workers_launched, which is
# why this branch is split off from the 14-17 branch below.
pg_db_18:
  name: pg_db
  desc: PostgreSQL database stats from pg_stat_database v18
  # Derived columns:
  #   - age / frozen_xid come from pg_database.datfrozenxid
  #   - cks_fails = coalesce(checksum_failures, -1); -1 flags checksums disabled
  #   - xact_total / blks_access / tup_modified are convenience sums
  #   - reset_time is stats_reset converted to an epoch timestamp
  query: |-
    SELECT d.datname, datid,age(datfrozenxid) AS age, datistemplate AS is_template, datallowconn AS allow_conn, datconnlimit AS conn_limit, datfrozenxid::TEXT::BIGINT as frozen_xid,
      numbackends,xact_commit,xact_rollback,xact_rollback + xact_commit AS xact_total,blks_read,blks_hit,blks_read + blks_hit AS blks_access,tup_returned,tup_fetched,tup_inserted,tup_updated,tup_deleted,tup_inserted + tup_updated + tup_deleted AS tup_modified,
      conflicts,temp_files,temp_bytes,deadlocks,coalesce(checksum_failures, -1) AS cks_fails, checksum_last_failure AS cks_fail_time,blk_read_time,blk_write_time,
      session_time,active_time,idle_in_transaction_time AS ixact_time,sessions,sessions_abandoned,sessions_fatal,sessions_killed,parallel_workers_to_launch,parallel_workers_launched,
      extract(EPOCH FROM stats_reset) AS reset_time
    FROM pg_database d JOIN pg_stat_database sd ON d.oid = sd.datid;

  ttl: 10
  min_version: 180000 # PG >= 18.0; older servers are handled by pg_db_14 below
  tags: [ cluster ]
  metrics:
    # columns declared with scale: 1e-3 are reported by PostgreSQL in
    # milliseconds and rescaled by the exporter to seconds
    - datname:             { usage: LABEL   ,description: "Name of the database" }
    - datid:               { usage: GAUGE   ,description: "OID of the database" }
    - age:                 { usage: GAUGE   ,description: "Age of database calculated from datfrozenxid" }
    - is_template:         { usage: GAUGE   ,description: "If true(1), then this database can be cloned by any user with CREATEDB privileges" }
    - allow_conn:          { usage: GAUGE   ,description: "If false(0) then no one can connect to this database." }
    - conn_limit:          { usage: GAUGE   ,description: "Sets maximum number of concurrent connections that can be made to this database. -1 means no limit." }
    - frozen_xid:          { usage: GAUGE   ,description: "All transaction IDs before this one have been frozen" }
    - numbackends:         { usage: GAUGE   ,description: "Number of backends currently connected to this database" }
    - xact_commit:         { usage: COUNTER ,description: "Number of transactions in this database that have been committed" }
    - xact_rollback:       { usage: COUNTER ,description: "Number of transactions in this database that have been rolled back" }
    - xact_total:          { usage: COUNTER ,description: "Number of transactions in this database" }
    - blks_read:           { usage: COUNTER ,description: "Number of disk blocks read in this database" }
    - blks_hit:            { usage: COUNTER ,description: "Number of times disk blocks were found already in the buffer cache" }
    - blks_access:         { usage: COUNTER ,description: "Number of times disk blocks that accessed read+hit" }
    - tup_returned:        { usage: COUNTER ,description: "Number of rows returned by queries in this database" }
    - tup_fetched:         { usage: COUNTER ,description: "Number of rows fetched by queries in this database" }
    - tup_inserted:        { usage: COUNTER ,description: "Number of rows inserted by queries in this database" }
    - tup_updated:         { usage: COUNTER ,description: "Number of rows updated by queries in this database" }
    - tup_deleted:         { usage: COUNTER ,description: "Number of rows deleted by queries in this database" }
    - tup_modified:        { usage: COUNTER ,description: "Number of rows modified by queries in this database" }
    - conflicts:           { usage: COUNTER ,description: "Number of queries canceled due to conflicts with recovery in this database" }
    - temp_files:          { usage: COUNTER ,description: "Number of temporary files created by queries in this database" }
    - temp_bytes:          { usage: COUNTER ,description: "Total amount of data written to temporary files by queries in this database." }
    - deadlocks:           { usage: COUNTER ,description: "Number of deadlocks detected in this database" }
    - cks_fails:           { usage: COUNTER ,description: "Number of data page checksum failures detected in this database, -1 for not enabled" }
    - cks_fail_time:       { usage: GAUGE   ,description: "Time at which the last data page checksum failure was detected in this database" }
    - blk_read_time:       { usage: COUNTER ,scale: 1e-3 ,description: "Time spent reading data file blocks by backends in this database, in seconds" }
    - blk_write_time:      { usage: COUNTER ,scale: 1e-3 ,description: "Time spent writing data file blocks by backends in this database, in seconds" }
    - session_time:        { usage: COUNTER ,scale: 1e-3 ,description: "Time spent by database sessions in this database, in seconds" }
    - active_time:         { usage: COUNTER ,scale: 1e-3 ,description: "Time spent executing SQL statements in this database, in seconds" }
    - ixact_time:          { usage: COUNTER ,scale: 1e-3 ,description: "Time spent idling while in a transaction in this database, in seconds" }
    - sessions:            { usage: COUNTER ,description: "Total number of sessions established to this database" }
    - sessions_abandoned:  { usage: COUNTER ,description: "Number of database sessions to this database that were terminated because connection to the client was lost" }
    - sessions_fatal:      { usage: COUNTER ,description: "Number of database sessions to this database that were terminated by fatal errors" }
    - sessions_killed:     { usage: COUNTER ,description: "Number of database sessions to this database that were terminated by operator intervention" }
    - parallel_workers_to_launch: { usage: COUNTER ,description: "Number of parallel workers planned to be launched by queries on this database" }
    - parallel_workers_launched:  { usage: COUNTER ,description: "Number of parallel workers launched by queries on this database" }
    - reset_time:          { usage: GAUGE   ,description: "Time at which database statistics were last reset" }

pg_db_14:
  name: pg_db
  desc: PostgreSQL database stats from pg_stat_database v14 (with 7 new time & session metrics)
  query: |-
    SELECT d.datname, datid,age(datfrozenxid) AS age, datistemplate AS is_template, datallowconn AS allow_conn, datconnlimit AS conn_limit, datfrozenxid::TEXT::BIGINT as frozen_xid,
      numbackends,xact_commit,xact_rollback,xact_rollback + xact_commit AS xact_total,blks_read,blks_hit,blks_read + blks_hit AS blks_access,tup_returned,tup_fetched,tup_inserted,tup_updated,tup_deleted,tup_inserted + tup_updated + tup_deleted AS tup_modified,
      conflicts,temp_files,temp_bytes,deadlocks,coalesce(checksum_failures, -1) AS cks_fails, checksum_last_failure AS cks_fail_time,blk_read_time,blk_write_time,
      session_time,active_time,idle_in_transaction_time AS ixact_time,sessions,sessions_abandoned,sessions_fatal,sessions_killed,extract(EPOCH FROM stats_reset) AS reset_time
    FROM pg_database d JOIN pg_stat_database sd ON d.oid = sd.datid;

  ttl: 10
  min_version: 140000
  max_version: 180000
  tags: [ cluster ]
  metrics:
    - datname:             { usage: LABEL   ,description: "Name of the database" }
    - datid:               { usage: GAUGE   ,description: "OID of the database" }
    - age:                 { usage: GAUGE   ,description: "Age of database calculated from datfrozenxid" }
    - is_template:         { usage: GAUGE   ,description: "If true(1), then this database can be cloned by any user with CREATEDB privileges" }
    - allow_conn:          { usage: GAUGE   ,description: "If false(0) then no one can connect to this database." }
    - conn_limit:          { usage: GAUGE   ,description: "Sets maximum number of concurrent connections that can be made to this database. -1 means no limit." }
    - frozen_xid:          { usage: GAUGE   ,description: "All transaction IDs before this one have been frozen" }
    - numbackends:         { usage: GAUGE   ,description: "Number of backends currently connected to thi
Download .txt
gitextract_340wwud1/

├── .github/
│   └── workflows/
│       ├── release.yaml
│       └── test-release.yaml
├── .gitignore
├── .goreleaser.yml
├── Dockerfile
├── Dockerfile.goreleaser
├── LICENSE
├── Makefile
├── README.md
├── config/
│   ├── 0000-doc.yml
│   ├── 0110-pg.yml
│   ├── 0120-pg_meta.yml
│   ├── 0130-pg_setting.yml
│   ├── 0210-pg_repl.yml
│   ├── 0220-pg_sync_standby.yml
│   ├── 0230-pg_downstream.yml
│   ├── 0240-pg_slot.yml
│   ├── 0250-pg_recv.yml
│   ├── 0260-pg_sub.yml
│   ├── 0270-pg_origin.yml
│   ├── 0300-pg_io.yml
│   ├── 0310-pg_size.yml
│   ├── 0320-pg_archiver.yml
│   ├── 0330-pg_bgwriter.yml
│   ├── 0331-pg_checkpointer.yml
│   ├── 0340-pg_ssl.yml
│   ├── 0350-pg_checkpoint.yml
│   ├── 0355-pg_timeline.yml
│   ├── 0360-pg_recovery.yml
│   ├── 0370-pg_slru.yml
│   ├── 0380-pg_shmem.yml
│   ├── 0390-pg_wal.yml
│   ├── 0410-pg_activity.yml
│   ├── 0420-pg_wait.yml
│   ├── 0430-pg_backend.yml
│   ├── 0440-pg_xact.yml
│   ├── 0450-pg_lock.yml
│   ├── 0460-pg_query.yml
│   ├── 0510-pg_vacuuming.yml
│   ├── 0520-pg_indexing.yml
│   ├── 0530-pg_clustering.yml
│   ├── 0540-pg_backup.yml
│   ├── 0610-pg_db.yml
│   ├── 0620-pg_db_confl.yml
│   ├── 0640-pg_pubrel.yml
│   ├── 0650-pg_subrel.yml
│   ├── 0700-pg_table.yml
│   ├── 0710-pg_index.yml
│   ├── 0720-pg_func.yml
│   ├── 0730-pg_seq.yml
│   ├── 0740-pg_relkind.yml
│   ├── 0750-pg_defpart.yml
│   ├── 0810-pg_table_size.yml
│   ├── 0820-pg_table_bloat.yml
│   ├── 0830-pg_index_bloat.yml
│   ├── 0910-pgbouncer_list.yml
│   ├── 0920-pgbouncer_database.yml
│   ├── 0930-pgbouncer_stat.yml
│   ├── 0940-pgbouncer_pool.yml
│   ├── 1000-pg_wait_event.yml
│   ├── 1800-pg_tsdb_hypertable.yml
│   ├── 1900-pg_citus.yml
│   └── 2000-pg_heartbeat.yml
├── docker/
│   ├── .dockerignore
│   ├── README.md
│   ├── build.sh
│   └── release.sh
├── exporter/
│   ├── arg.go
│   ├── args_normalize.go
│   ├── args_normalize_test.go
│   ├── collector.go
│   ├── column.go
│   ├── concurrency_test.go
│   ├── config.go
│   ├── config_coverage_pg9_test.go
│   ├── config_coverage_test.go
│   ├── config_merged_test.go
│   ├── config_style_test.go
│   ├── config_test.go
│   ├── exporter.go
│   ├── exporter_handlers_opts_test.go
│   ├── global.go
│   ├── health_state_test.go
│   ├── main.go
│   ├── metrics_lifecycle_test.go
│   ├── pgurl.go
│   ├── pgurl_test.go
│   ├── predicate_cache_test.go
│   ├── probehealth_pgbouncer_test.go
│   ├── prom_validate.go
│   ├── query.go
│   ├── query_column_test.go
│   ├── reload_signals_unix.go
│   ├── reload_signals_windows.go
│   ├── reload_test.go
│   ├── server.go
│   ├── server_exporter_test.go
│   ├── testmain_test.go
│   ├── utils.go
│   ├── utils_test.go
│   ├── validate_labels.go
│   └── validate_labels_test.go
├── go.mod
├── go.sum
├── hugo.yaml
├── legacy/
│   ├── README.md
│   ├── config/
│   │   ├── 0000-doc.yml
│   │   ├── 0110-pg.yml
│   │   ├── 0120-pg_meta.yml
│   │   ├── 0130-pg_setting.yml
│   │   ├── 0210-pg_repl.yml
│   │   ├── 0220-pg_sync_standby.yml
│   │   ├── 0230-pg_downstream.yml
│   │   ├── 0240-pg_slot.yml
│   │   ├── 0250-pg_recv.yml
│   │   ├── 0270-pg_origin.yml
│   │   ├── 0310-pg_size.yml
│   │   ├── 0320-pg_archiver.yml
│   │   ├── 0330-pg_bgwriter.yml
│   │   ├── 0331-pg_checkpointer.yml
│   │   ├── 0340-pg_ssl.yml
│   │   ├── 0350-pg_checkpoint.yml
│   │   ├── 0355-pg_timeline.yml
│   │   ├── 0360-pg_recovery.yml
│   │   ├── 0410-pg_activity.yml
│   │   ├── 0420-pg_wait.yml
│   │   ├── 0440-pg_xact.yml
│   │   ├── 0450-pg_lock.yml
│   │   ├── 0460-pg_query.yml
│   │   ├── 0610-pg_db.yml
│   │   ├── 0620-pg_db_confl.yml
│   │   ├── 0700-pg_table.yml
│   │   ├── 0710-pg_index.yml
│   │   ├── 0720-pg_func.yml
│   │   ├── 0740-pg_relkind.yml
│   │   ├── 0810-pg_table_size.yml
│   │   ├── 0820-pg_table_bloat.yml
│   │   ├── 0830-pg_index_bloat.yml
│   │   ├── 0910-pgbouncer_list.yml
│   │   ├── 0920-pgbouncer_database.yml
│   │   ├── 0930-pgbouncer_stat.yml
│   │   ├── 0940-pgbouncer_pool.yml
│   │   ├── 1800-pg_tsdb_hypertable.yml
│   │   ├── 1900-pg_citus.yml
│   │   └── 2000-pg_heartbeat.yml
│   └── pg_exporter.yml
├── main.go
├── monitor/
│   ├── initdb.sh
│   ├── pgrds-instance.json
│   └── pgsql-exporter.json
├── package/
│   ├── nfpm-amd64-deb.yaml
│   ├── nfpm-amd64-rpm.yaml
│   ├── nfpm-arm64-deb.yaml
│   ├── nfpm-arm64-rpm.yaml
│   ├── pg_exporter.default
│   ├── pg_exporter.service
│   └── preinstall.sh
└── pg_exporter.yml
Download .txt
SYMBOL INDEX (226 symbols across 34 files)

FILE: exporter/arg.go
  function ParseArgs (line 42) | func ParseArgs() {

FILE: exporter/args_normalize.go
  function normalizeKingpinBoolEqualsArgs (line 18) | func normalizeKingpinBoolEqualsArgs(args []string, model *kingpin.Applic...

FILE: exporter/args_normalize_test.go
  function TestNormalizeKingpinBoolEqualsArgs_Long (line 10) | func TestNormalizeKingpinBoolEqualsArgs_Long(t *testing.T) {
  function TestNormalizeKingpinBoolEqualsArgs_Short (line 36) | func TestNormalizeKingpinBoolEqualsArgs_Short(t *testing.T) {

FILE: exporter/collector.go
  type predicateCacheEntry (line 17) | type predicateCacheEntry struct
  type Collector (line 24) | type Collector struct
    method Describe (line 61) | func (q *Collector) Describe(ch chan<- *prometheus.Desc) {
    method Collect (line 68) | func (q *Collector) Collect(ch chan<- prometheus.Metric) {
    method ResultSize (line 86) | func (q *Collector) ResultSize() int {
    method Error (line 91) | func (q *Collector) Error() error {
    method PredicateSkip (line 96) | func (q *Collector) PredicateSkip() (bool, string) {
    method Duration (line 101) | func (q *Collector) Duration() float64 {
    method CacheHit (line 106) | func (q *Collector) CacheHit() bool {
    method executePredicateQueries (line 113) | func (q *Collector) executePredicateQueries(ctx context.Context) bool {
    method execute (line 216) | func (q *Collector) execute() {
    method makeDescMap (line 323) | func (q *Collector) makeDescMap() {
    method sendDescriptors (line 351) | func (q *Collector) sendDescriptors(ch chan<- *prometheus.Desc) {
    method cacheExpired (line 359) | func (q *Collector) cacheExpired() bool {
    method cacheTTL (line 363) | func (q *Collector) cacheTTL() float64 {
    method sendMetrics (line 368) | func (q *Collector) sendMetrics(ch chan<- prometheus.Metric) {
  function NewCollector (line 47) | func NewCollector(q *Query, s *Server) *Collector {

FILE: exporter/column.go
  constant DISCARD (line 14) | DISCARD   = "DISCARD"
  constant LABEL (line 15) | LABEL     = "LABEL"
  constant COUNTER (line 16) | COUNTER   = "COUNTER"
  constant GAUGE (line 17) | GAUGE     = "GAUGE"
  constant HISTOGRAM (line 18) | HISTOGRAM = "HISTOGRAM"
  type Column (line 30) | type Column struct
    method parseNumbers (line 46) | func (c *Column) parseNumbers() error {
    method PrometheusValueType (line 67) | func (c *Column) PrometheusValueType() prometheus.ValueType {
    method String (line 80) | func (c *Column) String() string {
    method MetricDesc (line 85) | func (c *Column) MetricDesc(prefix string, labels []string) *MetricDesc {
  type MetricDesc (line 98) | type MetricDesc struct
    method String (line 105) | func (m *MetricDesc) String() string {

FILE: exporter/concurrency_test.go
  function newMockExporter (line 12) | func newMockExporter(up bool, recovery bool) *Exporter {
  function TestReloadAndHealthHandlersNoDeadlock (line 27) | func TestReloadAndHealthHandlersNoDeadlock(t *testing.T) {

FILE: exporter/config.go
  function GetConfig (line 13) | func GetConfig() (res string) {
  function ParseConfig (line 35) | func ParseConfig(content []byte) (queries map[string]*Query, err error) {
  function FinalizeQueries (line 156) | func FinalizeQueries(queries map[string]*Query, source string) error {
  function ParseQuery (line 175) | func ParseQuery(config string) (*Query, error) {
  function LoadConfig (line 197) | func LoadConfig(configPath string) (queries map[string]*Query, err error) {

FILE: exporter/config_coverage_pg9_test.go
  function TestConfigCoveragePG9 (line 12) | func TestConfigCoveragePG9(t *testing.T) {

FILE: exporter/config_coverage_test.go
  function TestConfigCoveragePG10To18 (line 13) | func TestConfigCoveragePG10To18(t *testing.T) {

FILE: exporter/config_merged_test.go
  function parseConfigDirLikeMerge (line 12) | func parseConfigDirLikeMerge(t *testing.T, dir string) map[string]*Query {
  function TestMergedConfigsMatchSplitDirectories (line 51) | func TestMergedConfigsMatchSplitDirectories(t *testing.T) {

FILE: exporter/config_style_test.go
  function TestInlineMetricDescriptionsUseDoubleQuotes (line 16) | func TestInlineMetricDescriptionsUseDoubleQuotes(t *testing.T) {
  function TestLegacySplitConfigsEndWithTwoBlankLines (line 64) | func TestLegacySplitConfigsEndWithTwoBlankLines(t *testing.T) {

FILE: exporter/config_test.go
  function TestParseConfigUsageCaseInsensitive (line 9) | func TestParseConfigUsageCaseInsensitive(t *testing.T) {
  function TestParseConfigInvalidUsage (line 40) | func TestParseConfigInvalidUsage(t *testing.T) {
  function TestParseConfigRejectsMultiColumnMetricsEntry (line 55) | func TestParseConfigRejectsMultiColumnMetricsEntry(t *testing.T) {
  function TestParseQueryErrors (line 70) | func TestParseQueryErrors(t *testing.T) {
  function TestLoadConfigDirectoryPriorityAndOverride (line 92) | func TestLoadConfigDirectoryPriorityAndOverride(t *testing.T) {
  function TestLoadConfigDirectoryAllInvalidReturnsError (line 142) | func TestLoadConfigDirectoryAllInvalidReturnsError(t *testing.T) {
  function TestGetConfigPrecedence (line 160) | func TestGetConfigPrecedence(t *testing.T) {

FILE: exporter/exporter.go
  constant healthStatusUnknown (line 18) | healthStatusUnknown int32 = iota
  constant healthStatusDown (line 19) | healthStatusDown
  constant healthStatusStarting (line 20) | healthStatusStarting
  constant healthStatusPrimary (line 21) | healthStatusPrimary
  constant healthStatusReplica (line 22) | healthStatusReplica
  type Exporter (line 27) | type Exporter struct
    method Up (line 93) | func (e *Exporter) Up() bool {
    method Recovery (line 98) | func (e *Exporter) Recovery() bool {
    method Status (line 103) | func (e *Exporter) Status() string {
    method updateHealthState (line 118) | func (e *Exporter) updateHealthState(up, recovery bool) {
    method updateHealthStateWithStartup (line 122) | func (e *Exporter) updateHealthStateWithStartup(up, recovery, starting...
    method updateHealthStateFromServer (line 141) | func (e *Exporter) updateHealthStateFromServer() {
    method probeAndUpdateHealthState (line 155) | func (e *Exporter) probeAndUpdateHealthState() error {
    method startHealthLoop (line 167) | func (e *Exporter) startHealthLoop() {
    method stopHealthLoop (line 201) | func (e *Exporter) stopHealthLoop() {
    method Describe (line 219) | func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
    method Collect (line 233) | func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
    method collectServerMetrics (line 281) | func (e *Exporter) collectServerMetrics(ch chan<- prometheus.Metric) {
    method Explain (line 337) | func (e *Exporter) Explain() string {
    method Stat (line 342) | func (e *Exporter) Stat() string {
    method Check (line 348) | func (e *Exporter) Check() {
    method Close (line 357) | func (e *Exporter) Close() {
    method setupInternalMetrics (line 383) | func (e *Exporter) setupInternalMetrics() {
    method collectInternalMetrics (line 518) | func (e *Exporter) collectInternalMetrics(ch chan<- prometheus.Metric) {
    method OnDatabaseChange (line 607) | func (e *Exporter) OnDatabaseChange(change map[string]bool) {
    method CreateServer (line 639) | func (e *Exporter) CreateServer(dbname string) {
    method RemoveServer (line 660) | func (e *Exporter) RemoveServer(dbname string) {
    method IterateServer (line 682) | func (e *Exporter) IterateServer() (res []*Server) {
    method ExplainFunc (line 805) | func (e *Exporter) ExplainFunc(w http.ResponseWriter, r *http.Request) {
    method StatFunc (line 819) | func (e *Exporter) StatFunc(w http.ResponseWriter, r *http.Request) {
    method UpCheckFunc (line 831) | func (e *Exporter) UpCheckFunc(w http.ResponseWriter, r *http.Request) {
    method PrimaryCheckFunc (line 853) | func (e *Exporter) PrimaryCheckFunc(w http.ResponseWriter, r *http.Req...
    method ReplicaCheckFunc (line 878) | func (e *Exporter) ReplicaCheckFunc(w http.ResponseWriter, r *http.Req...
  function NewExporter (line 535) | func NewExporter(dsn string, opts ...ExporterOpt) (e *Exporter, err erro...
  type ExporterOpt (line 696) | type ExporterOpt
  function WithConfig (line 699) | func WithConfig(configPath string) ExporterOpt {
  function WithConfigReader (line 706) | func WithConfigReader(reader io.Reader) ExporterOpt {
  function WithConstLabels (line 713) | func WithConstLabels(s string) ExporterOpt {
  function WithCacheDisabled (line 720) | func WithCacheDisabled(disableCache bool) ExporterOpt {
  function WithIntroDisabled (line 727) | func WithIntroDisabled(disableIntro bool) ExporterOpt {
  function WithFailFast (line 734) | func WithFailFast(failFast bool) ExporterOpt {
  function WithNamespace (line 741) | func WithNamespace(namespace string) ExporterOpt {
  function WithTags (line 748) | func WithTags(tags string) ExporterOpt {
  function WithAutoDiscovery (line 755) | func WithAutoDiscovery(flag bool) ExporterOpt {
  function WithExcludeDatabase (line 762) | func WithExcludeDatabase(excludeStr string) ExporterOpt {
  function WithIncludeDatabase (line 774) | func WithIncludeDatabase(includeStr string) ExporterOpt {
  function WithConnectTimeout (line 787) | func WithConnectTimeout(timeout int) ExporterOpt {
  function currentExporter (line 795) | func currentExporter() *Exporter {
  function VersionFunc (line 903) | func VersionFunc(w http.ResponseWriter, r *http.Request) {
  function TitleFunc (line 911) | func TitleFunc(w http.ResponseWriter, r *http.Request) {
  function ReloadFunc (line 917) | func ReloadFunc(w http.ResponseWriter, r *http.Request) {

FILE: exporter/exporter_handlers_opts_test.go
  function TestExporterOptionHelpers (line 11) | func TestExporterOptionHelpers(t *testing.T) {
  function TestPublicHandlers (line 52) | func TestPublicHandlers(t *testing.T) {
  function TestExplainAndStatHandlersWhenExporterUnavailable (line 68) | func TestExplainAndStatHandlersWhenExporterUnavailable(t *testing.T) {
  function TestHealthHandlersPassiveModeNoActiveProbe (line 89) | func TestHealthHandlersPassiveModeNoActiveProbe(t *testing.T) {

FILE: exporter/global.go
  function setCurrentExporter (line 37) | func setCurrentExporter(e *Exporter) {

FILE: exporter/health_state_test.go
  function TestIsPostgresStartupError (line 11) | func TestIsPostgresStartupError(t *testing.T) {
  function TestUpdateHealthStateWithStartup (line 29) | func TestUpdateHealthStateWithStartup(t *testing.T) {

FILE: exporter/main.go
  function DryRun (line 17) | func DryRun() {
  function Reload (line 40) | func Reload() error {
  function Run (line 90) | func Run() {

FILE: exporter/metrics_lifecycle_test.go
  function makeCachedCollectorForServer (line 10) | func makeCachedCollectorForServer(s *Server, name string, val float64) *...
  function TestExporterCollectAndInternalMetrics (line 21) | func TestExporterCollectAndInternalMetrics(t *testing.T) {
  function TestExporterDescribeAndCloseNoPanic (line 62) | func TestExporterDescribeAndCloseNoPanic(t *testing.T) {
  function TestDisableIntroSuppressesInternalMetrics (line 88) | func TestDisableIntroSuppressesInternalMetrics(t *testing.T) {
  function TestServerIntrospectionHelpers (line 129) | func TestServerIntrospectionHelpers(t *testing.T) {

FILE: exporter/pgurl.go
  function GetPGURL (line 10) | func GetPGURL() string {
  function RetrievePGURL (line 24) | func RetrievePGURL() (res string) {
  function ProcessPGURL (line 62) | func ProcessPGURL(pgurl string) string {
  function ShadowPGURL (line 79) | func ShadowPGURL(pgurl string) string {
  function ParseDatname (line 104) | func ParseDatname(pgurl string) string {
  function ReplaceDatname (line 119) | func ReplaceDatname(pgurl, datname string) string {

FILE: exporter/pgurl_test.go
  function TestProcessPGURLKeepsEncodedQueryValues (line 10) | func TestProcessPGURLKeepsEncodedQueryValues(t *testing.T) {
  function TestShadowPGURLRedactsQueryPassword (line 33) | func TestShadowPGURLRedactsQueryPassword(t *testing.T) {
  function TestParseDatnameAndReplaceDatname (line 46) | func TestParseDatnameAndReplaceDatname(t *testing.T) {
  function TestRetrievePGURLPriority (line 68) | func TestRetrievePGURLPriority(t *testing.T) {

FILE: exporter/predicate_cache_test.go
  function TestPredicateCacheHitSkipsDBQuery (line 9) | func TestPredicateCacheHitSkipsDBQuery(t *testing.T) {
  function TestPredicateCacheMissTriggersDBQuery (line 37) | func TestPredicateCacheMissTriggersDBQuery(t *testing.T) {
  function TestPredicateCacheDisabledByTTLZero (line 57) | func TestPredicateCacheDisabledByTTLZero(t *testing.T) {

FILE: exporter/probehealth_pgbouncer_test.go
  type probeHealthTestDriver (line 18) | type probeHealthTestDriver struct
    method Open (line 20) | func (d probeHealthTestDriver) Open(name string) (driver.Conn, error) {
  type probeHealthTestConn (line 24) | type probeHealthTestConn struct
    method Prepare (line 26) | func (c *probeHealthTestConn) Prepare(query string) (driver.Stmt, erro...
    method Close (line 29) | func (c *probeHealthTestConn) Close() error              { return nil }
    method Begin (line 30) | func (c *probeHealthTestConn) Begin() (driver.Tx, error) { return nil,...
    method Ping (line 32) | func (c *probeHealthTestConn) Ping(ctx context.Context) error { return...
    method QueryContext (line 34) | func (c *probeHealthTestConn) QueryContext(ctx context.Context, query ...
  type probeHealthTestRows (line 41) | type probeHealthTestRows struct
    method Columns (line 43) | func (r *probeHealthTestRows) Columns() []string { return []string{"ve...
    method Close (line 44) | func (r *probeHealthTestRows) Close() error      { return nil }
    method Next (line 45) | func (r *probeHealthTestRows) Next(dest []driver.Value) error {
  function init (line 49) | func init() {
  function TestProbeHealthPgbouncerDoesNotPingAndTreatsNoRowsAsUp (line 53) | func TestProbeHealthPgbouncerDoesNotPingAndTreatsNoRowsAsUp(t *testing.T) {

FILE: exporter/prom_validate.go
  function validatePromLabelName (line 10) | func validatePromLabelName(name string) error {
  function validatePromMetricName (line 23) | func validatePromMetricName(name string) error {

FILE: exporter/query.go
  type Query (line 17) | type Query struct
    method MarshalYAML (line 119) | func (q *Query) MarshalYAML() string {
    method Explain (line 133) | func (q *Query) Explain() string {
    method HTML (line 145) | func (q *Query) HTML() string {
    method HasTag (line 158) | func (q *Query) HasTag(tag string) bool {
    method ColumnList (line 163) | func (q *Query) ColumnList() (res []*Column) {
    method LabelList (line 172) | func (q *Query) LabelList() []string {
    method MetricList (line 186) | func (q *Query) MetricList() (res []*MetricDesc) {
    method TimeoutDuration (line 196) | func (q *Query) TimeoutDuration() time.Duration {
  type PredicateQuery (line 46) | type PredicateQuery struct

FILE: exporter/query_column_test.go
  function makeSampleQuery (line 11) | func makeSampleQuery() *Query {
  function TestColumnPrometheusValueType (line 42) | func TestColumnPrometheusValueType(t *testing.T) {
  function TestColumnAndMetricDescString (line 61) | func TestColumnAndMetricDescString(t *testing.T) {
  function TestQueryHelpersAndRender (line 79) | func TestQueryHelpersAndRender(t *testing.T) {

FILE: exporter/reload_test.go
  function TestReloadUpdatesQueriesInPlace (line 9) | func TestReloadUpdatesQueriesInPlace(t *testing.T) {

FILE: exporter/server.go
  constant connMaxLifeTime (line 21) | connMaxLifeTime = 1 * time.Minute
  constant pgSQLStateCannotConnectNow (line 23) | pgSQLStateCannotConnectNow = "57P03"
  type Server (line 30) | type Server struct
    method GetConnectTimeout (line 89) | func (s *Server) GetConnectTimeout() time.Duration {
    method Name (line 97) | func (s *Server) Name() string {
    method Error (line 104) | func (s *Server) Error() error {
    method Check (line 109) | func (s *Server) Check() error {
    method ProbeHealth (line 118) | func (s *Server) ProbeHealth() (up, recovery, starting bool, err error) {
    method Plan (line 369) | func (s *Server) Plan(queries ...*Query) {
    method ResetStats (line 406) | func (s *Server) ResetStats() {
    method Compatible (line 430) | func (s *Server) Compatible(query *Query) (res bool, reason string) {
    method Explain (line 526) | func (s *Server) Explain() string {
    method Stat (line 537) | func (s *Server) Stat() string {
    method ExplainHTML (line 563) | func (s *Server) ExplainHTML() string {
    method Describe (line 574) | func (s *Server) Describe(ch chan<- *prometheus.Desc) {
    method Collect (line 583) | func (s *Server) Collect(ch chan<- prometheus.Metric) {
    method collectFatalQueries (line 624) | func (s *Server) collectFatalQueries(ch chan<- prometheus.Metric) error {
    method collectNonFatalQueries (line 639) | func (s *Server) collectNonFatalQueries(ch chan<- prometheus.Metric) {
    method executeQuery (line 652) | func (s *Server) executeQuery(query *Collector, ch chan<- prometheus.M...
    method HasTag (line 680) | func (s *Server) HasTag(tag string) bool {
    method Duration (line 690) | func (s *Server) Duration() float64 {
    method Uptime (line 698) | func (s *Server) Uptime() float64 {
  function isPostgresStartupError (line 160) | func isPostgresStartupError(err error) bool {
  function PgbouncerPrecheck (line 166) | func PgbouncerPrecheck(s *Server) (err error) {
  function ParseSemver (line 233) | func ParseSemver(semverStr string) int {
  function PostgresPrecheck (line 260) | func PostgresPrecheck(s *Server) (err error) {
  function NewServer (line 705) | func NewServer(dsn string, opts ...ServerOpt) *Server {
  type ServerOpt (line 726) | type ServerOpt
  function WithConstLabel (line 729) | func WithConstLabel(labels prometheus.Labels) ServerOpt {
  function WithCachePolicy (line 743) | func WithCachePolicy(disableCache bool) ServerOpt {
  function WithQueries (line 750) | func WithQueries(queries map[string]*Query) ServerOpt {
  function WithServerTags (line 757) | func WithServerTags(tags []string) ServerOpt {
  function WithServerConnectTimeout (line 766) | func WithServerConnectTimeout(timeout int) ServerOpt {

FILE: exporter/server_exporter_test.go
  function makeGaugeQuery (line 11) | func makeGaugeQuery(name string, priority int, tags ...string) *Query {
  function TestParseSemver (line 28) | func TestParseSemver(t *testing.T) {
  function TestNewServerAndBasics (line 40) | func TestNewServerAndBasics(t *testing.T) {
  function TestServerCompatible (line 83) | func TestServerCompatible(t *testing.T) {
  function TestPlanResetAndCollectCached (line 135) | func TestPlanResetAndCollectCached(t *testing.T) {
  function TestExporterServerLifecycleHelpers (line 194) | func TestExporterServerLifecycleHelpers(t *testing.T) {

FILE: exporter/testmain_test.go
  function TestMain (line 8) | func TestMain(m *testing.M) {

FILE: exporter/utils.go
  function configureLogger (line 17) | func configureLogger(levelStr, formatStr string) *slog.Logger {
  function loggerOrDefault (line 50) | func loggerOrDefault() *slog.Logger {
  function logDebugf (line 58) | func logDebugf(format string, v ...interface{}) {
  function logInfof (line 63) | func logInfof(format string, v ...interface{}) {
  function logWarnf (line 68) | func logWarnf(format string, v ...interface{}) {
  function logErrorf (line 73) | func logErrorf(format string, v ...interface{}) {
  function logError (line 78) | func logError(msg string) {
  function logFatalf (line 83) | func logFatalf(format string, v ...interface{}) {
  function castFloat64 (line 92) | func castFloat64(t interface{}, c *Column) float64 {
  function castString (line 137) | func castString(t interface{}) string {
  function parseConstLabels (line 164) | func parseConstLabels(s string) prometheus.Labels {
  function parseCSV (line 197) | func parseCSV(s string) (tags []string) {

FILE: exporter/utils_test.go
  function TestParseCSV (line 10) | func TestParseCSV(t *testing.T) {
  function TestParseConstLabels (line 22) | func TestParseConstLabels(t *testing.T) {
  function TestCastFloat64 (line 43) | func TestCastFloat64(t *testing.T) {
  function TestCastString (line 98) | func TestCastString(t *testing.T) {
  function TestConfigureLogger (line 120) | func TestConfigureLogger(t *testing.T) {
  function TestLogHelpersWithNilLogger (line 132) | func TestLogHelpersWithNilLogger(t *testing.T) {

FILE: exporter/validate_labels.go
  function validateConstLabelConflicts (line 18) | func validateConstLabelConflicts(constLabels prometheus.Labels, queries ...

FILE: exporter/validate_labels_test.go
  function TestValidateConstLabelConflicts_QueryLabelOverlap (line 8) | func TestValidateConstLabelConflicts_QueryLabelOverlap(t *testing.T) {
  function TestValidateConstLabelConflicts_InternalReservedLabels (line 32) | func TestValidateConstLabelConflicts_InternalReservedLabels(t *testing.T) {
  function TestNewExporterRejectsConstLabelConflict (line 43) | func TestNewExporterRejectsConstLabelConflict(t *testing.T) {

FILE: main.go
  function main (line 22) | func main() {
Condensed preview — 158 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,654K chars).
[
  {
    "path": ".github/workflows/release.yaml",
    "chars": 1217,
    "preview": "name: Release\n\non:\n  push:\n    tags:\n      - 'v*.*.*'\n\npermissions:\n  contents: write\n\njobs:\n  release:\n    runs-on: ubu"
  },
  {
    "path": ".github/workflows/test-release.yaml",
    "chars": 1370,
    "preview": "name: Test Release\n\non:\n  workflow_dispatch:  # 允许手动触发\n  pull_request:\n    paths:\n      - '.goreleaser.yml'\n      - '.gi"
  },
  {
    "path": ".gitignore",
    "chars": 218,
    "preview": "# binary files\npg_exporter\n\n# tmp files\ntest/\ndeploy/\nupload.sh\ntemp/\ndist/\n.DS_Store\n\n# IDE files\n.vscode/\n.idea/\n.code"
  },
  {
    "path": ".goreleaser.yml",
    "chars": 6621,
    "preview": "version: 2\n\nenv:\n  - CGO_ENABLED=0\n\nbefore:\n  hooks:\n    - go mod download\n    - go mod tidy\n\nbuilds:\n  - id: pg_exporte"
  },
  {
    "path": "Dockerfile",
    "chars": 1365,
    "preview": "# syntax=docker/dockerfile:1\nFROM golang:1.26.2-alpine AS builder-env\n\nARG GOPROXY=https://proxy.golang.org,direct\nARG G"
  },
  {
    "path": "Dockerfile.goreleaser",
    "chars": 713,
    "preview": "# Dockerfile for goreleaser\n# This uses pre-built binaries from goreleaser instead of building from source\nFROM scratch\n"
  },
  {
    "path": "LICENSE",
    "chars": 11366,
    "preview": "\n                                 Apache License\n                           Version 2.0, January 2004\n                  "
  },
  {
    "path": "Makefile",
    "chars": 7354,
    "preview": "#==============================================================#\n# File      :   Makefile\n# Mtime     :   2025-08-14\n# L"
  },
  {
    "path": "README.md",
    "chars": 29804,
    "preview": "<p align=\"center\">\n  <img src=\"static/logo.png\" alt=\"PG Exporter Logo\" height=\"128\" align=\"middle\">\n</p>\n\n# PG EXPORTER\n"
  },
  {
    "path": "config/0000-doc.yml",
    "chars": 14884,
    "preview": "#==============================================================#\n# Desc      :   pg_exporter metrics collector definitio"
  },
  {
    "path": "config/0110-pg.yml",
    "chars": 5936,
    "preview": "#==============================================================#\n# 0110 pg\n#============================================"
  },
  {
    "path": "config/0120-pg_meta.yml",
    "chars": 4853,
    "preview": "#==============================================================#\n# 0120 pg_meta\n#======================================="
  },
  {
    "path": "config/0130-pg_setting.yml",
    "chars": 8494,
    "preview": "#==============================================================#\n# 0130 pg_setting\n#===================================="
  },
  {
    "path": "config/0210-pg_repl.yml",
    "chars": 9477,
    "preview": "#==============================================================#\n# 0210 pg_repl\n#======================================="
  },
  {
    "path": "config/0220-pg_sync_standby.yml",
    "chars": 780,
    "preview": "#==============================================================#\n# 0220 pg_sync_standby\n#==============================="
  },
  {
    "path": "config/0230-pg_downstream.yml",
    "chars": 793,
    "preview": "#==============================================================#\n# 0230 pg_downstream\n#================================="
  },
  {
    "path": "config/0240-pg_slot.yml",
    "chars": 19659,
    "preview": "#==============================================================#\n# 0240 pg_slot\n#======================================="
  },
  {
    "path": "config/0250-pg_recv.yml",
    "chars": 8190,
    "preview": "#==============================================================#\n# 0250 pg_recv\n#======================================="
  },
  {
    "path": "config/0260-pg_sub.yml",
    "chars": 5458,
    "preview": "#==============================================================#\n# 0260 pg_sub\n#========================================"
  },
  {
    "path": "config/0270-pg_origin.yml",
    "chars": 1015,
    "preview": "#==============================================================#\n# 0270 pg_origin\n#====================================="
  },
  {
    "path": "config/0300-pg_io.yml",
    "chars": 6242,
    "preview": "#==============================================================#\n# 0300 pg_io\n#========================================="
  },
  {
    "path": "config/0310-pg_size.yml",
    "chars": 825,
    "preview": "#==============================================================#\n# 0310 pg_size\n#======================================="
  },
  {
    "path": "config/0320-pg_archiver.yml",
    "chars": 1117,
    "preview": "#==============================================================#\n# 0320 pg_archiver\n#==================================="
  },
  {
    "path": "config/0330-pg_bgwriter.yml",
    "chars": 3017,
    "preview": "#==============================================================#\n# 0330 pg_bgwriter\n#==================================="
  },
  {
    "path": "config/0331-pg_checkpointer.yml",
    "chars": 5416,
    "preview": "#==============================================================#\n# 0331 pg_checkpointer\n#==============================="
  },
  {
    "path": "config/0340-pg_ssl.yml",
    "chars": 617,
    "preview": "#==============================================================#\n# 0340 pg_ssl\n#========================================"
  },
  {
    "path": "config/0350-pg_checkpoint.yml",
    "chars": 3004,
    "preview": "#==============================================================#\n# 0350 pg_checkpoint\n#================================="
  },
  {
    "path": "config/0355-pg_timeline.yml",
    "chars": 518,
    "preview": "#==============================================================#\n# 0355 pg_timeline\n#==================================="
  },
  {
    "path": "config/0360-pg_recovery.yml",
    "chars": 2663,
    "preview": "#==============================================================#\n# 0360 pg_recovery\n#==================================="
  },
  {
    "path": "config/0370-pg_slru.yml",
    "chars": 1397,
    "preview": "#==============================================================#\n# 0370 pg_slru\n#======================================="
  },
  {
    "path": "config/0380-pg_shmem.yml",
    "chars": 1108,
    "preview": "#==============================================================#\n# 0380 pg_shmem\n#======================================"
  },
  {
    "path": "config/0390-pg_wal.yml",
    "chars": 2651,
    "preview": "#==============================================================#\n# 0390 pg_wal\n#========================================"
  },
  {
    "path": "config/0410-pg_activity.yml",
    "chars": 1804,
    "preview": "#==============================================================#\n# 0410 pg_activity\n#==================================="
  },
  {
    "path": "config/0420-pg_wait.yml",
    "chars": 722,
    "preview": "#==============================================================#\n# 0420 pg_wait\n#======================================="
  },
  {
    "path": "config/0430-pg_backend.yml",
    "chars": 590,
    "preview": "#==============================================================#\n# 0430 pg_backend\n#===================================="
  },
  {
    "path": "config/0440-pg_xact.yml",
    "chars": 886,
    "preview": "#==============================================================#\n# 0440 pg_xact\n#======================================="
  },
  {
    "path": "config/0450-pg_lock.yml",
    "chars": 1104,
    "preview": "#==============================================================#\n# 0450 pg_lock\n#======================================="
  },
  {
    "path": "config/0460-pg_query.yml",
    "chars": 5960,
    "preview": "#==============================================================#\n# 0460 pg_query\n#======================================"
  },
  {
    "path": "config/0510-pg_vacuuming.yml",
    "chars": 3931,
    "preview": "#==============================================================#\n# 0510 pg_vacuuming\n#=================================="
  },
  {
    "path": "config/0520-pg_indexing.yml",
    "chars": 1411,
    "preview": "#==============================================================#\n# 0520 pg_indexing\n#==================================="
  },
  {
    "path": "config/0530-pg_clustering.yml",
    "chars": 970,
    "preview": "#==============================================================#\n# 0530 pg_clustering\n#================================="
  },
  {
    "path": "config/0540-pg_backup.yml",
    "chars": 890,
    "preview": "#==============================================================#\n# 0540 pg_backup\n#====================================="
  },
  {
    "path": "config/0610-pg_db.yml",
    "chars": 20169,
    "preview": "#==============================================================#\n# 0610 pg_db\n#========================================="
  },
  {
    "path": "config/0620-pg_db_confl.yml",
    "chars": 2566,
    "preview": "#==============================================================#\n# 0620 pg_db_confl\n#==================================="
  },
  {
    "path": "config/0640-pg_pubrel.yml",
    "chars": 721,
    "preview": "#==============================================================#\n# 0640 pg_pubrel\n#====================================="
  },
  {
    "path": "config/0650-pg_subrel.yml",
    "chars": 936,
    "preview": "#==============================================================#\n# 0650 pg_subrel\n#====================================="
  },
  {
    "path": "config/0700-pg_table.yml",
    "chars": 28434,
    "preview": "#==============================================================#\n# 0700 pg_table\n#======================================"
  },
  {
    "path": "config/0710-pg_index.yml",
    "chars": 2256,
    "preview": "#==============================================================#\n# 0710 pg_index\n#======================================"
  },
  {
    "path": "config/0720-pg_func.yml",
    "chars": 1111,
    "preview": "#==============================================================#\n# 0720 pg_func\n#======================================="
  },
  {
    "path": "config/0730-pg_seq.yml",
    "chars": 1051,
    "preview": "#==============================================================#\n# 0730 pg_seq\n#========================================"
  },
  {
    "path": "config/0740-pg_relkind.yml",
    "chars": 707,
    "preview": "#==============================================================#\n# 0740 pg_relkind\n#===================================="
  },
  {
    "path": "config/0750-pg_defpart.yml",
    "chars": 860,
    "preview": "#==============================================================#\n# 0750 pg_defpart\n#===================================="
  },
  {
    "path": "config/0810-pg_table_size.yml",
    "chars": 1480,
    "preview": "#==============================================================#\n# 0810 pg_table_size\n#================================="
  },
  {
    "path": "config/0820-pg_table_bloat.yml",
    "chars": 960,
    "preview": "#==============================================================#\n# 0820 pg_table_bloat\n#================================"
  },
  {
    "path": "config/0830-pg_index_bloat.yml",
    "chars": 935,
    "preview": "#==============================================================#\n# 0830 pg_index_bloat\n#================================"
  },
  {
    "path": "config/0910-pgbouncer_list.yml",
    "chars": 592,
    "preview": "#==============================================================#\n# 0910 pgbouncer_list\n#================================"
  },
  {
    "path": "config/0920-pgbouncer_database.yml",
    "chars": 7633,
    "preview": "#==============================================================#\n# 0920 pgbouncer_database\n#============================"
  },
  {
    "path": "config/0930-pgbouncer_stat.yml",
    "chars": 8601,
    "preview": "#==============================================================#\n# 0930 pgbouncer_stat\n#================================"
  },
  {
    "path": "config/0940-pgbouncer_pool.yml",
    "chars": 9893,
    "preview": "#==============================================================#\n# 0940 pgbouncer_pool\n#================================"
  },
  {
    "path": "config/1000-pg_wait_event.yml",
    "chars": 1363,
    "preview": "#==============================================================#\n# 1000 pg_wait_event\n#================================="
  },
  {
    "path": "config/1800-pg_tsdb_hypertable.yml",
    "chars": 1336,
    "preview": "#==============================================================#\n# 1800 pg_tsdb_hypertable\n#============================"
  },
  {
    "path": "config/1900-pg_citus.yml",
    "chars": 1477,
    "preview": "#==============================================================#\n# 1900 pg_citus_node\n#================================="
  },
  {
    "path": "config/2000-pg_heartbeat.yml",
    "chars": 1084,
    "preview": "#==============================================================#\n# 2000 heartbeat\n#====================================="
  },
  {
    "path": "docker/.dockerignore",
    "chars": 54,
    "preview": "*\n!go.mod\n!go.sum\n!main.go\n!exporter\n!pg_exporter.yml\n"
  },
  {
    "path": "docker/README.md",
    "chars": 3154,
    "preview": "# Docker Build Scripts\n\nThis directory contains scripts for building Docker images for pg_exporter.\n\n## Scripts\n\n### `bu"
  },
  {
    "path": "docker/build.sh",
    "chars": 2799,
    "preview": "#!/bin/bash\n#==============================================================#\n# File      :   docker/build.sh\n# Desc     "
  },
  {
    "path": "docker/release.sh",
    "chars": 5580,
    "preview": "#!/bin/bash\n#==============================================================#\n# File      :   docker/release.sh\n# Desc   "
  },
  {
    "path": "exporter/arg.go",
    "chars": 3908,
    "preview": "package exporter\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com/alecthomas/kingpin/v2\"\n\t\"github.com/prometheus/exporter"
  },
  {
    "path": "exporter/args_normalize.go",
    "chars": 2160,
    "preview": "package exporter\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/alecthomas/kingpin/v2\"\n)\n\n// normalizeKingpinBoolEqualsAr"
  },
  {
    "path": "exporter/args_normalize_test.go",
    "chars": 1945,
    "preview": "package exporter\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/alecthomas/kingpin/v2\"\n)\n\nfunc TestNormalizeKingpinBoolEq"
  },
  {
    "path": "exporter/collector.go",
    "chars": 12271,
    "preview": "package exporter\n\nimport (\n\t\"context\"\n\t\"database/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/promethe"
  },
  {
    "path": "exporter/column.go",
    "chars": 3004,
    "preview": "package exporter\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\n/* ======="
  },
  {
    "path": "exporter/concurrency_test.go",
    "chars": 2350,
    "preview": "package exporter\n\nimport (\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc newMockExp"
  },
  {
    "path": "exporter/config.go",
    "chars": 8768,
    "preview": "package exporter\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"gopkg.in/yaml.v3\"\n)\n\n// GetConfig will try load c"
  },
  {
    "path": "exporter/config_coverage_pg9_test.go",
    "chars": 2249,
    "preview": "package exporter\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"testing\"\n)\n\n// Ensure the legacy config (legacy/config) c"
  },
  {
    "path": "exporter/config_coverage_test.go",
    "chars": 2266,
    "preview": "package exporter\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"testing\"\n)\n\n// Ensure the repo-bundled config/ covers PG1"
  },
  {
    "path": "exporter/config_merged_test.go",
    "chars": 2441,
    "preview": "package exporter\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"slices\"\n\t\"testing\"\n)\n\nfunc parseConfigDirLikeM"
  },
  {
    "path": "exporter/config_style_test.go",
    "chars": 2439,
    "preview": "package exporter\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar in"
  },
  {
    "path": "exporter/config_test.go",
    "chars": 4231,
    "preview": "package exporter\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n)\n\nfunc TestParseConfigUsageCaseInsensitive(t *testing.T) {"
  },
  {
    "path": "exporter/exporter.go",
    "chars": 30534,
    "preview": "package exporter\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/prometheus/cl"
  },
  {
    "path": "exporter/exporter_handlers_opts_test.go",
    "chars": 3451,
    "preview": "package exporter\n\nimport (\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"testing\"\n)\n\nfunc TestExporterOpt"
  },
  {
    "path": "exporter/global.go",
    "chars": 834,
    "preview": "package exporter\n\nimport (\n\t\"log/slog\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync/atomic\"\n)\n\n/* ================ Parameters ============="
  },
  {
    "path": "exporter/health_state_test.go",
    "chars": 1572,
    "preview": "package exporter\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/lib/pq\"\n)\n\nfunc TestIsPostgresStartupError(t *testi"
  },
  {
    "path": "exporter/main.go",
    "chars": 5988,
    "preview": "package exporter\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/signal\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/"
  },
  {
    "path": "exporter/metrics_lifecycle_test.go",
    "chars": 4263,
    "preview": "package exporter\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc makeCachedColle"
  },
  {
    "path": "exporter/pgurl.go",
    "chars": 3942,
    "preview": "package exporter\n\nimport (\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n)\n\n// GetPGURL will retrieve, parse, modify postgres connection s"
  },
  {
    "path": "exporter/pgurl_test.go",
    "chars": 3696,
    "preview": "package exporter\n\nimport (\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n)\n\nfunc TestProcessPGURLKeepsEncodedQueryValues("
  },
  {
    "path": "exporter/predicate_cache_test.go",
    "chars": 2130,
    "preview": "package exporter\n\nimport (\n\t\"context\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestPredicateCacheHitSkipsDBQuery(t *testing.T) {\n\tq :="
  },
  {
    "path": "exporter/probehealth_pgbouncer_test.go",
    "chars": 2317,
    "preview": "package exporter\n\nimport (\n\t\"context\"\n\t\"database/sql\"\n\t\"database/sql/driver\"\n\t\"errors\"\n\t\"io\"\n\t\"testing\"\n)\n\nvar errProbeH"
  },
  {
    "path": "exporter/prom_validate.go",
    "chars": 704,
    "preview": "package exporter\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/prometheus/common/model\"\n)\n\nfunc validatePromLabelName(name s"
  },
  {
    "path": "exporter/query.go",
    "chars": 7383,
    "preview": "package exporter\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\thtmltmpl \"html/template\"\n\t\"slices\"\n\ttexttmpl \"text/template\"\n\t\"time\"\n\n\t\"gopk"
  },
  {
    "path": "exporter/query_column_test.go",
    "chars": 3232,
    "preview": "package exporter\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc make"
  },
  {
    "path": "exporter/reload_signals_unix.go",
    "chars": 133,
    "preview": "//go:build !windows\n\npackage exporter\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\nvar reloadSignals = []os.Signal{syscall.SIGHUP, sysc"
  },
  {
    "path": "exporter/reload_signals_windows.go",
    "chars": 115,
    "preview": "//go:build windows\n\npackage exporter\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\nvar reloadSignals = []os.Signal{syscall.SIGHUP}\n"
  },
  {
    "path": "exporter/reload_test.go",
    "chars": 1709,
    "preview": "package exporter\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n)\n\nfunc TestReloadUpdatesQueriesInPlace(t *testing.T) {\n\tor"
  },
  {
    "path": "exporter/server.go",
    "chars": 24789,
    "preview": "package exporter\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"database/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n"
  },
  {
    "path": "exporter/server_exporter_test.go",
    "chars": 6885,
    "preview": "package exporter\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc make"
  },
  {
    "path": "exporter/testmain_test.go",
    "chars": 142,
    "preview": "package exporter\n\nimport (\n\t\"os\"\n\t\"testing\"\n)\n\nfunc TestMain(m *testing.M) {\n\tLogger = configureLogger(\"error\", \"logfmt\""
  },
  {
    "path": "exporter/utils.go",
    "chars": 4618,
    "preview": "package exporter\n\nimport (\n\t\"fmt\"\n\t\"log/slog\"\n\t\"math\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/prometheus/clien"
  },
  {
    "path": "exporter/utils_test.go",
    "chars": 4245,
    "preview": "package exporter\n\nimport (\n\t\"math\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestParseCSV(t *testing.T) {\n\tif got := parseCS"
  },
  {
    "path": "exporter/validate_labels.go",
    "chars": 1592,
    "preview": "package exporter\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\n// validateConstLabelCon"
  },
  {
    "path": "exporter/validate_labels_test.go",
    "chars": 1609,
    "preview": "package exporter\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestValidateConstLabelConflicts_QueryLabelOverlap(t *testing.T)"
  },
  {
    "path": "go.mod",
    "chars": 1520,
    "preview": "module pg_exporter\n\ngo 1.26.2\n\nrequire (\n\tgithub.com/alecthomas/kingpin/v2 v2.4.0\n\tgithub.com/lib/pq v1.12.3\n\tgithub.com"
  },
  {
    "path": "go.sum",
    "chars": 9159,
    "preview": "github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=\ngithub.com/alecthomas/kingpin/v2"
  },
  {
    "path": "hugo.yaml",
    "chars": 3589,
    "preview": "baseURL: 'https://exp.pgsty.com/'\nlanguageCode: 'en'\ntitle: 'PG Exporter - Advanced PostgreSQL & pgBouncer Metrics Expor"
  },
  {
    "path": "legacy/README.md",
    "chars": 934,
    "preview": "# Legacy Config Bundle (for PG 9.1 - 9.6)\n\nThis directory contains the **legacy pg_exporter config bundle** for **Postgr"
  },
  {
    "path": "legacy/config/0000-doc.yml",
    "chars": 2332,
    "preview": "#==============================================================#\n# Desc      :   pg_exporter metrics collector definitio"
  },
  {
    "path": "legacy/config/0110-pg.yml",
    "chars": 7122,
    "preview": "#==============================================================#\n# 0110 pg\n#============================================"
  },
  {
    "path": "legacy/config/0120-pg_meta.yml",
    "chars": 5245,
    "preview": "#==============================================================#\n# 0120 pg_meta\n#======================================="
  },
  {
    "path": "legacy/config/0130-pg_setting.yml",
    "chars": 9666,
    "preview": "#==============================================================#\n# 0130 pg_setting\n#===================================="
  },
  {
    "path": "legacy/config/0210-pg_repl.yml",
    "chars": 16946,
    "preview": "#==============================================================#\n# 0210 pg_repl\n#======================================="
  },
  {
    "path": "legacy/config/0220-pg_sync_standby.yml",
    "chars": 763,
    "preview": "#==============================================================#\n# 0220 pg_sync_standby\n#==============================="
  },
  {
    "path": "legacy/config/0230-pg_downstream.yml",
    "chars": 994,
    "preview": "#==============================================================#\n# 0230 pg_downstream\n#================================="
  },
  {
    "path": "legacy/config/0240-pg_slot.yml",
    "chars": 9158,
    "preview": "#==============================================================#\n# 0240 pg_slot\n#======================================="
  },
  {
    "path": "legacy/config/0250-pg_recv.yml",
    "chars": 3355,
    "preview": "#==============================================================#\n# 0250 pg_recv\n#======================================="
  },
  {
    "path": "legacy/config/0270-pg_origin.yml",
    "chars": 1354,
    "preview": "#==============================================================#\n# 0270 pg_origin\n#====================================="
  },
  {
    "path": "legacy/config/0310-pg_size.yml",
    "chars": 511,
    "preview": "#==============================================================#\n# 0310 pg_size\n#======================================="
  },
  {
    "path": "legacy/config/0320-pg_archiver.yml",
    "chars": 1116,
    "preview": "#==============================================================#\n# 0320 pg_archiver\n#==================================="
  },
  {
    "path": "legacy/config/0330-pg_bgwriter.yml",
    "chars": 4258,
    "preview": "#==============================================================#\n# 0330 pg_bgwriter\n#==================================="
  },
  {
    "path": "legacy/config/0331-pg_checkpointer.yml",
    "chars": 2952,
    "preview": "#==============================================================#\n# 0331 pg_checkpointer\n#==============================="
  },
  {
    "path": "legacy/config/0340-pg_ssl.yml",
    "chars": 616,
    "preview": "#==============================================================#\n# 0340 pg_ssl\n#========================================"
  },
  {
    "path": "legacy/config/0350-pg_checkpoint.yml",
    "chars": 3346,
    "preview": "#==============================================================#\n# 0350 pg_checkpoint\n#================================="
  },
  {
    "path": "legacy/config/0355-pg_timeline.yml",
    "chars": 517,
    "preview": "#==============================================================#\n# 0355 pg_timeline\n#==================================="
  },
  {
    "path": "legacy/config/0360-pg_recovery.yml",
    "chars": 1550,
    "preview": "#==============================================================#\n# 0360 pg_recovery\n#==================================="
  },
  {
    "path": "legacy/config/0410-pg_activity.yml",
    "chars": 3047,
    "preview": "#==============================================================#\n# 0410 pg_activity\n#==================================="
  },
  {
    "path": "legacy/config/0420-pg_wait.yml",
    "chars": 1346,
    "preview": "#==============================================================#\n# 0420 pg_wait\n#======================================="
  },
  {
    "path": "legacy/config/0440-pg_xact.yml",
    "chars": 905,
    "preview": "#==============================================================#\n# 0440 pg_xact\n#======================================="
  },
  {
    "path": "legacy/config/0450-pg_lock.yml",
    "chars": 1103,
    "preview": "#==============================================================#\n# 0450 pg_lock\n#======================================="
  },
  {
    "path": "legacy/config/0460-pg_query.yml",
    "chars": 3834,
    "preview": "#==============================================================#\n# 0460 pg_query\n#======================================"
  },
  {
    "path": "legacy/config/0610-pg_db.yml",
    "chars": 8448,
    "preview": "#==============================================================#\n# 0610 pg_db\n#========================================="
  },
  {
    "path": "legacy/config/0620-pg_db_confl.yml",
    "chars": 1362,
    "preview": "#==============================================================#\n# 0620 pg_db_confl\n#==================================="
  },
  {
    "path": "legacy/config/0700-pg_table.yml",
    "chars": 13300,
    "preview": "#==============================================================#\n# 0700 pg_table\n#======================================"
  },
  {
    "path": "legacy/config/0710-pg_index.yml",
    "chars": 2293,
    "preview": "#==============================================================#\n# 0710 pg_index\n#======================================"
  },
  {
    "path": "legacy/config/0720-pg_func.yml",
    "chars": 1110,
    "preview": "#==============================================================#\n# 0720 pg_func\n#======================================="
  },
  {
    "path": "legacy/config/0740-pg_relkind.yml",
    "chars": 603,
    "preview": "#==============================================================#\n# 0740 pg_relkind\n#===================================="
  },
  {
    "path": "legacy/config/0810-pg_table_size.yml",
    "chars": 1479,
    "preview": "#==============================================================#\n# 0810 pg_table_size\n#================================="
  },
  {
    "path": "legacy/config/0820-pg_table_bloat.yml",
    "chars": 959,
    "preview": "#==============================================================#\n# 0820 pg_table_bloat\n#================================"
  },
  {
    "path": "legacy/config/0830-pg_index_bloat.yml",
    "chars": 959,
    "preview": "#==============================================================#\n# 0830 pg_index_bloat\n#================================"
  },
  {
    "path": "legacy/config/0910-pgbouncer_list.yml",
    "chars": 592,
    "preview": "#==============================================================#\n# 0910 pgbouncer_list\n#================================"
  },
  {
    "path": "legacy/config/0920-pgbouncer_database.yml",
    "chars": 7633,
    "preview": "#==============================================================#\n# 0920 pgbouncer_database\n#============================"
  },
  {
    "path": "legacy/config/0930-pgbouncer_stat.yml",
    "chars": 8601,
    "preview": "#==============================================================#\n# 0930 pgbouncer_stat\n#================================"
  },
  {
    "path": "legacy/config/0940-pgbouncer_pool.yml",
    "chars": 9893,
    "preview": "#==============================================================#\n# 0940 pgbouncer_pool\n#================================"
  },
  {
    "path": "legacy/config/1800-pg_tsdb_hypertable.yml",
    "chars": 1335,
    "preview": "#==============================================================#\n# 1800 pg_tsdb_hypertable\n#============================"
  },
  {
    "path": "legacy/config/1900-pg_citus.yml",
    "chars": 1476,
    "preview": "#==============================================================#\n# 1900 pg_citus_node\n#================================="
  },
  {
    "path": "legacy/config/2000-pg_heartbeat.yml",
    "chars": 1083,
    "preview": "#==============================================================#\n# 2000 heartbeat\n#====================================="
  },
  {
    "path": "legacy/pg_exporter.yml",
    "chars": 143162,
    "preview": "#==============================================================#\n# Desc      :   pg_exporter metrics collector definitio"
  },
  {
    "path": "main.go",
    "chars": 872,
    "preview": "/***********************************************************************\\\nCopyright © 2018-2026 Ruohang Feng <rh@vonng.c"
  },
  {
    "path": "monitor/initdb.sh",
    "chars": 21285,
    "preview": "#!/bin/bash\nset -euo pipefail\n#==============================================================#\n# File      :   initdb.sh"
  },
  {
    "path": "monitor/pgrds-instance.json",
    "chars": 426628,
    "preview": "{\n    \"annotations\": {\n        \"list\": [\n            {\n                \"builtIn\": 1,\n                \"datasource\": {\n   "
  },
  {
    "path": "monitor/pgsql-exporter.json",
    "chars": 129953,
    "preview": "{\n    \"annotations\": {\n        \"list\": [\n            {\n                \"builtIn\": 1,\n                \"datasource\": {\n   "
  },
  {
    "path": "package/nfpm-amd64-deb.yaml",
    "chars": 1259,
    "preview": "name: \"pg-exporter\"\narch: \"amd64\"\nplatform: \"linux\"\nversion: \"v1.2.2\"\nrelease: \"1\"\nversion_schema: semver\nmaintainer: Ru"
  },
  {
    "path": "package/nfpm-amd64-rpm.yaml",
    "chars": 1263,
    "preview": "name: \"pg_exporter\"\narch: \"amd64\"\nplatform: \"linux\"\nversion: \"v1.2.2\"\nrelease: \"1\"\nversion_schema: semver\nmaintainer: Ru"
  },
  {
    "path": "package/nfpm-arm64-deb.yaml",
    "chars": 1259,
    "preview": "name: \"pg-exporter\"\narch: \"arm64\"\nplatform: \"linux\"\nversion: \"v1.2.2\"\nrelease: \"1\"\nversion_schema: semver\nmaintainer: Ru"
  },
  {
    "path": "package/nfpm-arm64-rpm.yaml",
    "chars": 1263,
    "preview": "name: \"pg_exporter\"\narch: \"arm64\"\nplatform: \"linux\"\nversion: \"v1.2.2\"\nrelease: \"1\"\nversion_schema: semver\nmaintainer: Ru"
  },
  {
    "path": "package/pg_exporter.default",
    "chars": 448,
    "preview": "PG_EXPORTER_URL='postgres://:5432/?sslmode=disable'\nPG_EXPORTER_CONFIG=/etc/pg_exporter.yml\nPG_EXPORTER_LABEL=\"\"\nPG_EXPO"
  },
  {
    "path": "package/pg_exporter.service",
    "chars": 323,
    "preview": "[Unit]\nDescription=Prometheus exporter for PostgreSQL/Pgbouncer server metrics\nDocumentation=https://pigsty.io/docs/pg_e"
  },
  {
    "path": "package/preinstall.sh",
    "chars": 264,
    "preview": "#!/bin/bash\n\n# create a group & user named prometheus if not exists\ngetent group prometheus >/dev/null || groupadd -r pr"
  },
  {
    "path": "pg_exporter.yml",
    "chars": 220547,
    "preview": "#==============================================================#\n# Desc      :   pg_exporter metrics collector definitio"
  }
]

About this extraction

This page contains the full source code of the Vonng/pg_exporter GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 158 files (1.5 MB, approximately 351.8k tokens) and a symbol index of 226 extracted functions, classes, methods, constants, and types. Use it with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub-repository-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!