[
  {
    "path": ".github/CONTRIBUTING.md",
    "content": "# Contributing\n\nWhen contributing to this repository, please open an issue with a description of the problem you wish to solve, prior to sending a pull request.\n\n## Contributing Code\n\nPlease ensure that all code is formatted prior to committing.\n\n### Commit messages\n\nCommits to this repository should have messages that conform to the [AngularJS Git Commit Guidelines](https://github.com/angular/angular.js/blob/master/DEVELOPERS.md#-git-commit-guidelines)."
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "# This is a basic workflow to help you get started with Actions\nname: Release\n\n# Controls when the action will run. Triggers the workflow on push or pull request\n# events but only for the master branch\non:\n  push:\n    branches: [\"master\"]\n\nenv:\n  PARALLELISM: 3\n\n# A workflow run is made up of one or more jobs that can run sequentially or in parallel\njobs:\n  # This workflow contains a single job called \"release\"\n  release:\n    # The type of runner that the job will run on\n    runs-on: ubuntu-latest\n\n    # Steps represent a sequence of tasks that will be executed as part of the job\n    steps:\n      - name: Go Report Card\n        uses: creekorful/goreportcard-action@v1.0\n\n      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it\n      - name: Checkout\n        id: checkout\n        uses: actions/checkout@v2\n        with:\n          # Fetch all versions for tag/changelog generation\n          fetch-depth: 0\n\n      - name: Set up Go\n        uses: actions/setup-go@v4\n        with:\n          go-version: 1.24.2\n\n      - name: Install promu\n        id: make_promu\n        run: |\n          make promu\n\n      - name: Calculate Version\n        id: calculate_version\n        uses: mathieudutour/github-tag-action@v4.5\n        with:\n          github_token: ${{ secrets.GITHUB_TOKEN }}\n          dry_run: true\n\n      - name: Update Version\n        id: update_version\n        env:\n          NEW_VERSION: ${{ steps.calculate_version.outputs.new_version }}\n        run: |\n          echo \"${NEW_VERSION}\" > VERSION\n\n      - name: Update Changelog\n        id: update_changelog\n        env:\n          CHANGELOG: ${{ steps.calculate_version.outputs.changelog }}\n        run: |\n          mv CHANGELOG.md _CHANGELOG.md || touch _CHANGELOG.md\n          echo \"${CHANGELOG}\" > CHANGELOG.md\n          cat _CHANGELOG.md >> CHANGELOG.md\n          rm -f _CHANGELOG.md\n\n      - name: Commit Changes\n  
      id: commit_changes\n        uses: EndBug/add-and-commit@v9.1.1\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        with:\n          add: VERSION CHANGELOG.md\n          message: |\n            chore(build): Releasing ${{ steps.calculate_version.outputs.new_tag }}\n\n      - name: Commit Tag\n        id: commit_tag\n        uses: mathieudutour/github-tag-action@v6.1\n        with:\n          github_token: ${{ secrets.GITHUB_TOKEN }}\n          commit_sha: ${{ steps.commit_changes.outputs.commit_long_sha }}\n\n      - name: Build\n        id: build\n        run: |\n          promu crossbuild --parallelism $PARALLELISM\n          promu crossbuild --parallelism $PARALLELISM tarballs\n          promu checksum .tarballs\n\n      - name: Create Release\n        id: create_release\n        uses: actions/create-release@v1\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        with:\n          tag_name: ${{ steps.calculate_version.outputs.new_tag }}\n          release_name: Release ${{ steps.calculate_version.outputs.new_tag }}\n          body: |\n            Changes in this release:\n            ${{ steps.calculate_version.outputs.changelog }}\n          draft: false\n          prerelease: false\n\n      - name: Upload Release Assets\n        id: upload_release_assets\n        uses: AButler/upload-release-assets@v2.0\n        with:\n          files: \".tarballs/*\"\n          repo-token: ${{ secrets.GITHUB_TOKEN }}\n          release-tag: ${{ steps.calculate_version.outputs.new_tag }}\n"
  },
  {
    "path": ".github/workflows/test.yml",
    "content": "name: Test\n\n# Controls when the action will run. Triggers the workflow on push or pull request\n# events but only for the master branch\non:\n  pull_request:\n    branches:\n      - master\n  push:\n    branches:\n      - master\n\n# A workflow run is made up of one or more jobs that can run sequentially or in parallel\njobs:\n  # This workflow contains a single job called \"test\"\n  test:\n    # The type of runner that the job will run on\n    runs-on: ubuntu-latest\n\n    # Steps represent a sequence of tasks that will be executed as part of the job\n    steps:\n      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it\n      - name: Checkout\n        id: checkout\n        uses: actions/checkout@v2\n        with:\n          # Fetch all versions for tag/changelog generation\n          fetch-depth: 0\n\n      - name: Set up Go\n        uses: actions/setup-go@v4\n        with:\n          go-version: 1.24.2\n\n      - name: Test\n        id: test\n        run: |\n          make test\n"
  },
  {
    "path": ".gitignore",
    "content": "zfs_exporter\n.build/\n.tarballs/\n"
  },
  {
    "path": ".golangci.yml",
    "content": "version: \"2\"\nlinters:\n  enable:\n    - errorlint\n    - misspell\n    - perfsprint\n    - revive\n    - testifylint\n  settings:\n    perfsprint:\n      # Optimizes even if it requires an int or uint type cast.\n      int-conversion: true\n      # Optimizes into `err.Error()` even if it is only equivalent for non-nil errors.\n      err-error: true\n      # Optimizes `fmt.Errorf`.\n      errorf: true\n      # Optimizes `fmt.Sprintf` with only one argument.\n      sprintf1: true\n      # Optimizes into strings concatenation.\n      strconcat: false\n    revive:\n      rules:\n        # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter\n        - name: unused-parameter\n          severity: warning\n          disabled: true\n    testifylint:\n      enable-all: true\n      disable:\n        - go-require\n      formatter:\n        require-f-funcs: true\n  exclusions:\n    generated: lax\n    presets:\n      - comments\n      - common-false-positives\n      - legacy\n      - std-error-handling\n    paths:\n      - third_party$\n      - builtin$\n      - examples$\nissues:\n  max-issues-per-linter: 0\n  max-same-issues: 0\nformatters:\n  enable:\n    - gofumpt\n    - goimports\n  settings:\n    goimports:\n      local-prefixes:\n        - github.com/prometheus/common\n  exclusions:\n    generated: lax\n    paths:\n      - third_party$\n      - builtin$\n      - examples$\n"
  },
  {
    "path": ".promu.yml",
    "content": "go:\n  # Whenever the Go version is updated here,\n  # .circle/config.yml should also be updated.\n  version: 1.23\nrepository:\n  path: github.com/pdf/zfs_exporter/v2\nbuild:\n  flags: -a -tags netgo\n  ldflags: |\n    -X github.com/prometheus/common/version.Version={{.Version}}\n    -X github.com/prometheus/common/version.Revision={{.Revision}}\n    -X github.com/prometheus/common/version.Branch={{.Branch}}\n    -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}}\n    -X github.com/prometheus/common/version.BuildDate={{date \"20060102-15:04:05\"}}\ncrossbuild:\n  platforms:\n    - linux\n    - illumos\n    - darwin\n    - freebsd\n    - netbsd\n    - dragonfly\ntarball:\n  files:\n    - LICENSE\n    - CHANGELOG.md\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "## [2.3.12](https://github.com/pdf/zfs_exporter/compare/v2.3.11...v2.3.12) (2026-04-04)\n\n\n### Bug Fixes\n\n* **docs:** Update installation command for zfs_exporter to v2 ([#66](https://github.com/pdf/zfs_exporter/issues/66)) ([1769a9e](https://github.com/pdf/zfs_exporter/commit/1769a9e))\n\n\n\n\n## [2.3.11](https://github.com/pdf/zfs_exporter/compare/v2.3.10...v2.3.11) (2025-11-24)\n\n\n### Bug Fixes\n\n* **security:** Bump deps for CVE-2025-58181 ([12cf70c](https://github.com/pdf/zfs_exporter/commit/12cf70c))\n\n\n\n\n## [2.3.10](https://github.com/pdf/zfs_exporter/compare/v2.3.9...v2.3.10) (2025-08-24)\n\n\n### Bug Fixes\n\n* **props:** Fix filesystem creation property ([9a6beb3](https://github.com/pdf/zfs_exporter/commit/9a6beb3)), closes [#57](https://github.com/pdf/zfs_exporter/issues/57)\n\n\n\n\n## [2.3.9](https://github.com/pdf/zfs_exporter/compare/v2.3.8...v2.3.9) (2025-08-24)\n\n\n### Bug Fixes\n\n* **props:** Add support for dataset `creation` property ([a1c90f4](https://github.com/pdf/zfs_exporter/commit/a1c90f4)), closes [#57](https://github.com/pdf/zfs_exporter/issues/57)\n\n\n\n\n## [2.3.8](https://github.com/pdf/zfs_exporter/compare/v2.3.7...v2.3.8) (2025-04-20)\n\n\n### Bug Fixes\n\n* **build:** Bump Go version and golangci-lint ([4d46ab3](https://github.com/pdf/zfs_exporter/commit/4d46ab3))\n\n\n\n\n## [2.3.7](https://github.com/pdf/zfs_exporter/compare/v2.3.6...v2.3.7) (2025-04-20)\n\n\n### Bug Fixes\n\n* **deps:** Bump dependencies ([6af54d2](https://github.com/pdf/zfs_exporter/commit/6af54d2))\n\n\n\n\n## [2.3.6](https://github.com/pdf/zfs_exporter/compare/v2.3.5...v2.3.6) (2025-01-18)\n\n\n### Bug Fixes\n\n* **build:** Bump Go version in actions ([00498df](https://github.com/pdf/zfs_exporter/commit/00498df))\n\n\n\n\n## [2.3.5](https://github.com/pdf/zfs_exporter/compare/v2.3.4...v2.3.5) (2025-01-18)\n\n\n### Bug Fixes\n\n* **core:** Bump dependencies, migrate to promslog 
([ccc2b21](https://github.com/pdf/zfs_exporter/commit/ccc2b21))\n\n\n\n\n## [2.3.4](https://github.com/pdf/zfs_exporter/compare/v2.3.3...v2.3.4) (2024-04-13)\n\n\n### Bug Fixes\n\n* **deps:** Bump deps for security ([1404536](https://github.com/pdf/zfs_exporter/commit/1404536))\n\n\n\n\n## [2.3.3](https://github.com/pdf/zfs_exporter/compare/v2.3.2...v2.3.3) (2024-04-13)\n\n\n### Bug Fixes\n\n* **log:** Improve command execution error output ([2277832](https://github.com/pdf/zfs_exporter/commit/2277832))\n\n\n\n\n## [2.3.2](https://github.com/pdf/zfs_exporter/compare/v2.3.1...v2.3.2) (2023-10-13)\n\n\n\n\n## [2.3.1](https://github.com/pdf/zfs_exporter/compare/v2.3.0...v2.3.1) (2023-08-12)\n\n\n### Bug Fixes\n\n* **build:** Update deps ([ddf8e09](https://github.com/pdf/zfs_exporter/commit/ddf8e09))\n\n\n\n\n# [2.3.0](https://github.com/pdf/zfs_exporter/compare/v2.2.8...v2.3.0) (2023-08-12)\n\n\n### Features\n\n* **server:** Add exporter toolkit for TLS support ([8102e2e](https://github.com/pdf/zfs_exporter/commit/8102e2e)), closes [#34](https://github.com/pdf/zfs_exporter/issues/34)\n\n\n\n\n## [2.2.8](https://github.com/pdf/zfs_exporter/compare/v2.2.7...v2.2.8) (2023-04-22)\n\n\n### Bug Fixes\n\n* **build:** Tag correct commit SHA ([0712333](https://github.com/pdf/zfs_exporter/commit/0712333))\n* **security:** Update dependencies for upstream vulnerabilities ([2220da2](https://github.com/pdf/zfs_exporter/commit/2220da2))\n\n\n\n\n## [2.2.7](https://github.com/pdf/zfs_exporter/compare/v2.2.6...v2.2.7) (2023-01-28)\n\n\n### Bug Fixes\n\n* **transform:** Add support for ancient ZFS dedupratio metric ([85bdc3b](https://github.com/pdf/zfs_exporter/commit/85bdc3b)), closes [#26](https://github.com/pdf/zfs_exporter/issues/26)\n\n\n\n\n## [2.2.6](https://github.com/pdf/zfs_exporter/compare/v2.2.5...v2.2.6) (2023-01-28)\n\n\n### Bug Fixes\n\n* **transform:** Add support for ancient ZFS fragmentation metric ([a0240d1](https://github.com/pdf/zfs_exporter/commit/a0240d1)), 
closes [#26](https://github.com/pdf/zfs_exporter/issues/26)\n\n\n\n\n## [2.2.5](https://github.com/pdf/zfs_exporter/compare/v2.2.4...v2.2.5) (2022-01-30)\n\n\n### Bug Fixes\n\n* **core:** Correctly handle and report errors listing pools ([efbcceb](https://github.com/pdf/zfs_exporter/commit/efbcceb)), closes [#18](https://github.com/pdf/zfs_exporter/issues/18)\n\n\n\n\n## [2.2.4](https://github.com/pdf/zfs_exporter/compare/v2.2.3...v2.2.4) (2022-01-05)\n\n\n### Bug Fixes\n\n* **build:** Update promu config to build v2 ([2a38914](https://github.com/pdf/zfs_exporter/commit/2a38914))\n\n\n\n\n## [2.2.3](https://github.com/pdf/zfs_exporter/compare/v2.2.2...v2.2.3) (2022-01-05)\n\n\n### Bug Fixes\n\n* **build:** update go module version to match release tag major version ([f709083](https://github.com/pdf/zfs_exporter/commit/f709083))\n\n\n\n\n## [2.2.2](https://github.com/pdf/zfs_exporter/compare/v2.2.1...v2.2.2) (2021-11-16)\n\n\n### Bug Fixes\n\n* **metrics:** Fix typo in metric name ([bbd3d91](https://github.com/pdf/zfs_exporter/commit/bbd3d91))\n* **pool:** Add SUSPENDED status ([9b9e655](https://github.com/pdf/zfs_exporter/commit/9b9e655))\n* **tests:** Remove unnecessary duration conversion ([b6a29ab](https://github.com/pdf/zfs_exporter/commit/b6a29ab))\n\n\n\n\n## [2.2.1](https://github.com/pdf/zfs_exporter/compare/v2.2.0...v2.2.1) (2021-09-13)\n\n\n### Bug Fixes\n\n* **collector:** Avoid race on upstream channel close, tidy sync points ([e6fbdf5](https://github.com/pdf/zfs_exporter/commit/e6fbdf5))\n* **docs:** Document web.disable-exporter-metrics flag in README ([20182da](https://github.com/pdf/zfs_exporter/commit/20182da))\n\n\n\n\n# [2.2.0](https://github.com/pdf/zfs_exporter/compare/v2.1.1...v2.2.0) (2021-09-04)\n\n\n### Bug Fixes\n\n* **docs:** Correct misspelling ([066c7d2](https://github.com/pdf/zfs_exporter/commit/066c7d2))\n\n\n### Features\n\n* **metrics:** Allow disabling exporter metrics 
([1ca8717](https://github.com/pdf/zfs_exporter/commit/1ca8717)), closes [#2](https://github.com/pdf/zfs_exporter/issues/2)\n\n\n\n\n## [2.1.1](https://github.com/pdf/zfs_exporter/compare/v2.1.0...v2.1.1) (2021-08-27)\n\n\n### Bug Fixes\n\n* **build:** Update to Go 1.17 for crossbuild, and enable all platforms ([f47b69a](https://github.com/pdf/zfs_exporter/commit/f47b69a))\n* **core:** Update dependencies ([b39382b](https://github.com/pdf/zfs_exporter/commit/b39382b))\n\n\n\n\n# [2.1.0](https://github.com/pdf/zfs_exporter/compare/v2.0.0...v2.1.0) (2021-08-18)\n\n\n### Bug Fixes\n\n* **logging:** Include collector in warning for unsupported properties ([1760a4a](https://github.com/pdf/zfs_exporter/commit/1760a4a))\n* **metrics:** Invert ratio for multiplier fields, and clarify their docs ([1a7bc3a](https://github.com/pdf/zfs_exporter/commit/1a7bc3a)), closes [#11](https://github.com/pdf/zfs_exporter/issues/11)\n\n\n### Features\n\n* **build:** Update to Go 1.17 ([b64115c](https://github.com/pdf/zfs_exporter/commit/b64115c))\n\n\n\n\n# [2.0.0](https://github.com/pdf/zfs_exporter/compare/v1.0.1...v2.0.0) (2021-08-14)\n\n\n### Code Refactoring\n\n* **collector:** Migrate to internal ZFS CLI implementation ([53b0e98](https://github.com/pdf/zfs_exporter/commit/53b0e98)), closes [#7](https://github.com/pdf/zfs_exporter/issues/7) [#9](https://github.com/pdf/zfs_exporter/issues/9) [#10](https://github.com/pdf/zfs_exporter/issues/10)\n\n\n### Features\n\n* **performance:** Execute collection concurrently per pool ([ccc6f22](https://github.com/pdf/zfs_exporter/commit/ccc6f22))\n* **zfs:** Add local ZFS CLI parsing ([f5050b1](https://github.com/pdf/zfs_exporter/commit/f5050b1))\n\n\n### BREAKING CHANGES\n\n* **collector:** Ratio values are now properly calculated in the range\n0-1, rather than being passed verbatim.\n\nThe following metrics are affected by this change:\n- zfs_pool_deduplication_ratio\n- zfs_pool_capacity_ratio\n- zfs_pool_fragmentation_ratio\n- 
zfs_dataset_compression_ratio\n- zfs_dataset_referenced_compression_ratio\n\nAdditionally, the zfs_dataset_fragmentation_percent metric has been\nrenamed to zfs_dataset_fragmentation_ratio.\n\n\n\n\n## [1.0.1](https://github.com/pdf/zfs_exporter/compare/v1.0.0...v1.0.1) (2021-08-03)\n\n\n### Bug Fixes\n\n* fix copy and paste errors when accessing dataset properties ([c0fc6b2](https://github.com/pdf/zfs_exporter/commit/c0fc6b2))\n\n\n\n\n# [1.0.0](https://github.com/pdf/zfs_exporter/compare/v0.0.3...v1.0.0) (2021-06-22)\n\n\n### Bug Fixes\n\n* **ci:** Fix syntax error in github actions workflow ([0b6e8bc](https://github.com/pdf/zfs_exporter/commit/0b6e8bc))\n\n\n### Code Refactoring\n\n* **core:** Update prometheus toolchain and refactor internals ([056b386](https://github.com/pdf/zfs_exporter/commit/056b386))\n\n\n### Features\n\n* **enhancement:** Allow excluding datasets by regular expression ([8dd48ba](https://github.com/pdf/zfs_exporter/commit/8dd48ba)), closes [#3](https://github.com/pdf/zfs_exporter/issues/3)\n\n\n### BREAKING CHANGES\n\n* **core:** Go API has changed somewhat, but metrics remain\nunaffected.\n\n\n\n\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2018 Peter Fern\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE."
  },
  {
    "path": "Makefile",
    "content": "# Copyright 2015 The Prometheus Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Needs to be defined before including Makefile.common to auto-generate targets\nDOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x\nDOCKER_IMAGE_NAME ?= zfs-exporter\n\n.PHONY: all\nall:: test build\n\n.PHONY: test\ntest:: vet precheck style lint unused common-test\n\ninclude Makefile.common\n"
  },
  {
    "path": "Makefile.common",
    "content": "# Copyright 2018 The Prometheus Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# A common Makefile that includes rules to be reused in different prometheus projects.\n# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!\n\n# Example usage :\n# Create the main Makefile in the root project directory.\n# include Makefile.common\n# customTarget:\n# \t@echo \">> Running customTarget\"\n#\n\n# Ensure GOBIN is not set during build so that promu is installed to the correct path\nunexport GOBIN\n\nGO           ?= go\nGOFMT        ?= $(GO)fmt\nFIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))\nGOOPTS       ?=\nGOHOSTOS     ?= $(shell $(GO) env GOHOSTOS)\nGOHOSTARCH   ?= $(shell $(GO) env GOHOSTARCH)\n\nGO_VERSION        ?= $(shell $(GO) version)\nGO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))\nPRE_GO_111        ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\\.(10|[0-9])\\.')\n\nPROMU        := $(FIRST_GOPATH)/bin/promu\npkgs          = ./...\n\nifeq (arm, $(GOHOSTARCH))\n\tGOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)\n\tGO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)\nelse\n\tGO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)\nendif\n\nGOTEST := $(GO) test\nGOTEST_DIR :=\nifneq ($(CIRCLE_JOB),)\nifneq ($(shell command -v gotestsum 2> /dev/null),)\n\tGOTEST_DIR := test-results\n\tGOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --\nendif\nendif\n\nPROMU_VERSION ?= 
0.17.0\nPROMU_URL     := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz\n\nSKIP_GOLANGCI_LINT :=\nGOLANGCI_LINT :=\nGOLANGCI_LINT_OPTS ?=\nGOLANGCI_LINT_VERSION ?= v2.1.2\n# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.\n# windows isn't included here because of the path separator being different.\nifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))\n\tifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))\n\t\t# If we're in CI and there is an Actions file, that means the linter\n\t\t# is being run in Actions, so we don't need to run it here.\n\t\tifneq (,$(SKIP_GOLANGCI_LINT))\n\t\t\tGOLANGCI_LINT :=\n\t\telse ifeq (,$(CIRCLE_JOB))\n\t\t\tGOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint\n\t\telse ifeq (,$(wildcard .github/workflows/golangci-lint.yml))\n\t\t\tGOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint\n\t\tendif\n\tendif\nendif\n\nPREFIX                  ?= $(shell pwd)\nBIN_DIR                 ?= $(shell pwd)\nDOCKER_IMAGE_TAG        ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))\nDOCKERFILE_PATH         ?= ./Dockerfile\nDOCKERBUILD_CONTEXT     ?= ./\nDOCKER_REPO             ?= prom\n\nDOCKER_ARCHS            ?= amd64\n\nBUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))\nPUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))\nTAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))\n\nSANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))\n\nifeq ($(GOHOSTARCH),amd64)\n        ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))\n                # Only supported on amd64\n                test-flags := -race\n        endif\nendif\n\n# This rule is used to forward a target like \"build\" to \"common-build\".  
This\n# allows a new \"build\" target to be defined in a Makefile which includes this\n# one and override \"common-build\" without override warnings.\n%: common-% ;\n\n.PHONY: common-all\ncommon-all: precheck style check_license lint yamllint unused build test\n\n.PHONY: common-style\ncommon-style:\n\t@echo \">> checking code style\"\n\t@fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \\\n\tif [ -n \"$${fmtRes}\" ]; then \\\n\t\techo \"gofmt checking failed!\"; echo \"$${fmtRes}\"; echo; \\\n\t\techo \"Please ensure you are using $$($(GO) version) for formatting code.\"; \\\n\t\texit 1; \\\n\tfi\n\n.PHONY: common-check_license\ncommon-check_license:\n\t@echo \">> checking license header\"\n\t@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \\\n               awk 'NR<=3' $$file | grep -Eq \"(Copyright|generated|GENERATED)\" || echo $$file; \\\n       done); \\\n       if [ -n \"$${licRes}\" ]; then \\\n               echo \"license header checking failed:\"; echo \"$${licRes}\"; \\\n               exit 1; \\\n       fi\n\n.PHONY: common-deps\ncommon-deps:\n\t@echo \">> getting dependencies\"\n\t$(GO) mod download\n\n.PHONY: update-go-deps\nupdate-go-deps:\n\t@echo \">> updating Go dependencies\"\n\t@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \\\n\t\t$(GO) get -d $$m; \\\n\tdone\n\t$(GO) mod tidy\n\n.PHONY: common-test-short\ncommon-test-short: $(GOTEST_DIR)\n\t@echo \">> running short tests\"\n\t$(GOTEST) -short $(GOOPTS) $(pkgs)\n\n.PHONY: common-test\ncommon-test: $(GOTEST_DIR)\n\t@echo \">> running all tests\"\n\t$(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)\n\n$(GOTEST_DIR):\n\t@mkdir -p $@\n\n.PHONY: common-format\ncommon-format:\n\t@echo \">> formatting code\"\n\t$(GO) fmt $(pkgs)\n\n.PHONY: common-vet\ncommon-vet:\n\t@echo \">> vetting code\"\n\t$(GO) vet $(GOOPTS) $(pkgs)\n\n.PHONY: common-lint\ncommon-lint: $(GOLANGCI_LINT)\nifdef 
GOLANGCI_LINT\n\t@echo \">> running golangci-lint\"\n\t$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)\nendif\n\n.PHONY: common-lint-fix\ncommon-lint-fix: $(GOLANGCI_LINT)\nifdef GOLANGCI_LINT\n\t@echo \">> running golangci-lint fix\"\n\t$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)\nendif\n\n.PHONY: common-yamllint\ncommon-yamllint:\n\t@echo \">> running yamllint on all YAML files in the repository\"\nifeq (, $(shell command -v yamllint 2> /dev/null))\n\t@echo \"yamllint not installed so skipping\"\nelse\n\tyamllint .\nendif\n\n# For backward-compatibility.\n.PHONY: common-staticcheck\ncommon-staticcheck: lint\n\n.PHONY: common-unused\ncommon-unused:\n\t@echo \">> running check for unused/missing packages in go.mod\"\n\t$(GO) mod tidy\n\t@git diff --exit-code -- go.sum go.mod\n\n.PHONY: common-build\ncommon-build: promu\n\t@echo \">> building binaries\"\n\t$(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)\n\n.PHONY: common-tarball\ncommon-tarball: promu\n\t@echo \">> building release tarball\"\n\t$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)\n\n.PHONY: common-docker-repo-name\ncommon-docker-repo-name:\n\t@echo \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)\"\n\n.PHONY: common-docker $(BUILD_DOCKER_ARCHS)\ncommon-docker: $(BUILD_DOCKER_ARCHS)\n$(BUILD_DOCKER_ARCHS): common-docker-%:\n\tdocker build -t \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)\" \\\n\t\t-f $(DOCKERFILE_PATH) \\\n\t\t--build-arg ARCH=\"$*\" \\\n\t\t--build-arg OS=\"linux\" \\\n\t\t$(DOCKERBUILD_CONTEXT)\n\n.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)\ncommon-docker-publish: $(PUBLISH_DOCKER_ARCHS)\n$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:\n\tdocker push \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)\"\n\nDOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))\n.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)\ncommon-docker-tag-latest: $(TAG_DOCKER_ARCHS)\n$(TAG_DOCKER_ARCHS): 
common-docker-tag-latest-%:\n\tdocker tag \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)\" \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest\"\n\tdocker tag \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)\" \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)\"\n\n.PHONY: common-docker-manifest\ncommon-docker-manifest:\n\tDOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)\" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))\n\tDOCKER_CLI_EXPERIMENTAL=enabled docker manifest push \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)\"\n\n.PHONY: promu\npromu: $(PROMU)\n\n$(PROMU):\n\t$(eval PROMU_TMP := $(shell mktemp -d))\n\tcurl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)\n\tmkdir -p $(FIRST_GOPATH)/bin\n\tcp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu\n\trm -r $(PROMU_TMP)\n\n.PHONY: proto\nproto:\n\t@echo \">> generating code from proto files\"\n\t@./scripts/genproto.sh\n\nifdef GOLANGCI_LINT\n$(GOLANGCI_LINT):\n\tmkdir -p $(FIRST_GOPATH)/bin\n\tcurl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \\\n\t\t| sed -e '/install -d/d' \\\n\t\t| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)\nendif\n\n.PHONY: precheck\nprecheck::\n\ndefine PRECHECK_COMMAND_template =\nprecheck:: $(1)_precheck\n\nPRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))\n.PHONY: $(1)_precheck\n$(1)_precheck:\n\t@if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \\\n\t\techo \"Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. 
Is $(1) installed?\"; \\\n\t\texit 1; \\\n\tfi\nendef\n\ngovulncheck: install-govulncheck\n\tgovulncheck ./...\n\ninstall-govulncheck:\n\tcommand -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest\n"
  },
  {
    "path": "README.md",
    "content": "# ZFS Exporter\n\n[![Test](https://github.com/pdf/zfs_exporter/actions/workflows/test.yml/badge.svg)](https://github.com/pdf/zfs_exporter/actions/workflows/test.yml)\n[![Release](https://github.com/pdf/zfs_exporter/actions/workflows/release.yml/badge.svg)](https://github.com/pdf/zfs_exporter/actions/workflows/release.yml)\n[![Go Report Card](https://goreportcard.com/badge/github.com/pdf/zfs_exporter)](https://goreportcard.com/report/github.com/pdf/zfs_exporter)\n[![License](https://img.shields.io/badge/License-MIT-%23a31f34)](https://github.com/pdf/zfs_exporter/blob/master/LICENSE)\n\nPrometheus exporter for ZFS (pools, filesystems, snapshots and volumes). Other implementations exist, however performance can be quite variable, producing occasional timeouts (and associated alerts). This exporter was built with a few features aimed at allowing users to avoid collecting more than they need to, and to ensure timeouts cannot occur, but that we eventually return useful data:\n\n- **Pool selection** - allow the user to select which pools are collected\n- **Multiple collectors** - allow the user to select which data types are collected (pools, filesystems, snapshots and volumes)\n- **Property selection** - allow the user to select which properties are collected per data type (enabling only required properties will increase collector performance, by reducing metadata queries)\n- **Collection deadline and caching** - if the collection duration exceeds the configured deadline, cached data from the last run will be returned for any metrics that have not yet been collected, and the current collection run will continue in the background. 
Collections will not run concurrently, so that when a system is running slowly, we don't compound the problem - if an existing collection is still running, cached data will be returned.\n\n## Installation\n\nDownload the [latest release](https://github.com/pdf/zfs_exporter/releases/latest) for your platform, and unpack it somewhere on your filesystem.\n\nYou may also build the latest version using Go v1.11 - 1.17 via `go get`:\n\n```bash\ngo get -u github.com/pdf/zfs_exporter\n```\n\nInstallation can also be accomplished using `go install`:\n\n```bash\nversion=latest # or a specific version tag\ngo install github.com/pdf/zfs_exporter/v2@$version\n```\n\n## Usage\n\n```\nusage: zfs_exporter [<flags>]\n\n\nFlags:\n  -h, --[no-]help                Show context-sensitive help (also try --help-long and --help-man).\n      --[no-]collector.dataset-filesystem  \n                                 Enable the dataset-filesystem collector (default: enabled)\n      --properties.dataset-filesystem=\"available,logicalused,quota,referenced,used,usedbydataset,written\"  \n                                 Properties to include for the dataset-filesystem collector, comma-separated.\n      --[no-]collector.dataset-snapshot  \n                                 Enable the dataset-snapshot collector (default: disabled)\n      --properties.dataset-snapshot=\"logicalused,referenced,used,written\"  \n                                 Properties to include for the dataset-snapshot collector, comma-separated.\n      --[no-]collector.dataset-volume  \n                                 Enable the dataset-volume collector (default: enabled)\n      --properties.dataset-volume=\"available,logicalused,referenced,used,usedbydataset,volsize,written\"  \n                                 Properties to include for the dataset-volume collector, comma-separated.\n      --[no-]collector.pool      Enable the pool collector (default: enabled)\n      
--properties.pool=\"allocated,dedupratio,fragmentation,free,freeing,health,leaked,readonly,size\"  \n                                 Properties to include for the pool collector, comma-separated.\n      --web.telemetry-path=\"/metrics\"  \n                                 Path under which to expose metrics.\n      --[no-]web.disable-exporter-metrics  \n                                 Exclude metrics about the exporter itself (promhttp_*, process_*, go_*).\n      --deadline=8s              Maximum duration that a collection should run before returning cached data. Should be set to a value shorter than your scrape timeout duration. The current collection run will continue and update the cache when\n                                 complete (default: 8s)\n      --pool=POOL ...            Name of the pool(s) to collect, repeat for multiple pools (default: all pools).\n      --exclude=EXCLUDE ...      Exclude datasets/snapshots/volumes that match the provided regex (e.g. '^rpool/docker/'), may be specified multiple times.\n      --[no-]web.systemd-socket  Use systemd socket activation listeners instead of port listeners (Linux only).\n      --web.listen-address=:9134 ...  \n                                 Addresses on which to expose metrics and web interface. Repeatable for multiple addresses. Examples: `:9100` or `[::1]:9100` for http, `vsock://:9100` for vsock\n      --web.config.file=\"\"       Path to configuration file that can enable TLS or authentication. See: https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md\n      --log.level=info           Only log messages with the given severity or above. One of: [debug, info, warn, error]\n      --log.format=logfmt        Output format of log messages. 
One of: [logfmt, json]\n      --[no-]version             Show application version.\n```\n\nCollectors that are enabled by default can be negated by prefixing the flag with `--no-*`, ie:\n\n```\nzfs_exporter --no-collector.dataset-filesystem\n```\n\n## TLS endpoint\n\n**EXPERIMENTAL**\n\nThe exporter supports TLS via a new web configuration file.\n\n```console\n./zfs_exporter --web.config.file=web-config.yml\n```\n\nSee the [exporter-toolkit https package](https://github.com/prometheus/exporter-toolkit/blob/v0.1.0/https/README.md) for more details.\n\n## Caveats\n\nThe collector may need to be run as root on some platforms (ie - Linux prior to ZFS v0.7.0).\n\nWhilst inspiration was taken from some of the alternative ZFS collectors, metric names may not be compatible.\n\n## Alternatives\n\nIn no particular order, here are some alternative implementations:\n\n- https://github.com/eliothedeman/zfs_exporter\n- https://github.com/ncabatoff/zfs-exporter\n- https://github.com/eripa/prometheus-zfs\n"
  },
  {
    "path": "VERSION",
    "content": "2.3.12\n"
  },
  {
    "path": "collector/cache.go",
    "content": "package collector\n\nimport (\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\ntype metricCache struct {\n\tcache map[string]prometheus.Metric\n\tsync.RWMutex\n}\n\nfunc (c *metricCache) add(m metric) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tc.cache[m.name] = m.prometheus\n}\n\nfunc (c *metricCache) merge(other *metricCache) {\n\tif c == other {\n\t\treturn\n\t}\n\tc.Lock()\n\tother.RLock()\n\tdefer func() {\n\t\tother.RUnlock()\n\t\tc.Unlock()\n\t}()\n\tfor name, value := range other.cache {\n\t\tc.cache[name] = value\n\t}\n}\n\nfunc (c *metricCache) replace(other *metricCache) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tc.cache = other.cache\n}\n\nfunc (c *metricCache) index() map[string]struct{} {\n\tc.RLock()\n\tdefer c.RUnlock()\n\tindex := make(map[string]struct{}, len(c.cache))\n\tfor name := range c.cache {\n\t\tindex[name] = struct{}{}\n\t}\n\n\treturn index\n}\n\nfunc newMetricCache() *metricCache {\n\treturn &metricCache{cache: make(map[string]prometheus.Metric)}\n}\n"
  },
  {
    "path": "collector/collector.go",
    "content": "package collector\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/alecthomas/kingpin/v2\"\n\t\"github.com/pdf/zfs_exporter/v2/zfs\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nconst (\n\tdefaultEnabled           = true\n\tdefaultDisabled          = false\n\tnamespace                = `zfs`\n\thelpDefaultStateEnabled  = `enabled`\n\thelpDefaultStateDisabled = `disabled`\n\n\tsubsystemDataset = `dataset`\n\tsubsystemPool    = `pool`\n\n\tpropertyUnsupportedDesc = `!!! This property is unsupported, results are likely to be undesirable, please file an issue at https://github.com/pdf/zfs_exporter/issues to have this property supported !!!`\n\tpropertyUnsupportedMsg  = `Unsupported dataset property, results are likely to be undesirable`\n\thelpIssue               = `Please file an issue at https://github.com/pdf/zfs_exporter/issues`\n)\n\nvar (\n\tcollectorStates        = make(map[string]State)\n\tscrapeDurationDescName = prometheus.BuildFQName(namespace, `scrape`, `collector_duration_seconds`)\n\tscrapeDurationDesc     = prometheus.NewDesc(\n\t\tscrapeDurationDescName,\n\t\t`zfs_exporter: Duration of a collector scrape.`,\n\t\t[]string{`collector`},\n\t\tnil,\n\t)\n\tscrapeSuccessDescName = prometheus.BuildFQName(namespace, `scrape`, `collector_success`)\n\tscrapeSuccessDesc     = prometheus.NewDesc(\n\t\tscrapeSuccessDescName,\n\t\t`zfs_exporter: Whether a collector succeeded.`,\n\t\t[]string{`collector`},\n\t\tnil,\n\t)\n\n\terrUnsupportedProperty = errors.New(`unsupported property`)\n)\n\ntype factoryFunc func(l *slog.Logger, c zfs.Client, properties []string) (Collector, error)\n\ntype transformFunc func(string) (float64, error)\n\n// State holds metadata for managing collector status\ntype State struct {\n\tName       string\n\tEnabled    *bool\n\tProperties *string\n\tfactory    factoryFunc\n}\n\n// Collector defines the minimum functionality for registering a collector\ntype 
Collector interface {\n\tupdate(ch chan<- metric, pools []string, excludes regexpCollection) error\n\tdescribe(ch chan<- *prometheus.Desc)\n}\n\ntype metric struct {\n\tname       string\n\tprometheus prometheus.Metric\n}\n\ntype property struct {\n\tname      string\n\tdesc      *prometheus.Desc\n\ttransform transformFunc\n\tkind      prometheus.ValueType\n}\n\nfunc (p property) push(ch chan<- metric, value string, labelValues ...string) error {\n\tv, err := p.transform(value)\n\tif err != nil {\n\t\treturn err\n\t}\n\tch <- metric{\n\t\tname: expandMetricName(p.name, labelValues...),\n\t\tprometheus: prometheus.MustNewConstMetric(\n\t\t\tp.desc,\n\t\t\tp.kind,\n\t\t\tv,\n\t\t\tlabelValues...,\n\t\t),\n\t}\n\n\treturn nil\n}\n\ntype propertyStore struct {\n\tdefaultSubsystem string\n\tdefaultLabels    []string\n\tstore            map[string]property\n}\n\nfunc (p *propertyStore) find(name string) (property, error) {\n\tprop, ok := p.store[name]\n\tif !ok {\n\t\tprop = newProperty(\n\t\t\tp.defaultSubsystem,\n\t\t\tname,\n\t\t\tpropertyUnsupportedDesc,\n\t\t\ttransformNumeric,\n\t\t\tprometheus.GaugeValue,\n\t\t\tp.defaultLabels...,\n\t\t)\n\t\treturn prop, errUnsupportedProperty\n\t}\n\treturn prop, nil\n}\n\nfunc registerCollector(collector string, isDefaultEnabled bool, defaultProps string, factory factoryFunc) {\n\thelpDefaultState := helpDefaultStateDisabled\n\tif isDefaultEnabled {\n\t\thelpDefaultState = helpDefaultStateEnabled\n\t}\n\n\tenabledFlagName := fmt.Sprintf(\"collector.%s\", collector)\n\tenabledFlagHelp := fmt.Sprintf(\"Enable the %s collector (default: %s)\", collector, helpDefaultState)\n\tenabledDefaultValue := strconv.FormatBool(isDefaultEnabled)\n\n\tpropsFlagName := fmt.Sprintf(\"properties.%s\", collector)\n\tpropsFlagHelp := fmt.Sprintf(\"Properties to include for the %s collector, comma-separated.\", collector)\n\n\tenabledFlag := kingpin.Flag(enabledFlagName, enabledFlagHelp).Default(enabledDefaultValue).Bool()\n\tpropsFlag := 
kingpin.Flag(propsFlagName, propsFlagHelp).Default(defaultProps).String()\n\n\tcollectorStates[collector] = State{\n\t\tEnabled:    enabledFlag,\n\t\tProperties: propsFlag,\n\t\tfactory:    factory,\n\t}\n}\n\nfunc expandMetricName(prefix string, context ...string) string {\n\treturn strings.Join(append(context, prefix), `-`)\n}\n\nfunc newProperty(subsystem, metricName, helpText string, transform transformFunc, kind prometheus.ValueType, labels ...string) property {\n\tname := prometheus.BuildFQName(namespace, subsystem, metricName)\n\treturn property{\n\t\tname:      name,\n\t\tdesc:      prometheus.NewDesc(name, helpText, labels, nil),\n\t\ttransform: transform,\n\t\tkind:      kind,\n\t}\n}\n"
  },
  {
    "path": "collector/collector_test.go",
    "content": "package collector\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"io\"\n\t\"log/slog\"\n\t\"time\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/testutil\"\n)\n\nvar logger = slog.New(slog.NewTextHandler(io.Discard, nil))\n\nfunc callCollector(ctx context.Context, collector prometheus.Collector, metricResults []byte, metricNames []string) error {\n\tresult := make(chan error)\n\tgo func() {\n\t\tresult <- testutil.CollectAndCompare(collector, bytes.NewBuffer(metricResults), metricNames...)\n\t}()\n\n\tselect {\n\tcase err := <-result:\n\t\treturn err\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}\n\nfunc defaultConfig(z zfs.Client) ZFSConfig {\n\treturn ZFSConfig{\n\t\tDisableMetrics: true,\n\t\tDeadline:       5 * time.Minute,\n\t\tLogger:         logger,\n\t\tZFSClient:      z,\n\t}\n}\n\nfunc stringPointer(s string) *string {\n\treturn &s\n}\n\nfunc boolPointer(b bool) *bool {\n\treturn &b\n}\n"
  },
  {
    "path": "collector/dataset.go",
    "content": "package collector\n\nimport (\n\t\"fmt\"\n\t\"log/slog\"\n\t\"sync\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nconst (\n\tdefaultFilesystemProps = `available,logicalused,quota,referenced,used,usedbydataset,written`\n\tdefaultSnapshotProps   = `logicalused,referenced,used,written`\n\tdefaultVolumeProps     = `available,logicalused,referenced,used,usedbydataset,volsize,written`\n)\n\nvar (\n\tdatasetLabels     = []string{`name`, `pool`, `type`}\n\tdatasetProperties = propertyStore{\n\t\tdefaultSubsystem: subsystemDataset,\n\t\tdefaultLabels:    datasetLabels,\n\t\tstore: map[string]property{\n\t\t\t`available`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`available_bytes`,\n\t\t\t\t`The amount of space in bytes available to the dataset and all its children.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`compressratio`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`compression_ratio`,\n\t\t\t\t`The ratio of compressed size vs uncompressed size for this dataset.`,\n\t\t\t\ttransformMultiplier,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`logicalused`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`logical_used_bytes`,\n\t\t\t\t`The amount of space in bytes that is \"logically\" consumed by this dataset and all its descendents. See the \"used_bytes\" property.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`logicalreferenced`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`logical_referenced_bytes`,\n\t\t\t\t`The amount of space that is \"logically\" accessible by this dataset. 
See the \"referenced_bytes\" property.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`quota`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`quota_bytes`,\n\t\t\t\t`The maximum amount of space in bytes this dataset and its descendents can consume.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`refcompressratio`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`referenced_compression_ratio`,\n\t\t\t\t`The ratio of compressed size vs uncompressed size for the referenced space of this dataset. See also the \"compression_ratio\" property.`,\n\t\t\t\ttransformMultiplier,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`referenced`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`referenced_bytes`,\n\t\t\t\t`The amount of data in bytes that is accessible by this dataset, which may or may not be shared with other datasets in the pool.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`refquota`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`referenced_quota_bytes`,\n\t\t\t\t`The maximum amount of space in bytes this dataset can consume.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`refreservation`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`referenced_reservation_bytes`,\n\t\t\t\t`The minimum amount of space in bytes guaranteed to this dataset.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`reservation`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`reservation_bytes`,\n\t\t\t\t`The minimum amount of space in bytes guaranteed to a dataset and its descendants.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`snapshot_count`: 
newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`snapshot_count_total`,\n\t\t\t\t`The total number of snapshots that exist under this location in the dataset tree. This value is only available when a snapshot_limit has been set somewhere in the tree under which the dataset resides.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`snapshot_limit`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`snapshot_limit_total`,\n\t\t\t\t`The total limit on the number of snapshots that can be created on a dataset and its descendents.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`used`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`used_bytes`,\n\t\t\t\t`The amount of space in bytes consumed by this dataset and all its descendents.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`usedbychildren`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`used_by_children_bytes`,\n\t\t\t\t`The amount of space in bytes used by children of this dataset, which would be freed if all the dataset's children were destroyed.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`usedbydataset`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`used_by_dataset_bytes`,\n\t\t\t\t`The amount of space in bytes used by this dataset itself, which would be freed if the dataset were destroyed.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`usedbyrefreservation`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`used_by_referenced_reservation_bytes`,\n\t\t\t\t`The amount of space in bytes used by a refreservation set on this dataset, which would be freed if the refreservation was removed.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`usedbysnapshots`: 
newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`used_by_snapshot_bytes`,\n\t\t\t\t`The amount of space in bytes consumed by snapshots of this dataset.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`volsize`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`volume_size_bytes`,\n\t\t\t\t`The logical size in bytes of this volume.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`written`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`written_bytes`,\n\t\t\t\t`The amount of referenced space in bytes written to this dataset since the previous snapshot.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t\t`creation`: newProperty(\n\t\t\t\tsubsystemDataset,\n\t\t\t\t`creation_timestamp`,\n\t\t\t\t`The unix timestamp when this dataset was created.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tdatasetLabels...,\n\t\t\t),\n\t\t},\n\t}\n)\n\nfunc init() {\n\tregisterCollector(`dataset-filesystem`, defaultEnabled, defaultFilesystemProps, newFilesystemCollector)\n\tregisterCollector(`dataset-snapshot`, defaultDisabled, defaultSnapshotProps, newSnapshotCollector)\n\tregisterCollector(`dataset-volume`, defaultEnabled, defaultVolumeProps, newVolumeCollector)\n}\n\ntype datasetCollector struct {\n\tkind   zfs.DatasetKind\n\tlog    *slog.Logger\n\tclient zfs.Client\n\tprops  []string\n}\n\nfunc (c *datasetCollector) describe(ch chan<- *prometheus.Desc) {\n\tfor _, k := range c.props {\n\t\tprop, err := datasetProperties.find(k)\n\t\tif err != nil {\n\t\t\tc.log.Warn(propertyUnsupportedMsg, `help`, helpIssue, `collector`, c.kind, `property`, k, `err`, err)\n\t\t\tcontinue\n\t\t}\n\t\tch <- prop.desc\n\t}\n}\n\nfunc (c *datasetCollector) update(ch chan<- metric, pools []string, excludes regexpCollection) error {\n\tvar wg sync.WaitGroup\n\terrChan := make(chan error, len(pools))\n\tfor _, 
pool := range pools {\n\t\twg.Add(1)\n\t\tgo func(pool string) {\n\t\t\tif err := c.updatePoolMetrics(ch, pool, excludes); err != nil {\n\t\t\t\terrChan <- err\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(pool)\n\t}\n\twg.Wait()\n\n\tselect {\n\tcase err := <-errChan:\n\t\treturn err\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc (c *datasetCollector) updatePoolMetrics(ch chan<- metric, pool string, excludes regexpCollection) error {\n\tdatasets := c.client.Datasets(pool, c.kind)\n\tprops, err := datasets.Properties(c.props...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, dataset := range props {\n\t\tif excludes.MatchString(dataset.DatasetName()) {\n\t\t\tcontinue\n\t\t}\n\t\tif err = c.updateDatasetMetrics(ch, pool, dataset); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (c *datasetCollector) updateDatasetMetrics(ch chan<- metric, pool string, dataset zfs.DatasetProperties) error {\n\tlabelValues := []string{dataset.DatasetName(), pool, string(c.kind)}\n\n\tfor k, v := range dataset.Properties() {\n\t\tprop, err := datasetProperties.find(k)\n\t\tif err != nil {\n\t\t\tc.log.Warn(propertyUnsupportedMsg, `help`, helpIssue, `collector`, c.kind, `property`, k, `err`, err)\n\t\t}\n\t\tif err = prop.push(ch, v, labelValues...); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc newDatasetCollector(kind zfs.DatasetKind, l *slog.Logger, c zfs.Client, props []string) (Collector, error) {\n\tswitch kind {\n\tcase zfs.DatasetFilesystem, zfs.DatasetSnapshot, zfs.DatasetVolume:\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown dataset type: %s\", kind)\n\t}\n\n\treturn &datasetCollector{kind: kind, log: l, client: c, props: props}, nil\n}\n\nfunc newFilesystemCollector(l *slog.Logger, c zfs.Client, props []string) (Collector, error) {\n\treturn newDatasetCollector(zfs.DatasetFilesystem, l, c, props)\n}\n\nfunc newSnapshotCollector(l *slog.Logger, c zfs.Client, props []string) (Collector, error) {\n\treturn 
newDatasetCollector(zfs.DatasetSnapshot, l, c, props)\n}\n\nfunc newVolumeCollector(l *slog.Logger, c zfs.Client, props []string) (Collector, error) {\n\treturn newDatasetCollector(zfs.DatasetVolume, l, c, props)\n}\n"
  },
  {
    "path": "collector/dataset_test.go",
    "content": "package collector\n\nimport (\n\t\"context\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs\"\n\t\"github.com/pdf/zfs_exporter/v2/zfs/mock_zfs\"\n\t\"go.uber.org/mock/gomock\"\n)\n\ntype datasetResults struct {\n\tname    string\n\tresults map[string]string\n}\n\nfunc TestDatsetMetrics(t *testing.T) {\n\ttestCases := []struct {\n\t\tname           string\n\t\tkinds          []zfs.DatasetKind\n\t\tpools          []string\n\t\texplicitPools  []string\n\t\tpropsRequested []string\n\t\tmetricNames    []string\n\t\tpropsResults   map[string][]datasetResults\n\t\tmetricResults  string\n\t}{\n\t\t{\n\t\t\tname:           `all metrics`,\n\t\t\tkinds:          []zfs.DatasetKind{zfs.DatasetFilesystem},\n\t\t\tpools:          []string{`testpool`},\n\t\t\tpropsRequested: []string{`available`, `compressratio`, `logicalused`, `logicalreferenced`, `quota`, `refcompressratio`, `referenced`, `refquota`, `refreservation`, `reservation`, `snapshot_count`, `snapshot_limit`, `used`, `usedbychildren`, `usedbydataset`, `usedbyrefreservation`, `usedbysnapshots`, `volsize`, `written`, `creation`},\n\t\t\tmetricNames:    []string{`zfs_dataset_available_bytes`, `zfs_dataset_compression_ratio`, `zfs_dataset_logical_used_bytes`, `zfs_dataset_logical_referenced_bytes`, `zfs_dataset_quota_bytes`, `zfs_dataset_referenced_compression_ratio`, `zfs_dataset_referenced_bytes`, `zfs_dataset_referenced_quota_bytes`, `zfs_dataset_reservation_bytes`, `zfs_dataset_snapshot_count_total`, `zfs_datset_snapshot_limit_total`, `zfs_dataset_used_bytes`, `zfs_dataset_used_by_children_bytes`, `zfs_dataset_used_by_datset_bytes`, `zfs_datset_used_by_referenced_reservation_bytes`, `zfs_dataset_used_by_snapshot_bytes`, `zfs_dataset_volume_size_bytes`, `zfs_dataset_written_bytes`},\n\t\t\tpropsResults: map[string][]datasetResults{\n\t\t\t\t`testpool`: {\n\t\t\t\t\t{\n\t\t\t\t\t\tname: `testpool/test`,\n\t\t\t\t\t\tresults: map[string]string{\n\t\t\t\t\t\t\t`available`:            
`1024`,\n\t\t\t\t\t\t\t`compressratio`:        `2.50`,\n\t\t\t\t\t\t\t`logicalused`:          `1024`,\n\t\t\t\t\t\t\t`logicalreferenced`:    `512`,\n\t\t\t\t\t\t\t`quota`:                `512`,\n\t\t\t\t\t\t\t`refcompressratio`:     `24.00`,\n\t\t\t\t\t\t\t`referenced`:           `1024`,\n\t\t\t\t\t\t\t`refreservation`:       `1024`,\n\t\t\t\t\t\t\t`reservation`:          `1024`,\n\t\t\t\t\t\t\t`snapshot_count`:       `12`,\n\t\t\t\t\t\t\t`snapshot_limit`:       `24`,\n\t\t\t\t\t\t\t`used`:                 `1024`,\n\t\t\t\t\t\t\t`usedbychildren`:       `1024`,\n\t\t\t\t\t\t\t`usedbydataset`:        `1024`,\n\t\t\t\t\t\t\t`usedbyrefreservation`: `1024`,\n\t\t\t\t\t\t\t`usedbysnapshots`:      `1024`,\n\t\t\t\t\t\t\t`volsize`:              `1024`,\n\t\t\t\t\t\t\t`written`:              `1024`,\n\t\t\t\t\t\t\t`creation`:             `1756033110`,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetricResults: `# HELP zfs_dataset_available_bytes The amount of space in bytes available to the dataset and all its children.\n# TYPE zfs_dataset_available_bytes gauge\nzfs_dataset_available_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 1024\n# HELP zfs_dataset_compression_ratio The ratio of compressed size vs uncompressed size for this dataset.\n# TYPE zfs_dataset_compression_ratio gauge\nzfs_dataset_compression_ratio{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 0.4\n# HELP zfs_dataset_logical_used_bytes The amount of space in bytes that is \"logically\" consumed by this dataset and all its descendents. See the \"used_bytes\" property.\n# TYPE zfs_dataset_logical_used_bytes gauge\nzfs_dataset_logical_used_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 1024\n# HELP zfs_dataset_logical_referenced_bytes The amount of space that is \"logically\" accessible by this dataset. 
See the \"referenced_bytes\" property.\n# TYPE zfs_dataset_logical_referenced_bytes gauge\nzfs_dataset_logical_referenced_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 512\n# HELP zfs_dataset_quota_bytes The maximum amount of space in bytes this dataset and its descendents can consume.\n# TYPE zfs_dataset_quota_bytes gauge\nzfs_dataset_quota_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 512\n# HELP zfs_dataset_referenced_bytes The amount of data in bytes that is accessible by this dataset, which may or may not be shared with other datasets in the pool.\n# TYPE zfs_dataset_referenced_bytes gauge\nzfs_dataset_referenced_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 1024\n# HELP zfs_dataset_referenced_compression_ratio The ratio of compressed size vs uncompressed size for the referenced space of this dataset. See also the \"compression_ratio\" property.\n# TYPE zfs_dataset_referenced_compression_ratio gauge\nzfs_dataset_referenced_compression_ratio{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 0.041666666666666664\n# HELP zfs_dataset_reservation_bytes The minimum amount of space in bytes guaranteed to a dataset and its descendants.\n# TYPE zfs_dataset_reservation_bytes gauge\nzfs_dataset_reservation_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 1024\n# HELP zfs_dataset_snapshot_count_total The total number of snapshots that exist under this location in the dataset tree. 
This value is only available when a snapshot_limit has been set somewhere in the tree under which the dataset resides.\n# TYPE zfs_dataset_snapshot_count_total gauge\nzfs_dataset_snapshot_count_total{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 12\n# HELP zfs_dataset_used_by_children_bytes The amount of space in bytes used by children of this dataset, which would be freed if all the dataset's children were destroyed.\n# TYPE zfs_dataset_used_by_children_bytes gauge\nzfs_dataset_used_by_children_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 1024\n# HELP zfs_dataset_used_by_snapshot_bytes The amount of space in bytes consumed by snapshots of this dataset.\n# TYPE zfs_dataset_used_by_snapshot_bytes gauge\nzfs_dataset_used_by_snapshot_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 1024\n# HELP zfs_dataset_used_bytes The amount of space in bytes consumed by this dataset and all its descendents.\n# TYPE zfs_dataset_used_bytes gauge\nzfs_dataset_used_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 1024\n# HELP zfs_dataset_volume_size_bytes The logical size in bytes of this volume.\n# TYPE zfs_dataset_volume_size_bytes gauge\nzfs_dataset_volume_size_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 1024\n# HELP zfs_dataset_written_bytes The amount of referenced space in bytes written to this dataset since the previous snapshot.\n# TYPE zfs_dataset_written_bytes gauge\nzfs_dataset_written_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 1024\n# HELP zfs_dataset_creation_timestamp The unix timestamp when this dataset was created.\n# TYPE zfs_dataset_creation_timestamp gauge\nzfs_dataset_creation_timestamp{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 1756033110\n`,\n\t\t},\n\t\t{\n\t\t\tname:           `multiple pools`,\n\t\t\tkinds:          []zfs.DatasetKind{zfs.DatasetFilesystem},\n\t\t\tpools:          []string{`testpool1`, 
`testpool2`},\n\t\t\tpropsRequested: []string{`available`},\n\t\t\tmetricNames:    []string{`zfs_dataset_available_bytes`},\n\t\t\tpropsResults: map[string][]datasetResults{\n\t\t\t\t`testpool1`: {\n\t\t\t\t\t{\n\t\t\t\t\t\tname: `testpool1/test`,\n\t\t\t\t\t\tresults: map[string]string{\n\t\t\t\t\t\t\t`available`: `1024`,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t`testpool2`: {\n\t\t\t\t\t{\n\t\t\t\t\t\tname: `testpool2/test`,\n\t\t\t\t\t\tresults: map[string]string{\n\t\t\t\t\t\t\t`available`: `1024`,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetricResults: `# HELP zfs_dataset_available_bytes The amount of space in bytes available to the dataset and all its children.\n# TYPE zfs_dataset_available_bytes gauge\nzfs_dataset_available_bytes{name=\"testpool1/test\",pool=\"testpool1\",type=\"filesystem\"} 1024\nzfs_dataset_available_bytes{name=\"testpool2/test\",pool=\"testpool2\",type=\"filesystem\"} 1024\n`,\n\t\t},\n\t\t{\n\t\t\tname:           `explicit pools`,\n\t\t\tkinds:          []zfs.DatasetKind{zfs.DatasetFilesystem},\n\t\t\tpools:          []string{`testpool1`, `testpool2`},\n\t\t\texplicitPools:  []string{`testpool1`},\n\t\t\tpropsRequested: []string{`available`},\n\t\t\tmetricNames:    []string{`zfs_dataset_available_bytes`},\n\t\t\tpropsResults: map[string][]datasetResults{\n\t\t\t\t`testpool1`: {\n\t\t\t\t\t{\n\t\t\t\t\t\tname: `testpool1/test`,\n\t\t\t\t\t\tresults: map[string]string{\n\t\t\t\t\t\t\t`available`: `1024`,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t`testpool2`: {\n\t\t\t\t\t{\n\t\t\t\t\t\tname: `testpool2/test`,\n\t\t\t\t\t\tresults: map[string]string{\n\t\t\t\t\t\t\t`available`: `1024`,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetricResults: `# HELP zfs_dataset_available_bytes The amount of space in bytes available to the dataset and all its children.\n# TYPE zfs_dataset_available_bytes gauge\nzfs_dataset_available_bytes{name=\"testpool1/test\",pool=\"testpool1\",type=\"filesystem\"} 
1024\n`,\n\t\t},\n\t\t{\n\t\t\tname:           `multiple collectors`,\n\t\t\tkinds:          []zfs.DatasetKind{zfs.DatasetFilesystem, zfs.DatasetSnapshot, zfs.DatasetVolume},\n\t\t\tpools:          []string{`testpool`},\n\t\t\tpropsRequested: []string{`available`},\n\t\t\tmetricNames:    []string{`zfs_dataset_available_bytes`},\n\t\t\tpropsResults: map[string][]datasetResults{\n\t\t\t\t`testpool`: {\n\t\t\t\t\t{\n\t\t\t\t\t\tname: `testpool/test`,\n\t\t\t\t\t\tresults: map[string]string{\n\t\t\t\t\t\t\t`available`: `1024`,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetricResults: `# HELP zfs_dataset_available_bytes The amount of space in bytes available to the dataset and all its children.\n# TYPE zfs_dataset_available_bytes gauge\nzfs_dataset_available_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 1024\nzfs_dataset_available_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"snapshot\"} 1024\nzfs_dataset_available_bytes{name=\"testpool/test\",pool=\"testpool\",type=\"volume\"} 1024\n`,\n\t\t},\n\t\t{\n\t\t\tname:           `unsupported metric`,\n\t\t\tkinds:          []zfs.DatasetKind{zfs.DatasetFilesystem},\n\t\t\tpools:          []string{`testpool`},\n\t\t\tpropsRequested: []string{`unsupported`},\n\t\t\tmetricNames:    []string{`zfs_dataset_unsupported`},\n\t\t\tpropsResults: map[string][]datasetResults{\n\t\t\t\t`testpool`: {\n\t\t\t\t\t{\n\t\t\t\t\t\tname: `testpool/test`,\n\t\t\t\t\t\tresults: map[string]string{\n\t\t\t\t\t\t\t`unsupported`: `1024`,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetricResults: `# HELP zfs_dataset_unsupported !!! 
This property is unsupported, results are likely to be undesirable, please file an issue at https://github.com/pdf/zfs_exporter/issues to have this property supported !!!\n# TYPE zfs_dataset_unsupported gauge\nzfs_dataset_unsupported{name=\"testpool/test\",pool=\"testpool\",type=\"filesystem\"} 1024\n`,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tctrl, ctx := gomock.WithContext(context.Background(), t)\n\t\t\tzfsClient := mock_zfs.NewMockClient(ctrl)\n\t\t\tconfig := defaultConfig(zfsClient)\n\t\t\tif tc.explicitPools != nil {\n\t\t\t\tconfig.Pools = tc.explicitPools\n\t\t\t}\n\n\t\t\tzfsClient.EXPECT().PoolNames().Return(tc.pools, nil).Times(1)\n\t\t\tcollector, err := NewZFS(config)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tcollector.Collectors = make(map[string]State)\n\n\t\t\tfor _, kind := range tc.kinds {\n\t\t\t\tswitch kind {\n\t\t\t\tcase zfs.DatasetFilesystem:\n\t\t\t\t\tcollector.Collectors[`dataset-filesystem`] = State{\n\t\t\t\t\t\tName:       \"dataset-filesystem\",\n\t\t\t\t\t\tEnabled:    boolPointer(true),\n\t\t\t\t\t\tProperties: stringPointer(strings.Join(tc.propsRequested, `,`)),\n\t\t\t\t\t\tfactory:    newFilesystemCollector,\n\t\t\t\t\t}\n\t\t\t\tcase zfs.DatasetSnapshot:\n\t\t\t\t\tcollector.Collectors[`dataset-snapshot`] = State{\n\t\t\t\t\t\tName:       \"dataset-snapshot\",\n\t\t\t\t\t\tEnabled:    boolPointer(true),\n\t\t\t\t\t\tProperties: stringPointer(strings.Join(tc.propsRequested, `,`)),\n\t\t\t\t\t\tfactory:    newSnapshotCollector,\n\t\t\t\t\t}\n\t\t\t\tcase zfs.DatasetVolume:\n\t\t\t\t\tcollector.Collectors[`dataset-volume`] = State{\n\t\t\t\t\t\tName:       \"dataset-volume\",\n\t\t\t\t\t\tEnabled:    boolPointer(true),\n\t\t\t\t\t\tProperties: stringPointer(strings.Join(tc.propsRequested, `,`)),\n\t\t\t\t\t\tfactory:    newVolumeCollector,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, pool := range tc.pools {\n\t\t\t\t\tif 
tc.explicitPools != nil {\n\t\t\t\t\t\twanted := false\n\t\t\t\t\t\tfor _, explicit := range tc.explicitPools {\n\t\t\t\t\t\t\tif pool == explicit {\n\t\t\t\t\t\t\t\twanted = true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif !wanted {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tzfsDatasetResults := make([]zfs.DatasetProperties, len(tc.propsResults[pool]))\n\t\t\t\t\tfor i, propResults := range tc.propsResults[pool] {\n\t\t\t\t\t\tzfsDatasetProperties := mock_zfs.NewMockDatasetProperties(ctrl)\n\t\t\t\t\t\tzfsDatasetProperties.EXPECT().DatasetName().Return(propResults.name).Times(2)\n\t\t\t\t\t\tzfsDatasetProperties.EXPECT().Properties().Return(propResults.results).Times(1)\n\t\t\t\t\t\tzfsDatasetResults[i] = zfsDatasetProperties\n\t\t\t\t\t}\n\t\t\t\t\tzfsDatasets := mock_zfs.NewMockDatasets(ctrl)\n\t\t\t\t\tzfsDatasets.EXPECT().Properties(tc.propsRequested).Return(zfsDatasetResults, nil).Times(1)\n\t\t\t\t\tzfsClient.EXPECT().Datasets(pool, kind).Return(zfsDatasets).Times(1)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err = callCollector(ctx, collector, []byte(tc.metricResults), tc.metricNames); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "collector/pool.go",
    "content": "package collector\n\nimport (\n\t\"fmt\"\n\t\"log/slog\"\n\t\"sync\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nconst (\n\tdefaultPoolProps = `allocated,dedupratio,fragmentation,free,freeing,health,leaked,readonly,size`\n)\n\nvar (\n\tpoolLabels     = []string{`pool`}\n\tpoolProperties = propertyStore{\n\t\tdefaultSubsystem: subsystemPool,\n\t\tdefaultLabels:    poolLabels,\n\t\tstore: map[string]property{\n\t\t\t`allocated`: newProperty(\n\t\t\t\tsubsystemPool,\n\t\t\t\t`allocated_bytes`,\n\t\t\t\t`Amount of storage in bytes used within the pool.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tpoolLabels...,\n\t\t\t),\n\t\t\t`dedupratio`: newProperty(\n\t\t\t\tsubsystemPool,\n\t\t\t\t`deduplication_ratio`,\n\t\t\t\t`The ratio of deduplicated size vs undeduplicated size for data in this pool.`,\n\t\t\t\ttransformMultiplier,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tpoolLabels...,\n\t\t\t),\n\t\t\t`capacity`: newProperty(\n\t\t\t\tsubsystemPool,\n\t\t\t\t`capacity_ratio`,\n\t\t\t\t`Ratio of pool space used.`,\n\t\t\t\ttransformPercentage,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tpoolLabels...,\n\t\t\t),\n\t\t\t`expandsize`: newProperty(\n\t\t\t\tsubsystemPool,\n\t\t\t\t`expand_size_bytes`,\n\t\t\t\t`Amount of uninitialized space within the pool or device that can be used to increase the total capacity of the pool.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tpoolLabels...,\n\t\t\t),\n\t\t\t`fragmentation`: newProperty(\n\t\t\t\tsubsystemPool,\n\t\t\t\t`fragmentation_ratio`,\n\t\t\t\t`The fragmentation ratio of the pool.`,\n\t\t\t\ttransformPercentage,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tpoolLabels...,\n\t\t\t),\n\t\t\t`free`: newProperty(\n\t\t\t\tsubsystemPool,\n\t\t\t\t`free_bytes`,\n\t\t\t\t`The amount of free space in bytes available in the 
pool.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tpoolLabels...,\n\t\t\t),\n\t\t\t`freeing`: newProperty(\n\t\t\t\tsubsystemPool,\n\t\t\t\t`freeing_bytes`,\n\t\t\t\t`The amount of space in bytes remaining to be freed following the destruction of a file system or snapshot.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tpoolLabels...,\n\t\t\t),\n\t\t\t`health`: newProperty(\n\t\t\t\tsubsystemPool,\n\t\t\t\t`health`,\n\t\t\t\tfmt.Sprintf(\"Health status code for the pool [%d: %s, %d: %s, %d: %s, %d: %s, %d: %s, %d: %s, %d: %s].\",\n\t\t\t\t\tpoolOnline, zfs.PoolOnline,\n\t\t\t\t\tpoolDegraded, zfs.PoolDegraded,\n\t\t\t\t\tpoolFaulted, zfs.PoolFaulted,\n\t\t\t\t\tpoolOffline, zfs.PoolOffline,\n\t\t\t\t\tpoolUnavail, zfs.PoolUnavail,\n\t\t\t\t\tpoolRemoved, zfs.PoolRemoved,\n\t\t\t\t\tpoolSuspended, zfs.PoolSuspended,\n\t\t\t\t),\n\t\t\t\ttransformHealthCode,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tpoolLabels...,\n\t\t\t),\n\t\t\t`leaked`: newProperty(\n\t\t\t\tsubsystemPool,\n\t\t\t\t`leaked_bytes`,\n\t\t\t\t`Number of leaked bytes in the pool.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tpoolLabels...,\n\t\t\t),\n\t\t\t`readonly`: newProperty(\n\t\t\t\tsubsystemPool,\n\t\t\t\t`readonly`,\n\t\t\t\t`Read-only status of the pool [0: read-write, 1: read-only].`,\n\t\t\t\ttransformBool,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tpoolLabels...,\n\t\t\t),\n\t\t\t`size`: newProperty(\n\t\t\t\tsubsystemPool,\n\t\t\t\t`size_bytes`,\n\t\t\t\t`Total size in bytes of the storage pool.`,\n\t\t\t\ttransformNumeric,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tpoolLabels...,\n\t\t\t),\n\t\t},\n\t}\n)\n\nfunc init() {\n\tregisterCollector(`pool`, defaultEnabled, defaultPoolProps, newPoolCollector)\n}\n\ntype poolCollector struct {\n\tlog    *slog.Logger\n\tclient zfs.Client\n\tprops  []string\n}\n\nfunc (c *poolCollector) describe(ch chan<- *prometheus.Desc) {\n\tfor _, k := range c.props {\n\t\tprop, err := 
poolProperties.find(k)\n\t\tif err != nil {\n\t\t\tc.log.Warn(propertyUnsupportedMsg, `help`, helpIssue, `collector`, `pool`, `property`, k, `err`, err)\n\t\t\tcontinue\n\t\t}\n\t\tch <- prop.desc\n\t}\n}\n\nfunc (c *poolCollector) update(ch chan<- metric, pools []string, excludes regexpCollection) error {\n\tvar wg sync.WaitGroup\n\terrChan := make(chan error, len(pools))\n\tfor _, pool := range pools {\n\t\twg.Add(1)\n\t\tgo func(pool string) {\n\t\t\tif err := c.updatePoolMetrics(ch, pool); err != nil {\n\t\t\t\terrChan <- err\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(pool)\n\t}\n\twg.Wait()\n\n\tselect {\n\tcase err := <-errChan:\n\t\treturn err\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc (c *poolCollector) updatePoolMetrics(ch chan<- metric, pool string) error {\n\tp := c.client.Pool(pool)\n\tprops, err := p.Properties(c.props...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlabelValues := []string{pool}\n\tfor k, v := range props.Properties() {\n\t\tprop, err := poolProperties.find(k)\n\t\tif err != nil {\n\t\t\tc.log.Warn(propertyUnsupportedMsg, `help`, helpIssue, `collector`, `pool`, `property`, k, `err`, err)\n\t\t}\n\t\tif err = prop.push(ch, v, labelValues...); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc newPoolCollector(l *slog.Logger, c zfs.Client, props []string) (Collector, error) {\n\treturn &poolCollector{log: l, client: c, props: props}, nil\n}\n"
  },
  {
    "path": "collector/pool_test.go",
    "content": "package collector\n\nimport (\n\t\"context\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs/mock_zfs\"\n\t\"go.uber.org/mock/gomock\"\n)\n\nfunc TestPoolMetrics(t *testing.T) {\n\ttestCases := []struct {\n\t\tname           string\n\t\tpools          []string\n\t\texplicitPools  []string\n\t\tpropsRequested []string\n\t\tmetricNames    []string\n\t\tpropsResults   map[string]map[string]string\n\t\tmetricResults  string\n\t}{\n\t\t{\n\t\t\tname:           `all metrics`,\n\t\t\tpools:          []string{`testpool`},\n\t\t\tpropsRequested: []string{`allocated`, `dedupratio`, `capacity`, `expandsize`, `fragmentation`, `free`, `freeing`, `health`, `leaked`, `readonly`, `size`},\n\t\t\tmetricNames:    []string{`zfs_pool_allocated_bytes`, `zfs_pool_deduplication_ratio`, `zfs_pool_capacity_ratio`, `zfs_pool_expand_size_bytes`, `zfs_pool_fragmentation_ratio`, `zfs_pool_free_bytes`, `zfs_pool_freeing_bytes`, `zfs_pool_health`, `zfs_pool_leaked_bytes`, `zfs_pool_readonly`, `zfs_pool_size_bytes`},\n\t\t\tpropsResults: map[string]map[string]string{\n\t\t\t\t`testpool`: {\n\t\t\t\t\t`allocated`:     `1024`,\n\t\t\t\t\t`dedupratio`:    `2.50`,\n\t\t\t\t\t`capacity`:      `50`,\n\t\t\t\t\t`expandsize`:    `2048`,\n\t\t\t\t\t`fragmentation`: `25`,\n\t\t\t\t\t`free`:          `1024`,\n\t\t\t\t\t`freeing`:       `0`,\n\t\t\t\t\t`health`:        `ONLINE`,\n\t\t\t\t\t`leaked`:        `1`,\n\t\t\t\t\t`readonly`:      `off`,\n\t\t\t\t\t`size`:          `2048`,\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetricResults: `# HELP zfs_pool_allocated_bytes Amount of storage in bytes used within the pool.\n# TYPE zfs_pool_allocated_bytes gauge\nzfs_pool_allocated_bytes{pool=\"testpool\"} 1024\n# HELP zfs_pool_capacity_ratio Ratio of pool space used.\n# TYPE zfs_pool_capacity_ratio gauge\nzfs_pool_capacity_ratio{pool=\"testpool\"} 0.5\n# HELP zfs_pool_deduplication_ratio The ratio of deduplicated size vs undeduplicated size for data in this pool.\n# TYPE 
zfs_pool_deduplication_ratio gauge\nzfs_pool_deduplication_ratio{pool=\"testpool\"} 0.4\n# HELP zfs_pool_expand_size_bytes Amount of uninitialized space within the pool or device that can be used to increase the total capacity of the pool.\n# TYPE zfs_pool_expand_size_bytes gauge\nzfs_pool_expand_size_bytes{pool=\"testpool\"} 2048\n# HELP zfs_pool_fragmentation_ratio The fragmentation ratio of the pool.\n# TYPE zfs_pool_fragmentation_ratio gauge\nzfs_pool_fragmentation_ratio{pool=\"testpool\"} 0.25\n# HELP zfs_pool_free_bytes The amount of free space in bytes available in the pool.\n# TYPE zfs_pool_free_bytes gauge\nzfs_pool_free_bytes{pool=\"testpool\"} 1024\n# HELP zfs_pool_freeing_bytes The amount of space in bytes remaining to be freed following the destruction of a file system or snapshot.\n# TYPE zfs_pool_freeing_bytes gauge\nzfs_pool_freeing_bytes{pool=\"testpool\"} 0\n# HELP zfs_pool_health Health status code for the pool [0: ONLINE, 1: DEGRADED, 2: FAULTED, 3: OFFLINE, 4: UNAVAIL, 5: REMOVED, 6: SUSPENDED].\n# TYPE zfs_pool_health gauge\nzfs_pool_health{pool=\"testpool\"} 0\n# HELP zfs_pool_leaked_bytes Number of leaked bytes in the pool.\n# TYPE zfs_pool_leaked_bytes gauge\nzfs_pool_leaked_bytes{pool=\"testpool\"} 1\n# HELP zfs_pool_readonly Read-only status of the pool [0: read-write, 1: read-only].\n# TYPE zfs_pool_readonly gauge\nzfs_pool_readonly{pool=\"testpool\"} 0\n# HELP zfs_pool_size_bytes Total size in bytes of the storage pool.\n# TYPE zfs_pool_size_bytes gauge\nzfs_pool_size_bytes{pool=\"testpool\"} 2048\n`,\n\t\t},\n\t\t{\n\t\t\tname:           `multiple pools`,\n\t\t\tpools:          []string{`testpool1`, `testpool2`},\n\t\t\tpropsRequested: []string{`allocated`},\n\t\t\tmetricNames:    []string{`zfs_pool_allocated_bytes`},\n\t\t\tpropsResults: map[string]map[string]string{\n\t\t\t\t`testpool1`: {\n\t\t\t\t\t`allocated`: `1024`,\n\t\t\t\t},\n\t\t\t\t`testpool2`: {\n\t\t\t\t\t`allocated`: `2048`,\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetricResults: `# 
HELP zfs_pool_allocated_bytes Amount of storage in bytes used within the pool.\n# TYPE zfs_pool_allocated_bytes gauge\nzfs_pool_allocated_bytes{pool=\"testpool1\"} 1024\nzfs_pool_allocated_bytes{pool=\"testpool2\"} 2048\n`,\n\t\t},\n\t\t{\n\t\t\tname:           `explicit pools`,\n\t\t\tpools:          []string{`testpool1`, `testpool2`},\n\t\t\texplicitPools:  []string{`testpool1`},\n\t\t\tpropsRequested: []string{`allocated`},\n\t\t\tmetricNames:    []string{`zfs_pool_allocated_bytes`},\n\t\t\tpropsResults: map[string]map[string]string{\n\t\t\t\t`testpool1`: {\n\t\t\t\t\t`allocated`: `1024`,\n\t\t\t\t},\n\t\t\t\t`testpool2`: {\n\t\t\t\t\t`allocated`: `2048`,\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetricResults: `# HELP zfs_pool_allocated_bytes Amount of storage in bytes used within the pool.\n# TYPE zfs_pool_allocated_bytes gauge\nzfs_pool_allocated_bytes{pool=\"testpool1\"} 1024\n`,\n\t\t},\n\t\t{\n\t\t\tname:           `health status`,\n\t\t\tpools:          []string{`onlinepool`, `degradedpool`, `faultedpool`, `offlinepool`, `unavailpool`, `removedpool`, `suspendedpool`},\n\t\t\tpropsRequested: []string{`health`},\n\t\t\tmetricNames:    []string{`zfs_pool_health`},\n\t\t\tpropsResults: map[string]map[string]string{\n\t\t\t\t`onlinepool`: {\n\t\t\t\t\t`health`: `ONLINE`,\n\t\t\t\t},\n\t\t\t\t`degradedpool`: {\n\t\t\t\t\t`health`: `DEGRADED`,\n\t\t\t\t},\n\t\t\t\t`faultedpool`: {\n\t\t\t\t\t`health`: `FAULTED`,\n\t\t\t\t},\n\t\t\t\t`offlinepool`: {\n\t\t\t\t\t`health`: `OFFLINE`,\n\t\t\t\t},\n\t\t\t\t`unavailpool`: {\n\t\t\t\t\t`health`: `UNAVAIL`,\n\t\t\t\t},\n\t\t\t\t`removedpool`: {\n\t\t\t\t\t`health`: `REMOVED`,\n\t\t\t\t},\n\t\t\t\t`suspendedpool`: {\n\t\t\t\t\t`health`: `SUSPENDED`,\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetricResults: `# HELP zfs_pool_health Health status code for the pool [0: ONLINE, 1: DEGRADED, 2: FAULTED, 3: OFFLINE, 4: UNAVAIL, 5: REMOVED, 6: SUSPENDED].\n# TYPE zfs_pool_health gauge\nzfs_pool_health{pool=\"onlinepool\"} 
0\nzfs_pool_health{pool=\"degradedpool\"} 1\nzfs_pool_health{pool=\"faultedpool\"} 2\nzfs_pool_health{pool=\"offlinepool\"} 3\nzfs_pool_health{pool=\"unavailpool\"} 4\nzfs_pool_health{pool=\"removedpool\"} 5\nzfs_pool_health{pool=\"suspendedpool\"} 6\n`,\n\t\t},\n\t\t{\n\t\t\tname:           `unsupported metric`,\n\t\t\tpools:          []string{`testpool`},\n\t\t\tpropsRequested: []string{`unsupported`},\n\t\t\tmetricNames:    []string{`zfs_pool_unsupported`},\n\t\t\tpropsResults: map[string]map[string]string{\n\t\t\t\t`testpool`: {\n\t\t\t\t\t`unsupported`: `1024`,\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetricResults: `# HELP zfs_pool_unsupported !!! This property is unsupported, results are likely to be undesirable, please file an issue at https://github.com/pdf/zfs_exporter/issues to have this property supported !!!\n# TYPE zfs_pool_unsupported gauge\nzfs_pool_unsupported{pool=\"testpool\"} 1024\n`,\n\t\t},\n\t\t{\n\t\t\tname:           `legacy fragmentation/dedupratio`,\n\t\t\tpools:          []string{`testpool`},\n\t\t\tpropsRequested: []string{`fragmentation`, `dedupratio`},\n\t\t\tmetricNames:    []string{`zfs_pool_fragmentation_ratio`, `zfs_pool_deduplication_ratio`},\n\t\t\tpropsResults: map[string]map[string]string{\n\t\t\t\t`testpool`: {\n\t\t\t\t\t`fragmentation`: `5%`,\n\t\t\t\t\t`dedupratio`:    `2.50x`,\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetricResults: `# HELP zfs_pool_fragmentation_ratio The fragmentation ratio of the pool.\n# TYPE zfs_pool_fragmentation_ratio gauge\nzfs_pool_fragmentation_ratio{pool=\"testpool\"} 0.05\n# HELP zfs_pool_deduplication_ratio The ratio of deduplicated size vs undeduplicated size for data in this pool.\n# TYPE zfs_pool_deduplication_ratio gauge\nzfs_pool_deduplication_ratio{pool=\"testpool\"} 0.4\n`,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tctrl, ctx := gomock.WithContext(context.Background(), t)\n\t\t\tzfsClient := 
mock_zfs.NewMockClient(ctrl)\n\t\t\tconfig := defaultConfig(zfsClient)\n\t\t\tif tc.explicitPools != nil {\n\t\t\t\tconfig.Pools = tc.explicitPools\n\t\t\t}\n\n\t\t\tzfsClient.EXPECT().PoolNames().Return(tc.pools, nil).Times(1)\n\t\t\tfor _, pool := range tc.pools {\n\t\t\t\tif tc.explicitPools != nil {\n\t\t\t\t\twanted := false\n\t\t\t\t\tfor _, explicit := range tc.explicitPools {\n\t\t\t\t\t\tif pool == explicit {\n\t\t\t\t\t\t\twanted = true\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif !wanted {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tzfsPoolProperties := mock_zfs.NewMockPoolProperties(ctrl)\n\t\t\t\tzfsPoolProperties.EXPECT().Properties().Return(tc.propsResults[pool]).Times(1)\n\t\t\t\tzfsPool := mock_zfs.NewMockPool(ctrl)\n\t\t\t\tzfsPool.EXPECT().Properties(tc.propsRequested).Return(zfsPoolProperties, nil).Times(1)\n\t\t\t\tzfsClient.EXPECT().Pool(pool).Return(zfsPool).Times(1)\n\t\t\t}\n\n\t\t\tcollector, err := NewZFS(config)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tcollector.Collectors = map[string]State{\n\t\t\t\t`pool`: {\n\t\t\t\t\tName:       \"pool\",\n\t\t\t\t\tEnabled:    boolPointer(true),\n\t\t\t\t\tProperties: stringPointer(strings.Join(tc.propsRequested, `,`)),\n\t\t\t\t\tfactory:    newPoolCollector,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tif err = callCollector(ctx, collector, []byte(tc.metricResults), tc.metricNames); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "collector/transform.go",
    "content": "package collector\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs\"\n)\n\ntype poolHealthCode int\n\nconst (\n\tpoolOnline poolHealthCode = iota\n\tpoolDegraded\n\tpoolFaulted\n\tpoolOffline\n\tpoolUnavail\n\tpoolRemoved\n\tpoolSuspended\n)\n\nfunc transformNumeric(value string) (float64, error) {\n\tif value == `-` || value == `none` {\n\t\treturn 0, nil\n\t}\n\treturn strconv.ParseFloat(value, 64)\n}\n\nfunc transformHealthCode(status string) (float64, error) {\n\tvar result poolHealthCode\n\tswitch zfs.PoolStatus(status) {\n\tcase zfs.PoolOnline:\n\t\tresult = poolOnline\n\tcase zfs.PoolDegraded:\n\t\tresult = poolDegraded\n\tcase zfs.PoolFaulted:\n\t\tresult = poolFaulted\n\tcase zfs.PoolOffline:\n\t\tresult = poolOffline\n\tcase zfs.PoolUnavail:\n\t\tresult = poolUnavail\n\tcase zfs.PoolRemoved:\n\t\tresult = poolRemoved\n\tcase zfs.PoolSuspended:\n\t\tresult = poolSuspended\n\tdefault:\n\t\treturn -1, fmt.Errorf(`unknown pool health status: %s`, status)\n\t}\n\n\treturn float64(result), nil\n}\n\nfunc transformBool(value string) (float64, error) {\n\tswitch value {\n\tcase `on`, `yes`, `enabled`, `active`:\n\t\treturn 1, nil\n\tcase `off`, `no`, `disabled`, `inactive`, `-`:\n\t\treturn 0, nil\n\t}\n\n\treturn -1, fmt.Errorf(`could not convert '%s' to bool`, value)\n}\n\nfunc transformPercentage(value string) (float64, error) {\n\tif len(value) > 0 && value[len(value)-1] == '%' {\n\t\tvalue = value[:len(value)-1]\n\t}\n\tv, err := transformNumeric(value)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn v / 100, nil\n}\n\nfunc transformMultiplier(value string) (float64, error) {\n\tif len(value) > 0 && value[len(value)-1] == 'x' {\n\t\tvalue = value[:len(value)-1]\n\t}\n\tv, err := transformNumeric(value)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\treturn 1 / v, nil\n}\n"
  },
  {
    "path": "collector/zfs.go",
    "content": "package collector\n\nimport (\n\t\"context\"\n\t\"log/slog\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\ntype regexpCollection []*regexp.Regexp\n\nfunc (c regexpCollection) MatchString(input string) bool {\n\tfor _, r := range c {\n\t\tif r.MatchString(input) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// ZFSConfig configures a ZFS collector\ntype ZFSConfig struct {\n\tDisableMetrics bool\n\tDeadline       time.Duration\n\tPools          []string\n\tExcludes       []string\n\tLogger         *slog.Logger\n\tZFSClient      zfs.Client\n}\n\n// ZFS collector\ntype ZFS struct {\n\tPools          []string\n\tCollectors     map[string]State\n\tclient         zfs.Client\n\tdisableMetrics bool\n\tdeadline       time.Duration\n\tcache          *metricCache\n\tready          chan struct{}\n\tlogger         *slog.Logger\n\texcludes       regexpCollection\n}\n\n// Describe implements the prometheus.Collector interface.\nfunc (c *ZFS) Describe(ch chan<- *prometheus.Desc) {\n\tif !c.disableMetrics {\n\t\tch <- scrapeDurationDesc\n\t\tch <- scrapeSuccessDesc\n\t}\n\n\tfor _, state := range c.Collectors {\n\t\tif !*state.Enabled {\n\t\t\tcontinue\n\t\t}\n\n\t\tcollector, err := state.factory(c.logger, c.client, strings.Split(*state.Properties, `,`))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tcollector.describe(ch)\n\t}\n}\n\n// Collect implements the prometheus.Collector interface.\nfunc (c *ZFS) Collect(ch chan<- prometheus.Metric) {\n\tselect {\n\tcase <-c.ready:\n\tdefault:\n\t\tc.sendCached(ch, make(map[string]struct{}))\n\t\treturn\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), c.deadline)\n\tdefer cancel()\n\n\tcache := newMetricCache()\n\tproxy := make(chan metric)\n\t// Synchronize on collector completion.\n\twg := sync.WaitGroup{}\n\twg.Add(len(c.Collectors))\n\t// Synchonize after timeout event, 
ensuring no writers are still active when we return control.\n\ttimeout := make(chan struct{})\n\tfinalized := make(chan struct{})\n\tfinalize := func() {\n\t\tselect {\n\t\tcase <-finalized:\n\t\tdefault:\n\t\t\tclose(finalized)\n\t\t}\n\t}\n\n\t// Close the proxy channel upon collector completion.\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(proxy)\n\t}()\n\n\t// Cache metrics as they come in via the proxy channel, and ship them out if we've not exceeded the deadline.\n\tgo func() {\n\t\tfor metric := range proxy {\n\t\t\tcache.add(metric)\n\t\t\tselect {\n\t\t\tcase <-timeout:\n\t\t\t\tfinalize()\n\t\t\tdefault:\n\t\t\t\tch <- metric.prometheus\n\t\t\t}\n\t\t}\n\t\t// Signal completion and update full cache.\n\t\tc.cache.replace(cache)\n\t\tcancel()\n\t\t// Notify next collection that we're ready to collect again\n\t\tc.ready <- struct{}{}\n\t}()\n\n\tpools, poolErr := c.getPools(c.Pools)\n\n\tfor name, state := range c.Collectors {\n\t\tif !*state.Enabled {\n\t\t\twg.Done()\n\t\t\tcontinue\n\t\t}\n\n\t\tif poolErr != nil {\n\t\t\tc.publishCollectorMetrics(ctx, name, poolErr, 0, proxy)\n\t\t\twg.Done()\n\t\t\tcontinue\n\t\t}\n\n\t\tcollector, err := state.factory(c.logger, c.client, strings.Split(*state.Properties, `,`))\n\t\tif err != nil {\n\t\t\tc.logger.Error(\"Error instantiating collector\", \"collector\", name, \"err\", err)\n\t\t\twg.Done()\n\t\t\tcontinue\n\t\t}\n\t\tgo func(name string, collector Collector) {\n\t\t\tc.execute(ctx, name, collector, proxy, pools)\n\t\t\twg.Done()\n\t\t}(name, collector)\n\t}\n\n\t// Wait for completion or timeout\n\t<-ctx.Done()\n\terr := ctx.Err()\n\tif err == context.Canceled {\n\t\tfinalize()\n\t} else if err != nil {\n\t\t// Upon exceeding deadline, send cached data for any metrics that have not already been reported.\n\t\tclose(timeout) // assert timeout for flow control in other goroutines\n\t\tc.cache.merge(cache)\n\t\tcacheIndex := cache.index()\n\t\tc.sendCached(ch, cacheIndex)\n\t}\n\t// Ensure there are no 
in-flight writes to the upstream channel\n\t<-finalized\n}\n\n// sendCached values that do not appear in the current cacheIndex.\nfunc (c *ZFS) sendCached(ch chan<- prometheus.Metric, cacheIndex map[string]struct{}) {\n\tc.cache.RLock()\n\tdefer c.cache.RUnlock()\n\tfor name, metric := range c.cache.cache {\n\t\tif _, ok := cacheIndex[name]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tch <- metric\n\t}\n}\n\nfunc (c *ZFS) getPools(pools []string) ([]string, error) {\n\tpoolNames, err := c.client.PoolNames()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Return all pools if not explicitly configured.\n\tif len(pools) == 0 {\n\t\treturn poolNames, nil\n\t}\n\n\t// Configured pools may not exist, so append available pools as they're found, rather than allocating up front.\n\tresult := make([]string, 0)\n\tfor _, want := range pools {\n\t\tfound := false\n\t\tfor _, avail := range poolNames {\n\t\t\tif want == avail {\n\t\t\t\tresult = append(result, want)\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tc.logger.Warn(\"Pool unavailable\", \"pool\", want)\n\t\t}\n\t}\n\n\treturn result, nil\n}\n\nfunc (c *ZFS) execute(ctx context.Context, name string, collector Collector, ch chan<- metric, pools []string) {\n\tbegin := time.Now()\n\terr := collector.update(ch, pools, c.excludes)\n\tduration := time.Since(begin)\n\n\tc.publishCollectorMetrics(ctx, name, err, duration, ch)\n}\n\nfunc (c *ZFS) publishCollectorMetrics(ctx context.Context, name string, err error, duration time.Duration, ch chan<- metric) {\n\tvar success float64\n\n\tif err != nil {\n\t\tc.logger.Error(\"Executing collector\", \"status\", \"error\", \"collector\", name, \"durationSeconds\", duration.Seconds(), \"err\", err)\n\t\tsuccess = 0\n\t} else {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\terr = ctx.Err()\n\t\tdefault:\n\t\t\terr = nil\n\t\t}\n\t\tif err != nil && err != context.Canceled {\n\t\t\tc.logger.Warn(\"Executing collector\", \"status\", \"delayed\", \"collector\", name, 
\"durationSeconds\", duration.Seconds(), \"err\", ctx.Err())\n\t\t\tsuccess = 0\n\t\t} else {\n\t\t\tc.logger.Debug(\"Executing collector\", \"status\", \"ok\", \"collector\", name, \"durationSeconds\", duration.Seconds())\n\t\t\tsuccess = 1\n\t\t}\n\t}\n\n\tif c.disableMetrics {\n\t\treturn\n\t}\n\tch <- metric{\n\t\tname:       scrapeDurationDescName,\n\t\tprometheus: prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name),\n\t}\n\tch <- metric{\n\t\tname:       scrapeSuccessDescName,\n\t\tprometheus: prometheus.MustNewConstMetric(scrapeSuccessDesc, prometheus.GaugeValue, success, name),\n\t}\n}\n\n// NewZFS instantiates a ZFS collector with the provided ZFSConfig\nfunc NewZFS(config ZFSConfig) (*ZFS, error) {\n\tsort.Strings(config.Pools)\n\tsort.Strings(config.Excludes)\n\texcludes := make(regexpCollection, len(config.Excludes))\n\tfor i, v := range config.Excludes {\n\t\texcludes[i] = regexp.MustCompile(v)\n\t}\n\tready := make(chan struct{}, 1)\n\tready <- struct{}{}\n\treturn &ZFS{\n\t\tdisableMetrics: config.DisableMetrics,\n\t\tclient:         config.ZFSClient,\n\t\tdeadline:       config.Deadline,\n\t\tPools:          config.Pools,\n\t\tCollectors:     collectorStates,\n\t\texcludes:       excludes,\n\t\tcache:          newMetricCache(),\n\t\tready:          ready,\n\t\tlogger:         config.Logger,\n\t}, nil\n}\n"
  },
  {
    "path": "collector/zfs_test.go",
    "content": "package collector\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs/mock_zfs\"\n\t\"go.uber.org/mock/gomock\"\n)\n\nfunc TestZFSCollectInvalidPools(t *testing.T) {\n\tconst result = `# HELP zfs_scrape_collector_duration_seconds zfs_exporter: Duration of a collector scrape.\n# TYPE zfs_scrape_collector_duration_seconds gauge\nzfs_scrape_collector_duration_seconds{collector=\"pool\"} 0\n# HELP zfs_scrape_collector_success zfs_exporter: Whether a collector succeeded.\n# TYPE zfs_scrape_collector_success gauge\nzfs_scrape_collector_success{collector=\"pool\"} 0\n`\n\n\tctrl, ctx := gomock.WithContext(context.Background(), t)\n\tzfsClient := mock_zfs.NewMockClient(ctrl)\n\tzfsClient.EXPECT().PoolNames().Return(nil, errors.New(`Error returned from PoolNames()`)).Times(1)\n\n\tconfig := defaultConfig(zfsClient)\n\tconfig.DisableMetrics = false\n\tcollector, err := NewZFS(config)\n\tcollector.Collectors = map[string]State{\n\t\t`pool`: {\n\t\t\tName:       \"pool\",\n\t\t\tEnabled:    boolPointer(true),\n\t\t\tProperties: stringPointer(``),\n\t\t\tfactory:    newPoolCollector,\n\t\t},\n\t}\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err = callCollector(ctx, collector, []byte(result), []string{`zfs_scrape_collector_duration_seconds`, `zfs_scrape_collector_success`}); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/pdf/zfs_exporter/v2\n\ngo 1.24.0\n\ntoolchain go1.24.2\n\nrequire (\n\tgithub.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect\n\tgithub.com/prometheus/client_golang v1.23.2\n\tgithub.com/prometheus/common v0.67.4\n\tgolang.org/x/sys v0.38.0 // indirect\n)\n\nrequire (\n\tgithub.com/alecthomas/kingpin/v2 v2.4.0\n\tgithub.com/prometheus/exporter-toolkit v0.15.0\n\tgo.uber.org/mock v0.6.0\n)\n\nrequire (\n\tgithub.com/beorn7/perks v1.0.1 // indirect\n\tgithub.com/cespare/xxhash/v2 v2.3.0 // indirect\n\tgithub.com/coreos/go-systemd/v22 v22.6.0 // indirect\n\tgithub.com/golang-jwt/jwt/v5 v5.3.0 // indirect\n\tgithub.com/google/uuid v1.6.0 // indirect\n\tgithub.com/jpillora/backoff v1.0.0 // indirect\n\tgithub.com/kylelemons/godebug v1.1.0 // indirect\n\tgithub.com/mdlayher/socket v0.5.1 // indirect\n\tgithub.com/mdlayher/vsock v1.2.1 // indirect\n\tgithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect\n\tgithub.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect\n\tgithub.com/prometheus/client_model v0.6.2 // indirect\n\tgithub.com/prometheus/procfs v0.19.2 // indirect\n\tgithub.com/rogpeppe/go-internal v1.11.0 // indirect\n\tgithub.com/xhit/go-str2duration/v2 v2.1.0 // indirect\n\tgo.yaml.in/yaml/v2 v2.4.3 // indirect\n\tgolang.org/x/crypto v0.45.0 // indirect\n\tgolang.org/x/mod v0.29.0 // indirect\n\tgolang.org/x/net v0.47.0 // indirect\n\tgolang.org/x/oauth2 v0.33.0 // indirect\n\tgolang.org/x/sync v0.18.0 // indirect\n\tgolang.org/x/text v0.31.0 // indirect\n\tgolang.org/x/time v0.14.0 // indirect\n\tgolang.org/x/tools v0.38.0 // indirect\n\tgoogle.golang.org/protobuf v1.36.10 // indirect\n)\n\ntool go.uber.org/mock/mockgen\n"
  },
  {
    "path": "go.sum",
    "content": "github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=\ngithub.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=\ngithub.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=\ngithub.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=\ngithub.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=\ngithub.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=\ngithub.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=\ngithub.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=\ngithub.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=\ngithub.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=\ngithub.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=\ngithub.com/klauspost/compress v1.18.0 
h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=\ngithub.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=\ngithub.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=\ngithub.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos=\ngithub.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ=\ngithub.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=\ngithub.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=\ngithub.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=\ngithub.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=\ngithub.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=\ngithub.com/prometheus/client_model v0.6.2/go.mod 
h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=\ngithub.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=\ngithub.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=\ngithub.com/prometheus/exporter-toolkit v0.15.0 h1:Pcle5sSViwR1x0gdPd0wtYrPQENBieQAM7TmT0qtb2U=\ngithub.com/prometheus/exporter-toolkit v0.15.0/go.mod h1:OyRWd2iTo6Xge9Kedvv0IhCrJSBu36JCfJ2yVniRIYk=\ngithub.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=\ngithub.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=\ngithub.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=\ngithub.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=\ngithub.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=\ngithub.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=\ngithub.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=\ngithub.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=\ngithub.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=\ngithub.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=\ngithub.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=\ngithub.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=\ngo.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=\ngo.uber.org/goleak v1.3.0/go.mod 
h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=\ngo.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=\ngo.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=\ngo.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=\ngo.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=\ngolang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=\ngolang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=\ngolang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=\ngolang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=\ngolang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=\ngolang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=\ngolang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=\ngolang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=\ngolang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=\ngolang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=\ngolang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=\ngolang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=\ngolang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=\ngolang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=\ngolang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=\ngolang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=\ngolang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=\ngolang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=\ngoogle.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=\ngoogle.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=\ngopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\n"
  },
  {
    "path": "zfs/dataset.go",
    "content": "package zfs\n\nimport (\n\t\"strings\"\n)\n\n// DatasetKind enum of supported dataset types\ntype DatasetKind string\n\nconst (\n\t// DatasetFilesystem enum entry\n\tDatasetFilesystem DatasetKind = `filesystem`\n\t// DatasetVolume enum entry\n\tDatasetVolume DatasetKind = `volume`\n\t// DatasetSnapshot enum entry\n\tDatasetSnapshot DatasetKind = `snapshot`\n)\n\ntype datasetsImpl struct {\n\tpool string\n\tkind DatasetKind\n}\n\nfunc (d datasetsImpl) Pool() string {\n\treturn d.pool\n}\n\nfunc (d datasetsImpl) Kind() DatasetKind {\n\treturn d.kind\n}\n\nfunc (d datasetsImpl) Properties(props ...string) ([]DatasetProperties, error) {\n\thandler := newDatasetHandler()\n\tif err := execute(d.pool, handler, `zfs`, `get`, `-Hprt`, string(d.kind), `-o`, `name,property,value`, strings.Join(props, `,`)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn handler.datasets(), nil\n}\n\ntype datasetPropertiesImpl struct {\n\tdatasetName string\n\tproperties  map[string]string\n}\n\nfunc (p *datasetPropertiesImpl) DatasetName() string {\n\treturn p.datasetName\n}\n\nfunc (p *datasetPropertiesImpl) Properties() map[string]string {\n\treturn p.properties\n}\n\n// datasetHandler handles parsing of the data returned from the CLI into Dataset structs\ntype datasetHandler struct {\n\tstore map[string]*datasetPropertiesImpl\n}\n\n// processLine implements the handler interface\nfunc (h *datasetHandler) processLine(pool string, line []string) error {\n\tif len(line) != 3 || !strings.HasPrefix(line[0], pool) {\n\t\treturn ErrInvalidOutput\n\t}\n\tif _, ok := h.store[line[0]]; !ok {\n\t\th.store[line[0]] = newDatasetPropertiesImpl(line[0])\n\t}\n\th.store[line[0]].properties[line[1]] = line[2]\n\treturn nil\n}\n\nfunc (h *datasetHandler) datasets() []DatasetProperties {\n\tresult := make([]DatasetProperties, len(h.store))\n\ti := 0\n\tfor _, dataset := range h.store {\n\t\tresult[i] = dataset\n\t\ti++\n\t}\n\treturn result\n}\n\nfunc newDatasetPropertiesImpl(name string) 
*datasetPropertiesImpl {\n\treturn &datasetPropertiesImpl{\n\t\tdatasetName: name,\n\t\tproperties:  make(map[string]string),\n\t}\n}\n\nfunc newDatasetsImpl(pool string, kind DatasetKind) datasetsImpl {\n\treturn datasetsImpl{\n\t\tpool: pool,\n\t\tkind: kind,\n\t}\n}\n\nfunc newDatasetHandler() *datasetHandler {\n\treturn &datasetHandler{\n\t\tstore: make(map[string]*datasetPropertiesImpl),\n\t}\n}\n"
  },
  {
    "path": "zfs/mock_zfs/mock_zfs.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: zfs.go\n//\n// Generated by this command:\n//\n//\tmockgen -source=zfs.go -destination=mock_zfs/mock_zfs.go -package=mock_zfs\n//\n\n// Package mock_zfs is a generated GoMock package.\npackage mock_zfs\n\nimport (\n\treflect \"reflect\"\n\n\tzfs \"github.com/pdf/zfs_exporter/v2/zfs\"\n\tgomock \"go.uber.org/mock/gomock\"\n)\n\n// MockClient is a mock of Client interface.\ntype MockClient struct {\n\tctrl     *gomock.Controller\n\trecorder *MockClientMockRecorder\n\tisgomock struct{}\n}\n\n// MockClientMockRecorder is the mock recorder for MockClient.\ntype MockClientMockRecorder struct {\n\tmock *MockClient\n}\n\n// NewMockClient creates a new mock instance.\nfunc NewMockClient(ctrl *gomock.Controller) *MockClient {\n\tmock := &MockClient{ctrl: ctrl}\n\tmock.recorder = &MockClientMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockClient) EXPECT() *MockClientMockRecorder {\n\treturn m.recorder\n}\n\n// Datasets mocks base method.\nfunc (m *MockClient) Datasets(pool string, kind zfs.DatasetKind) zfs.Datasets {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Datasets\", pool, kind)\n\tret0, _ := ret[0].(zfs.Datasets)\n\treturn ret0\n}\n\n// Datasets indicates an expected call of Datasets.\nfunc (mr *MockClientMockRecorder) Datasets(pool, kind any) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Datasets\", reflect.TypeOf((*MockClient)(nil).Datasets), pool, kind)\n}\n\n// Pool mocks base method.\nfunc (m *MockClient) Pool(name string) zfs.Pool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Pool\", name)\n\tret0, _ := ret[0].(zfs.Pool)\n\treturn ret0\n}\n\n// Pool indicates an expected call of Pool.\nfunc (mr *MockClientMockRecorder) Pool(name any) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Pool\", 
reflect.TypeOf((*MockClient)(nil).Pool), name)\n}\n\n// PoolNames mocks base method.\nfunc (m *MockClient) PoolNames() ([]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"PoolNames\")\n\tret0, _ := ret[0].([]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// PoolNames indicates an expected call of PoolNames.\nfunc (mr *MockClientMockRecorder) PoolNames() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"PoolNames\", reflect.TypeOf((*MockClient)(nil).PoolNames))\n}\n\n// MockPool is a mock of Pool interface.\ntype MockPool struct {\n\tctrl     *gomock.Controller\n\trecorder *MockPoolMockRecorder\n\tisgomock struct{}\n}\n\n// MockPoolMockRecorder is the mock recorder for MockPool.\ntype MockPoolMockRecorder struct {\n\tmock *MockPool\n}\n\n// NewMockPool creates a new mock instance.\nfunc NewMockPool(ctrl *gomock.Controller) *MockPool {\n\tmock := &MockPool{ctrl: ctrl}\n\tmock.recorder = &MockPoolMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockPool) EXPECT() *MockPoolMockRecorder {\n\treturn m.recorder\n}\n\n// Name mocks base method.\nfunc (m *MockPool) Name() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Name\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}\n\n// Name indicates an expected call of Name.\nfunc (mr *MockPoolMockRecorder) Name() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Name\", reflect.TypeOf((*MockPool)(nil).Name))\n}\n\n// Properties mocks base method.\nfunc (m *MockPool) Properties(props ...string) (zfs.PoolProperties, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []any{}\n\tfor _, a := range props {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Properties\", varargs...)\n\tret0, _ := ret[0].(zfs.PoolProperties)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// Properties indicates an 
expected call of Properties.\nfunc (mr *MockPoolMockRecorder) Properties(props ...any) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Properties\", reflect.TypeOf((*MockPool)(nil).Properties), props...)\n}\n\n// MockPoolProperties is a mock of PoolProperties interface.\ntype MockPoolProperties struct {\n\tctrl     *gomock.Controller\n\trecorder *MockPoolPropertiesMockRecorder\n\tisgomock struct{}\n}\n\n// MockPoolPropertiesMockRecorder is the mock recorder for MockPoolProperties.\ntype MockPoolPropertiesMockRecorder struct {\n\tmock *MockPoolProperties\n}\n\n// NewMockPoolProperties creates a new mock instance.\nfunc NewMockPoolProperties(ctrl *gomock.Controller) *MockPoolProperties {\n\tmock := &MockPoolProperties{ctrl: ctrl}\n\tmock.recorder = &MockPoolPropertiesMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockPoolProperties) EXPECT() *MockPoolPropertiesMockRecorder {\n\treturn m.recorder\n}\n\n// Properties mocks base method.\nfunc (m *MockPoolProperties) Properties() map[string]string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Properties\")\n\tret0, _ := ret[0].(map[string]string)\n\treturn ret0\n}\n\n// Properties indicates an expected call of Properties.\nfunc (mr *MockPoolPropertiesMockRecorder) Properties() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Properties\", reflect.TypeOf((*MockPoolProperties)(nil).Properties))\n}\n\n// MockDatasets is a mock of Datasets interface.\ntype MockDatasets struct {\n\tctrl     *gomock.Controller\n\trecorder *MockDatasetsMockRecorder\n\tisgomock struct{}\n}\n\n// MockDatasetsMockRecorder is the mock recorder for MockDatasets.\ntype MockDatasetsMockRecorder struct {\n\tmock *MockDatasets\n}\n\n// NewMockDatasets creates a new mock instance.\nfunc NewMockDatasets(ctrl *gomock.Controller) *MockDatasets {\n\tmock := 
&MockDatasets{ctrl: ctrl}\n\tmock.recorder = &MockDatasetsMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockDatasets) EXPECT() *MockDatasetsMockRecorder {\n\treturn m.recorder\n}\n\n// Kind mocks base method.\nfunc (m *MockDatasets) Kind() zfs.DatasetKind {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Kind\")\n\tret0, _ := ret[0].(zfs.DatasetKind)\n\treturn ret0\n}\n\n// Kind indicates an expected call of Kind.\nfunc (mr *MockDatasetsMockRecorder) Kind() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Kind\", reflect.TypeOf((*MockDatasets)(nil).Kind))\n}\n\n// Pool mocks base method.\nfunc (m *MockDatasets) Pool() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Pool\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}\n\n// Pool indicates an expected call of Pool.\nfunc (mr *MockDatasetsMockRecorder) Pool() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Pool\", reflect.TypeOf((*MockDatasets)(nil).Pool))\n}\n\n// Properties mocks base method.\nfunc (m *MockDatasets) Properties(props ...string) ([]zfs.DatasetProperties, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []any{}\n\tfor _, a := range props {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"Properties\", varargs...)\n\tret0, _ := ret[0].([]zfs.DatasetProperties)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// Properties indicates an expected call of Properties.\nfunc (mr *MockDatasetsMockRecorder) Properties(props ...any) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Properties\", reflect.TypeOf((*MockDatasets)(nil).Properties), props...)\n}\n\n// MockDatasetProperties is a mock of DatasetProperties interface.\ntype MockDatasetProperties struct {\n\tctrl     *gomock.Controller\n\trecorder *MockDatasetPropertiesMockRecorder\n\tisgomock 
struct{}\n}\n\n// MockDatasetPropertiesMockRecorder is the mock recorder for MockDatasetProperties.\ntype MockDatasetPropertiesMockRecorder struct {\n\tmock *MockDatasetProperties\n}\n\n// NewMockDatasetProperties creates a new mock instance.\nfunc NewMockDatasetProperties(ctrl *gomock.Controller) *MockDatasetProperties {\n\tmock := &MockDatasetProperties{ctrl: ctrl}\n\tmock.recorder = &MockDatasetPropertiesMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockDatasetProperties) EXPECT() *MockDatasetPropertiesMockRecorder {\n\treturn m.recorder\n}\n\n// DatasetName mocks base method.\nfunc (m *MockDatasetProperties) DatasetName() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DatasetName\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}\n\n// DatasetName indicates an expected call of DatasetName.\nfunc (mr *MockDatasetPropertiesMockRecorder) DatasetName() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DatasetName\", reflect.TypeOf((*MockDatasetProperties)(nil).DatasetName))\n}\n\n// Properties mocks base method.\nfunc (m *MockDatasetProperties) Properties() map[string]string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Properties\")\n\tret0, _ := ret[0].(map[string]string)\n\treturn ret0\n}\n\n// Properties indicates an expected call of Properties.\nfunc (mr *MockDatasetPropertiesMockRecorder) Properties() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Properties\", reflect.TypeOf((*MockDatasetProperties)(nil).Properties))\n}\n\n// Mockhandler is a mock of handler interface.\ntype Mockhandler struct {\n\tctrl     *gomock.Controller\n\trecorder *MockhandlerMockRecorder\n\tisgomock struct{}\n}\n\n// MockhandlerMockRecorder is the mock recorder for Mockhandler.\ntype MockhandlerMockRecorder struct {\n\tmock *Mockhandler\n}\n\n// NewMockhandler creates a new mock 
instance.\nfunc NewMockhandler(ctrl *gomock.Controller) *Mockhandler {\n\tmock := &Mockhandler{ctrl: ctrl}\n\tmock.recorder = &MockhandlerMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *Mockhandler) EXPECT() *MockhandlerMockRecorder {\n\treturn m.recorder\n}\n\n// processLine mocks base method.\nfunc (m *Mockhandler) processLine(pool string, line []string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"processLine\", pool, line)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// processLine indicates an expected call of processLine.\nfunc (mr *MockhandlerMockRecorder) processLine(pool, line any) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"processLine\", reflect.TypeOf((*Mockhandler)(nil).processLine), pool, line)\n}\n"
  },
  {
    "path": "zfs/pool.go",
    "content": "package zfs\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os/exec\"\n\t\"strings\"\n)\n\n// PoolStatus enum contains status text\ntype PoolStatus string\n\nconst (\n\t// PoolOnline enum entry\n\tPoolOnline PoolStatus = `ONLINE`\n\t// PoolDegraded enum entry\n\tPoolDegraded PoolStatus = `DEGRADED`\n\t// PoolFaulted enum entry\n\tPoolFaulted PoolStatus = `FAULTED`\n\t// PoolOffline enum entry\n\tPoolOffline PoolStatus = `OFFLINE`\n\t// PoolUnavail enum entry\n\tPoolUnavail PoolStatus = `UNAVAIL`\n\t// PoolRemoved enum entry\n\tPoolRemoved PoolStatus = `REMOVED`\n\t// PoolSuspended enum entry\n\tPoolSuspended PoolStatus = `SUSPENDED`\n)\n\ntype poolImpl struct {\n\tname string\n}\n\nfunc (p poolImpl) Name() string {\n\treturn p.name\n}\n\nfunc (p poolImpl) Properties(props ...string) (PoolProperties, error) {\n\thandler := newPoolPropertiesImpl()\n\tif err := execute(p.name, handler, `zpool`, `get`, `-Hpo`, `name,property,value`, strings.Join(props, `,`)); err != nil {\n\t\treturn handler, err\n\t}\n\treturn handler, nil\n}\n\ntype poolPropertiesImpl struct {\n\tproperties map[string]string\n}\n\nfunc (p *poolPropertiesImpl) Properties() map[string]string {\n\treturn p.properties\n}\n\n// processLine implements the handler interface\nfunc (p *poolPropertiesImpl) processLine(pool string, line []string) error {\n\tif len(line) != 3 || line[0] != pool {\n\t\treturn ErrInvalidOutput\n\t}\n\tp.properties[line[1]] = line[2]\n\n\treturn nil\n}\n\n// PoolNames returns a list of available pool names\nfunc poolNames() ([]string, error) {\n\tpools := make([]string, 0)\n\tcmd := exec.Command(`zpool`, `list`, `-Ho`, `name`)\n\tout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscanner := bufio.NewScanner(out)\n\n\tif err = cmd.Start(); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to start command '%s': %w\", cmd.String(), err)\n\t}\n\n\tfor 
scanner.Scan() {\n\t\tpools = append(pools, scanner.Text())\n\t}\n\n\tstde, _ := io.ReadAll(stderr)\n\tif err = cmd.Wait(); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to execute command '%s'; output: '%s' (%w)\", cmd.String(), strings.TrimSpace(string(stde)), err)\n\t}\n\n\treturn pools, nil\n}\n\nfunc newPoolImpl(name string) poolImpl {\n\treturn poolImpl{\n\t\tname: name,\n\t}\n}\n\nfunc newPoolPropertiesImpl() *poolPropertiesImpl {\n\treturn &poolPropertiesImpl{\n\t\tproperties: make(map[string]string),\n\t}\n}\n"
  },
  {
    "path": "zfs/zfs.go",
    "content": "//go:generate go tool go.uber.org/mock/mockgen -source=zfs.go -destination=mock_zfs/mock_zfs.go -package=mock_zfs\n\npackage zfs\n\nimport (\n\t\"encoding/csv\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os/exec\"\n\t\"strings\"\n)\n\n// ErrInvalidOutput is returned on unparseable CLI output\nvar ErrInvalidOutput = errors.New(`invalid output executing command`)\n\n// Client is the primary entrypoint\ntype Client interface {\n\tPoolNames() ([]string, error)\n\tPool(name string) Pool\n\tDatasets(pool string, kind DatasetKind) Datasets\n}\n\n// Pool allows querying pool properties\ntype Pool interface {\n\tName() string\n\tProperties(props ...string) (PoolProperties, error)\n}\n\n// PoolProperties provides access to the properties for a pool\ntype PoolProperties interface {\n\tProperties() map[string]string\n}\n\n// Datasets allows querying properties for datasets in a pool\ntype Datasets interface {\n\tPool() string\n\tKind() DatasetKind\n\tProperties(props ...string) ([]DatasetProperties, error)\n}\n\n// DatasetProperties provides access to the properties for a dataset\ntype DatasetProperties interface {\n\tDatasetName() string\n\tProperties() map[string]string\n}\n\ntype handler interface {\n\tprocessLine(pool string, line []string) error\n}\n\ntype clientImpl struct{}\n\nfunc (z clientImpl) PoolNames() ([]string, error) {\n\treturn poolNames()\n}\n\nfunc (z clientImpl) Pool(name string) Pool {\n\treturn newPoolImpl(name)\n}\n\nfunc (z clientImpl) Datasets(pool string, kind DatasetKind) Datasets {\n\treturn newDatasetsImpl(pool, kind)\n}\n\nfunc execute(pool string, h handler, cmd string, args ...string) error {\n\tc := exec.Command(cmd, append(args, pool)...)\n\tout, err := c.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstderr, err := c.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr := csv.NewReader(out)\n\tr.Comma = '\\t'\n\tr.LazyQuotes = true\n\tr.ReuseRecord = true\n\tr.FieldsPerRecord = 3\n\n\tif err = c.Start(); err != 
nil {\n\t\treturn fmt.Errorf(\"failed to start command '%s': %w\", c.String(), err)\n\t}\n\n\tfor {\n\t\tline, err := r.Read()\n\t\tif errors.Is(err, io.EOF) {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = h.processLine(pool, line); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tstde, _ := io.ReadAll(stderr)\n\tif err = c.Wait(); err != nil {\n\t\treturn fmt.Errorf(\"failed to execute command '%s'; output: '%s' (%w)\", c.String(), strings.TrimSpace(string(stde)), err)\n\t}\n\treturn nil\n}\n\n// New instantiates a ZFS Client\nfunc New() Client {\n\treturn clientImpl{}\n}\n"
  },
  {
    "path": "zfs_exporter.go",
    "content": "package main\n\nimport (\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/pdf/zfs_exporter/v2/collector\"\n\t\"github.com/pdf/zfs_exporter/v2/zfs\"\n\n\t\"github.com/alecthomas/kingpin/v2\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tversioncollector \"github.com/prometheus/client_golang/prometheus/collectors/version\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n\t\"github.com/prometheus/exporter-toolkit/web\"\n\t\"github.com/prometheus/exporter-toolkit/web/kingpinflag\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/prometheus/common/promslog/flag\"\n\t\"github.com/prometheus/common/version\"\n)\n\nfunc main() {\n\tvar (\n\t\tmetricsPath             = kingpin.Flag(\"web.telemetry-path\", \"Path under which to expose metrics.\").Default(\"/metrics\").String()\n\t\tmetricsExporterDisabled = kingpin.Flag(`web.disable-exporter-metrics`, `Exclude metrics about the exporter itself (promhttp_*, process_*, go_*).`).Default(`false`).Bool()\n\t\tdeadline                = kingpin.Flag(\"deadline\", \"Maximum duration that a collection should run before returning cached data. Should be set to a value shorter than your scrape timeout duration. The current collection run will continue and update the cache when complete (default: 8s)\").Default(\"8s\").Duration()\n\t\tpools                   = kingpin.Flag(\"pool\", \"Name of the pool(s) to collect, repeat for multiple pools (default: all pools).\").Strings()\n\t\texcludes                = kingpin.Flag(\"exclude\", \"Exclude datasets/snapshots/volumes that match the provided regex (e.g. 
'^rpool/docker/'), may be specified multiple times.\").Strings()\n\t\ttoolkitFlags            = kingpinflag.AddFlags(kingpin.CommandLine, \":9134\")\n\t)\n\n\tpromslogConfig := &promslog.Config{}\n\tflag.AddFlags(kingpin.CommandLine, promslogConfig)\n\tkingpin.Version(version.Print(\"zfs_exporter\"))\n\tkingpin.HelpFlag.Short('h')\n\tkingpin.Parse()\n\tlogger := promslog.New(promslogConfig)\n\n\tlogger.Info(\"Starting zfs_exporter\", \"version\", version.Info())\n\tlogger.Info(\"Build context\", \"context\", version.BuildContext())\n\n\tc, err := collector.NewZFS(collector.ZFSConfig{\n\t\tDisableMetrics: *metricsExporterDisabled,\n\t\tDeadline:       *deadline,\n\t\tPools:          *pools,\n\t\tExcludes:       *excludes,\n\t\tLogger:         logger,\n\t\tZFSClient:      zfs.New(),\n\t})\n\tif err != nil {\n\t\tlogger.Error(\"Error creating an exporter\", \"err\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif *metricsExporterDisabled {\n\t\tr := prometheus.NewRegistry()\n\t\tprometheus.DefaultRegisterer = r\n\t\tprometheus.DefaultGatherer = r\n\t}\n\tprometheus.MustRegister(c)\n\tprometheus.MustRegister(versioncollector.NewCollector(\"zfs_exporter\"))\n\n\tif len(c.Pools) > 0 {\n\t\tlogger.Info(\"Enabling pools\", \"pools\", strings.Join(c.Pools, \", \"))\n\t} else {\n\t\tlogger.Info(\"Enabling pools\", \"pools\", \"(all)\")\n\t}\n\n\tcollectorNames := make([]string, 0, len(c.Collectors))\n\tfor n, c := range c.Collectors {\n\t\tif *c.Enabled {\n\t\t\tcollectorNames = append(collectorNames, n)\n\t\t}\n\t}\n\tlogger.Info(\"Enabling collectors\", \"collectors\", strings.Join(collectorNames, \", \"))\n\n\thttp.Handle(*metricsPath, promhttp.Handler())\n\tif *metricsPath != \"/\" {\n\t\tlandingConfig := web.LandingConfig{\n\t\t\tName:        \"ZFS Exporter\",\n\t\t\tDescription: \"Prometheus ZFS Exporter\",\n\t\t\tVersion:     version.Info(),\n\t\t\tLinks: []web.LandingLinks{\n\t\t\t\t{\n\t\t\t\t\tAddress: *metricsPath,\n\t\t\t\t\tText:    
\"Metrics\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tlandingPage, err := web.NewLandingPage(landingConfig)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Error creating landing page\", \"err\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\thttp.Handle(\"/\", landingPage)\n\t}\n\n\tserver := &http.Server{}\n\terr = web.ListenAndServe(server, toolkitFlags, logger)\n\tif err != nil {\n\t\tlogger.Error(\"Error starting HTTP server\", \"err\", err)\n\t\tos.Exit(1)\n\t}\n}\n"
  }
]