Repository: pdf/zfs_exporter
Branch: master
Commit: 0aacc90c6759
Files: 29
Total size: 111.8 KB
Directory structure:
gitextract__6skz3q_/
├── .github/
│ ├── CONTRIBUTING.md
│ └── workflows/
│ ├── release.yml
│ └── test.yml
├── .gitignore
├── .golangci.yml
├── .promu.yml
├── CHANGELOG.md
├── LICENSE
├── Makefile
├── Makefile.common
├── README.md
├── VERSION
├── collector/
│ ├── cache.go
│ ├── collector.go
│ ├── collector_test.go
│ ├── dataset.go
│ ├── dataset_test.go
│ ├── pool.go
│ ├── pool_test.go
│ ├── transform.go
│ ├── zfs.go
│ └── zfs_test.go
├── go.mod
├── go.sum
├── zfs/
│ ├── dataset.go
│ ├── mock_zfs/
│ │ └── mock_zfs.go
│ ├── pool.go
│ └── zfs.go
└── zfs_exporter.go
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/CONTRIBUTING.md
================================================
# Contributing
When contributing to this repository, please open an issue with a description of the problem you wish to solve, prior to sending a pull request.
## Contributing Code
Please ensure that all code is formatted prior to committing.
### Commit messages
Commits to this repository should have messages that conform to the [AngularJS Git Commit Guidelines](https://github.com/angular/angular.js/blob/master/DEVELOPERS.md#-git-commit-guidelines).
================================================
FILE: .github/workflows/release.yml
================================================
# This is a basic workflow to help you get started with Actions
name: Release
# Controls when the action will run. Triggers the workflow on push
# events, but only for the master branch
on:
push:
branches: ["master"]
env:
PARALLELISM: 3
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
# This workflow contains a single job called "release"
release:
# The type of runner that the job will run on
runs-on: ubuntu-latest
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
- name: Go Report Card
uses: creekorful/goreportcard-action@v1.0
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- name: Checkout
id: checkout
uses: actions/checkout@v2
with:
# Fetch all versions for tag/changelog generation
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: 1.24.2
- name: Install promu
id: make_promu
run: |
make promu
- name: Calculate Version
id: calculate_version
uses: mathieudutour/github-tag-action@v4.5
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
dry_run: true
- name: Update Version
id: update_version
env:
NEW_VERSION: ${{ steps.calculate_version.outputs.new_version }}
run: |
echo "${NEW_VERSION}" > VERSION
- name: Update Changelog
id: update_changelog
env:
CHANGELOG: ${{ steps.calculate_version.outputs.changelog }}
run: |
mv CHANGELOG.md _CHANGELOG.md || touch _CHANGELOG.md
echo "${CHANGELOG}" > CHANGELOG.md
cat _CHANGELOG.md >> CHANGELOG.md
rm -f _CHANGELOG.md
- name: Commit Changes
id: commit_changes
uses: EndBug/add-and-commit@v9.1.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
add: VERSION CHANGELOG.md
message: |
chore(build): Releasing ${{ steps.calculate_version.outputs.new_tag }}
- name: Commit Tag
id: commit_tag
uses: mathieudutour/github-tag-action@v6.1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
commit_sha: ${{ steps.commit_changes.outputs.commit_long_sha }}
- name: Build
id: build
run: |
promu crossbuild --parallelism $PARALLELISM
promu crossbuild --parallelism $PARALLELISM tarballs
promu checksum .tarballs
- name: Create Release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ steps.calculate_version.outputs.new_tag }}
release_name: Release ${{ steps.calculate_version.outputs.new_tag }}
body: |
Changes in this release:
${{ steps.calculate_version.outputs.changelog }}
draft: false
prerelease: false
- name: Upload Release Assets
id: upload_release_assets
uses: AButler/upload-release-assets@v2.0
with:
files: ".tarballs/*"
repo-token: ${{ secrets.GITHUB_TOKEN }}
release-tag: ${{ steps.calculate_version.outputs.new_tag }}
================================================
FILE: .github/workflows/test.yml
================================================
name: Test
# Controls when the action will run. Triggers the workflow on push or pull request
# events but only for the master branch
on:
pull_request:
branches:
- master
push:
branches:
- master
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
# This workflow contains a single job called "test"
test:
# The type of runner that the job will run on
runs-on: ubuntu-latest
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- name: Checkout
id: checkout
uses: actions/checkout@v2
with:
# Fetch all versions for tag/changelog generation
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: 1.24.2
- name: Test
id: test
run: |
make test
================================================
FILE: .gitignore
================================================
zfs_exporter
.build/
.tarballs/
================================================
FILE: .golangci.yml
================================================
version: "2"
linters:
enable:
- errorlint
- misspell
- perfsprint
- revive
- testifylint
settings:
perfsprint:
# Optimizes even if it requires an int or uint type cast.
int-conversion: true
# Optimizes into `err.Error()` even if it is only equivalent for non-nil errors.
err-error: true
# Optimizes `fmt.Errorf`.
errorf: true
# Optimizes `fmt.Sprintf` with only one argument.
sprintf1: true
# Optimizes into strings concatenation.
strconcat: false
revive:
rules:
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter
- name: unused-parameter
severity: warning
disabled: true
testifylint:
enable-all: true
disable:
- go-require
formatter:
require-f-funcs: true
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- third_party$
- builtin$
- examples$
issues:
max-issues-per-linter: 0
max-same-issues: 0
formatters:
enable:
- gofumpt
- goimports
settings:
goimports:
local-prefixes:
- github.com/prometheus/common
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
================================================
FILE: .promu.yml
================================================
go:
# Whenever the Go version is updated here,
# .github/workflows/*.yml should also be updated.
version: 1.23
repository:
path: github.com/pdf/zfs_exporter/v2
build:
flags: -a -tags netgo
ldflags: |
-X github.com/prometheus/common/version.Version={{.Version}}
-X github.com/prometheus/common/version.Revision={{.Revision}}
-X github.com/prometheus/common/version.Branch={{.Branch}}
-X github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
-X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
crossbuild:
platforms:
- linux
- illumos
- darwin
- freebsd
- netbsd
- dragonfly
tarball:
files:
- LICENSE
- CHANGELOG.md
================================================
FILE: CHANGELOG.md
================================================
## [2.3.12](https://github.com/pdf/zfs_exporter/compare/v2.3.11...v2.3.12) (2026-04-04)
### Bug Fixes
* **docs:** Update installation command for zfs_exporter to v2 ([#66](https://github.com/pdf/zfs_exporter/issues/66)) ([1769a9e](https://github.com/pdf/zfs_exporter/commit/1769a9e))
## [2.3.11](https://github.com/pdf/zfs_exporter/compare/v2.3.10...v2.3.11) (2025-11-24)
### Bug Fixes
* **security:** Bump deps for CVE-2025-58181 ([12cf70c](https://github.com/pdf/zfs_exporter/commit/12cf70c))
## [2.3.10](https://github.com/pdf/zfs_exporter/compare/v2.3.9...v2.3.10) (2025-08-24)
### Bug Fixes
* **props:** Fix filesystem creation property ([9a6beb3](https://github.com/pdf/zfs_exporter/commit/9a6beb3)), closes [#57](https://github.com/pdf/zfs_exporter/issues/57)
## [2.3.9](https://github.com/pdf/zfs_exporter/compare/v2.3.8...v2.3.9) (2025-08-24)
### Bug Fixes
* **props:** Add support for dataset `creation` property ([a1c90f4](https://github.com/pdf/zfs_exporter/commit/a1c90f4)), closes [#57](https://github.com/pdf/zfs_exporter/issues/57)
## [2.3.8](https://github.com/pdf/zfs_exporter/compare/v2.3.7...v2.3.8) (2025-04-20)
### Bug Fixes
* **build:** Bump Go version and golangci-lint ([4d46ab3](https://github.com/pdf/zfs_exporter/commit/4d46ab3))
## [2.3.7](https://github.com/pdf/zfs_exporter/compare/v2.3.6...v2.3.7) (2025-04-20)
### Bug Fixes
* **deps:** Bump dependencies ([6af54d2](https://github.com/pdf/zfs_exporter/commit/6af54d2))
## [2.3.6](https://github.com/pdf/zfs_exporter/compare/v2.3.5...v2.3.6) (2025-01-18)
### Bug Fixes
* **build:** Bump Go version in actions ([00498df](https://github.com/pdf/zfs_exporter/commit/00498df))
## [2.3.5](https://github.com/pdf/zfs_exporter/compare/v2.3.4...v2.3.5) (2025-01-18)
### Bug Fixes
* **core:** Bump dependencies, migrate to promslog ([ccc2b21](https://github.com/pdf/zfs_exporter/commit/ccc2b21))
## [2.3.4](https://github.com/pdf/zfs_exporter/compare/v2.3.3...v2.3.4) (2024-04-13)
### Bug Fixes
* **deps:** Bump deps for security ([1404536](https://github.com/pdf/zfs_exporter/commit/1404536))
## [2.3.3](https://github.com/pdf/zfs_exporter/compare/v2.3.2...v2.3.3) (2024-04-13)
### Bug Fixes
* **log:** Improve command execution error output ([2277832](https://github.com/pdf/zfs_exporter/commit/2277832))
## [2.3.2](https://github.com/pdf/zfs_exporter/compare/v2.3.1...v2.3.2) (2023-10-13)
## [2.3.1](https://github.com/pdf/zfs_exporter/compare/v2.3.0...v2.3.1) (2023-08-12)
### Bug Fixes
* **build:** Update deps ([ddf8e09](https://github.com/pdf/zfs_exporter/commit/ddf8e09))
# [2.3.0](https://github.com/pdf/zfs_exporter/compare/v2.2.8...v2.3.0) (2023-08-12)
### Features
* **server:** Add exporter toolkit for TLS support ([8102e2e](https://github.com/pdf/zfs_exporter/commit/8102e2e)), closes [#34](https://github.com/pdf/zfs_exporter/issues/34)
## [2.2.8](https://github.com/pdf/zfs_exporter/compare/v2.2.7...v2.2.8) (2023-04-22)
### Bug Fixes
* **build:** Tag correct commit SHA ([0712333](https://github.com/pdf/zfs_exporter/commit/0712333))
* **security:** Update dependencies for upstream vulnerabilities ([2220da2](https://github.com/pdf/zfs_exporter/commit/2220da2))
## [2.2.7](https://github.com/pdf/zfs_exporter/compare/v2.2.6...v2.2.7) (2023-01-28)
### Bug Fixes
* **transform:** Add support for ancient ZFS dedupratio metric ([85bdc3b](https://github.com/pdf/zfs_exporter/commit/85bdc3b)), closes [#26](https://github.com/pdf/zfs_exporter/issues/26)
## [2.2.6](https://github.com/pdf/zfs_exporter/compare/v2.2.5...v2.2.6) (2023-01-28)
### Bug Fixes
* **transform:** Add support for ancient ZFS fragmentation metric ([a0240d1](https://github.com/pdf/zfs_exporter/commit/a0240d1)), closes [#26](https://github.com/pdf/zfs_exporter/issues/26)
## [2.2.5](https://github.com/pdf/zfs_exporter/compare/v2.2.4...v2.2.5) (2022-01-30)
### Bug Fixes
* **core:** Correctly handle and report errors listing pools ([efbcceb](https://github.com/pdf/zfs_exporter/commit/efbcceb)), closes [#18](https://github.com/pdf/zfs_exporter/issues/18)
## [2.2.4](https://github.com/pdf/zfs_exporter/compare/v2.2.3...v2.2.4) (2022-01-05)
### Bug Fixes
* **build:** Update promu config to build v2 ([2a38914](https://github.com/pdf/zfs_exporter/commit/2a38914))
## [2.2.3](https://github.com/pdf/zfs_exporter/compare/v2.2.2...v2.2.3) (2022-01-05)
### Bug Fixes
* **build:** update go module version to match release tag major version ([f709083](https://github.com/pdf/zfs_exporter/commit/f709083))
## [2.2.2](https://github.com/pdf/zfs_exporter/compare/v2.2.1...v2.2.2) (2021-11-16)
### Bug Fixes
* **metrics:** Fix typo in metric name ([bbd3d91](https://github.com/pdf/zfs_exporter/commit/bbd3d91))
* **pool:** Add SUSPENDED status ([9b9e655](https://github.com/pdf/zfs_exporter/commit/9b9e655))
* **tests:** Remove unnecessary duration conversion ([b6a29ab](https://github.com/pdf/zfs_exporter/commit/b6a29ab))
## [2.2.1](https://github.com/pdf/zfs_exporter/compare/v2.2.0...v2.2.1) (2021-09-13)
### Bug Fixes
* **collector:** Avoid race on upstream channel close, tidy sync points ([e6fbdf5](https://github.com/pdf/zfs_exporter/commit/e6fbdf5))
* **docs:** Document web.disable-exporter-metrics flag in README ([20182da](https://github.com/pdf/zfs_exporter/commit/20182da))
# [2.2.0](https://github.com/pdf/zfs_exporter/compare/v2.1.1...v2.2.0) (2021-09-04)
### Bug Fixes
* **docs:** Correct misspelling ([066c7d2](https://github.com/pdf/zfs_exporter/commit/066c7d2))
### Features
* **metrics:** Allow disabling exporter metrics ([1ca8717](https://github.com/pdf/zfs_exporter/commit/1ca8717)), closes [#2](https://github.com/pdf/zfs_exporter/issues/2)
## [2.1.1](https://github.com/pdf/zfs_exporter/compare/v2.1.0...v2.1.1) (2021-08-27)
### Bug Fixes
* **build:** Update to Go 1.17 for crossbuild, and enable all platforms ([f47b69a](https://github.com/pdf/zfs_exporter/commit/f47b69a))
* **core:** Update dependencies ([b39382b](https://github.com/pdf/zfs_exporter/commit/b39382b))
# [2.1.0](https://github.com/pdf/zfs_exporter/compare/v2.0.0...v2.1.0) (2021-08-18)
### Bug Fixes
* **logging:** Include collector in warning for unsupported properties ([1760a4a](https://github.com/pdf/zfs_exporter/commit/1760a4a))
* **metrics:** Invert ratio for multiplier fields, and clarify their docs ([1a7bc3a](https://github.com/pdf/zfs_exporter/commit/1a7bc3a)), closes [#11](https://github.com/pdf/zfs_exporter/issues/11)
### Features
* **build:** Update to Go 1.17 ([b64115c](https://github.com/pdf/zfs_exporter/commit/b64115c))
# [2.0.0](https://github.com/pdf/zfs_exporter/compare/v1.0.1...v2.0.0) (2021-08-14)
### Code Refactoring
* **collector:** Migrate to internal ZFS CLI implementation ([53b0e98](https://github.com/pdf/zfs_exporter/commit/53b0e98)), closes [#7](https://github.com/pdf/zfs_exporter/issues/7) [#9](https://github.com/pdf/zfs_exporter/issues/9) [#10](https://github.com/pdf/zfs_exporter/issues/10)
### Features
* **performance:** Execute collection concurrently per pool ([ccc6f22](https://github.com/pdf/zfs_exporter/commit/ccc6f22))
* **zfs:** Add local ZFS CLI parsing ([f5050b1](https://github.com/pdf/zfs_exporter/commit/f5050b1))
### BREAKING CHANGES
* **collector:** Ratio values are now properly calculated in the range
0-1, rather than being passed verbatim.
The following metrics are affected by this change:
- zfs_pool_deduplication_ratio
- zfs_pool_capacity_ratio
- zfs_pool_fragmentation_ratio
- zfs_dataset_compression_ratio
- zfs_dataset_referenced_compression_ratio
Additionally, the zfs_dataset_fragmentation_percent metric has been
renamed to zfs_dataset_fragmentation_ratio.
## [1.0.1](https://github.com/pdf/zfs_exporter/compare/v1.0.0...v1.0.1) (2021-08-03)
### Bug Fixes
* fix copy and paste errors when accessing dataset properties ([c0fc6b2](https://github.com/pdf/zfs_exporter/commit/c0fc6b2))
# [1.0.0](https://github.com/pdf/zfs_exporter/compare/v0.0.3...v1.0.0) (2021-06-22)
### Bug Fixes
* **ci:** Fix syntax error in github actions workflow ([0b6e8bc](https://github.com/pdf/zfs_exporter/commit/0b6e8bc))
### Code Refactoring
* **core:** Update prometheus toolchain and refactor internals ([056b386](https://github.com/pdf/zfs_exporter/commit/056b386))
### Features
* **enhancement:** Allow excluding datasets by regular expression ([8dd48ba](https://github.com/pdf/zfs_exporter/commit/8dd48ba)), closes [#3](https://github.com/pdf/zfs_exporter/issues/3)
### BREAKING CHANGES
* **core:** Go API has changed somewhat, but metrics remain
unaffected.
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2018 Peter Fern
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: Makefile
================================================
# Copyright 2015 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Needs to be defined before including Makefile.common to auto-generate targets
DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x
DOCKER_IMAGE_NAME ?= zfs-exporter
.PHONY: all
all:: test build
.PHONY: test
test:: vet precheck style lint unused common-test
include Makefile.common
================================================
FILE: Makefile.common
================================================
# Copyright 2018 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A common Makefile that includes rules to be reused in different prometheus projects.
# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!
# Example usage :
# Create the main Makefile in the root project directory.
# include Makefile.common
# customTarget:
# @echo ">> Running customTarget"
#
# Ensure GOBIN is not set during build so that promu is installed to the correct path
unexport GOBIN
GO ?= go
GOFMT ?= $(GO)fmt
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
GOOPTS ?=
GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
GO_VERSION ?= $(shell $(GO) version)
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
PROMU := $(FIRST_GOPATH)/bin/promu
pkgs = ./...
ifeq (arm, $(GOHOSTARCH))
GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
else
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
endif
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
ifneq ($(shell command -v gotestsum 2> /dev/null),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif
PROMU_VERSION ?= 0.17.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v2.1.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
# If we're in CI and there is an Actions file, that means the linter
# is being run in Actions, so we don't need to run it here.
ifneq (,$(SKIP_GOLANGCI_LINT))
GOLANGCI_LINT :=
else ifeq (,$(CIRCLE_JOB))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
endif
endif
endif
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKERFILE_PATH ?= ./Dockerfile
DOCKERBUILD_CONTEXT ?= ./
DOCKER_REPO ?= prom
DOCKER_ARCHS ?= amd64
BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64
test-flags := -race
endif
endif
# This rule is used to forward a target like "build" to "common-build". This
# allows a new "build" target to be defined in a Makefile which includes this
# one and override "common-build" without override warnings.
%: common-% ;
.PHONY: common-all
common-all: precheck style check_license lint yamllint unused build test
.PHONY: common-style
common-style:
@echo ">> checking code style"
@fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
if [ -n "$${fmtRes}" ]; then \
echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
echo "Please ensure you are using $$($(GO) version) for formatting code."; \
exit 1; \
fi
.PHONY: common-check_license
common-check_license:
@echo ">> checking license header"
@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
done); \
if [ -n "$${licRes}" ]; then \
echo "license header checking failed:"; echo "$${licRes}"; \
exit 1; \
fi
.PHONY: common-deps
common-deps:
@echo ">> getting dependencies"
$(GO) mod download
.PHONY: update-go-deps
update-go-deps:
@echo ">> updating Go dependencies"
@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
$(GO) get -d $$m; \
done
$(GO) mod tidy
.PHONY: common-test-short
common-test-short: $(GOTEST_DIR)
@echo ">> running short tests"
$(GOTEST) -short $(GOOPTS) $(pkgs)
.PHONY: common-test
common-test: $(GOTEST_DIR)
@echo ">> running all tests"
$(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
$(GOTEST_DIR):
@mkdir -p $@
.PHONY: common-format
common-format:
@echo ">> formatting code"
$(GO) fmt $(pkgs)
.PHONY: common-vet
common-vet:
@echo ">> vetting code"
$(GO) vet $(GOOPTS) $(pkgs)
.PHONY: common-lint
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint"
$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
endif
.PHONY: common-lint-fix
common-lint-fix: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint fix"
$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
endif
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell command -v yamllint 2> /dev/null))
@echo "yamllint not installed so skipping"
else
yamllint .
endif
# For backward-compatibility.
.PHONY: common-staticcheck
common-staticcheck: lint
.PHONY: common-unused
common-unused:
@echo ">> running check for unused/missing packages in go.mod"
$(GO) mod tidy
@git diff --exit-code -- go.sum go.mod
.PHONY: common-build
common-build: promu
@echo ">> building binaries"
$(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
.PHONY: common-tarball
common-tarball: promu
@echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
.PHONY: common-docker-repo-name
common-docker-repo-name:
@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
$(DOCKERBUILD_CONTEXT)
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
.PHONY: promu
promu: $(PROMU)
$(PROMU):
$(eval PROMU_TMP := $(shell mktemp -d))
curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
mkdir -p $(FIRST_GOPATH)/bin
cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
rm -r $(PROMU_TMP)
.PHONY: proto
proto:
@echo ">> generating code from proto files"
@./scripts/genproto.sh
ifdef GOLANGCI_LINT
$(GOLANGCI_LINT):
mkdir -p $(FIRST_GOPATH)/bin
curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \
| sed -e '/install -d/d' \
| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
endif
.PHONY: precheck
precheck::
define PRECHECK_COMMAND_template =
precheck:: $(1)_precheck
PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
.PHONY: $(1)_precheck
$(1)_precheck:
@if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
exit 1; \
fi
endef
govulncheck: install-govulncheck
govulncheck ./...
install-govulncheck:
command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
================================================
FILE: README.md
================================================
# ZFS Exporter
[](https://github.com/pdf/zfs_exporter/actions/workflows/test.yml)
[](https://github.com/pdf/zfs_exporter/actions/workflows/release.yml)
[](https://goreportcard.com/report/github.com/pdf/zfs_exporter)
[](https://github.com/pdf/zfs_exporter/blob/master/LICENSE)
Prometheus exporter for ZFS (pools, filesystems, snapshots and volumes). Other implementations exist, however performance can be quite variable, producing occasional timeouts (and associated alerts). This exporter was built with a few features aimed at allowing users to avoid collecting more than they need to, and to ensure timeouts cannot occur, but that we eventually return useful data:
- **Pool selection** - allow the user to select which pools are collected
- **Multiple collectors** - allow the user to select which data types are collected (pools, filesystems, snapshots and volumes)
- **Property selection** - allow the user to select which properties are collected per data type (enabling only required properties will increase collector performance, by reducing metadata queries)
- **Collection deadline and caching** - if the collection duration exceeds the configured deadline, cached data from the last run will be returned for any metrics that have not yet been collected, and the current collection run will continue in the background. Collections will not run concurrently, so that when a system is running slowly, we don't compound the problem - if an existing collection is still running, cached data will be returned.
## Installation
Download the [latest release](https://github.com/pdf/zfs_exporter/releases/latest) for your platform, and unpack it somewhere on your filesystem.
You may also build the latest version using Go v1.11 - 1.17 via `go get`:
```bash
go get -u github.com/pdf/zfs_exporter
```
Installation can also be accomplished using `go install`:
```bash
version=latest # or a specific version tag
go install github.com/pdf/zfs_exporter/v2@$version
```
## Usage
```
usage: zfs_exporter [<flags>]
Flags:
-h, --[no-]help Show context-sensitive help (also try --help-long and --help-man).
--[no-]collector.dataset-filesystem
Enable the dataset-filesystem collector (default: enabled)
--properties.dataset-filesystem="available,logicalused,quota,referenced,used,usedbydataset,written"
Properties to include for the dataset-filesystem collector, comma-separated.
--[no-]collector.dataset-snapshot
Enable the dataset-snapshot collector (default: disabled)
--properties.dataset-snapshot="logicalused,referenced,used,written"
Properties to include for the dataset-snapshot collector, comma-separated.
--[no-]collector.dataset-volume
Enable the dataset-volume collector (default: enabled)
--properties.dataset-volume="available,logicalused,referenced,used,usedbydataset,volsize,written"
Properties to include for the dataset-volume collector, comma-separated.
--[no-]collector.pool Enable the pool collector (default: enabled)
--properties.pool="allocated,dedupratio,fragmentation,free,freeing,health,leaked,readonly,size"
Properties to include for the pool collector, comma-separated.
--web.telemetry-path="/metrics"
Path under which to expose metrics.
--[no-]web.disable-exporter-metrics
Exclude metrics about the exporter itself (promhttp_*, process_*, go_*).
--deadline=8s Maximum duration that a collection should run before returning cached data. Should be set to a value shorter than your scrape timeout duration. The current collection run will continue and update the cache when
complete (default: 8s)
--pool=POOL ... Name of the pool(s) to collect, repeat for multiple pools (default: all pools).
--exclude=EXCLUDE ... Exclude datasets/snapshots/volumes that match the provided regex (e.g. '^rpool/docker/'), may be specified multiple times.
--[no-]web.systemd-socket Use systemd socket activation listeners instead of port listeners (Linux only).
--web.listen-address=:9134 ...
Addresses on which to expose metrics and web interface. Repeatable for multiple addresses. Examples: `:9100` or `[::1]:9100` for http, `vsock://:9100` for vsock
--web.config.file="" Path to configuration file that can enable TLS or authentication. See: https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md
--log.level=info Only log messages with the given severity or above. One of: [debug, info, warn, error]
--log.format=logfmt Output format of log messages. One of: [logfmt, json]
--[no-]version Show application version.
```
Collectors that are enabled by default can be negated by prefixing the flag with `--no-*`, ie:
```
zfs_exporter --no-collector.dataset-filesystem
```
## TLS endpoint
**EXPERIMENTAL**
The exporter supports TLS via a new web configuration file.
```console
./zfs_exporter --web.config.file=web-config.yml
```
See the [exporter-toolkit https package](https://github.com/prometheus/exporter-toolkit/blob/v0.1.0/https/README.md) for more details.
## Caveats
The collector may need to be run as root on some platforms (ie - Linux prior to ZFS v0.7.0).
Whilst inspiration was taken from some of the alternative ZFS collectors, metric names may not be compatible.
## Alternatives
In no particular order, here are some alternative implementations:
- https://github.com/eliothedeman/zfs_exporter
- https://github.com/ncabatoff/zfs-exporter
- https://github.com/eripa/prometheus-zfs
================================================
FILE: VERSION
================================================
2.3.12
================================================
FILE: collector/cache.go
================================================
package collector
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
)
// metricCache is a concurrency-safe map of expanded metric name to the most
// recently collected prometheus metric. The embedded RWMutex guards cache.
type metricCache struct {
	cache map[string]prometheus.Metric
	sync.RWMutex
}
// add stores m under its expanded name, overwriting any previous entry.
func (c *metricCache) add(m metric) {
	c.Lock()
	c.cache[m.name] = m.prometheus
	c.Unlock()
}
// merge copies every entry from other into c. Merging a cache into itself is
// a no-op (taking Lock then RLock on the same RWMutex would self-deadlock).
func (c *metricCache) merge(other *metricCache) {
	if other == c {
		return
	}
	c.Lock()
	defer c.Unlock()
	other.RLock()
	defer other.RUnlock()
	for key, m := range other.cache {
		c.cache[key] = m
	}
}
// replace swaps c's backing map for other's. Note other is not locked here;
// NOTE(review): presumably other is a freshly-built cache not shared with
// other goroutines — confirm against callers.
func (c *metricCache) replace(other *metricCache) {
	c.Lock()
	c.cache = other.cache
	c.Unlock()
}
// index returns the set of metric names currently held in the cache.
func (c *metricCache) index() map[string]struct{} {
	c.RLock()
	defer c.RUnlock()
	keys := make(map[string]struct{}, len(c.cache))
	for key := range c.cache {
		keys[key] = struct{}{}
	}
	return keys
}
// newMetricCache returns an empty, ready-to-use metricCache.
func newMetricCache() *metricCache {
	c := &metricCache{}
	c.cache = make(map[string]prometheus.Metric)
	return c
}
================================================
FILE: collector/collector.go
================================================
package collector
import (
"errors"
"fmt"
"log/slog"
"strconv"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/pdf/zfs_exporter/v2/zfs"
"github.com/prometheus/client_golang/prometheus"
)
const (
	// Collector enablement defaults passed to registerCollector.
	defaultEnabled  = true
	defaultDisabled = false
	// namespace is the first component of every exported metric name.
	namespace = `zfs`
	// Default-state strings interpolated into flag help text.
	helpDefaultStateEnabled  = `enabled`
	helpDefaultStateDisabled = `disabled`
	// Metric subsystems (second component of metric names).
	subsystemDataset = `dataset`
	subsystemPool    = `pool`
	// Help/warning text used when an unregistered property is requested.
	propertyUnsupportedDesc = `!!! This property is unsupported, results are likely to be undesirable, please file an issue at https://github.com/pdf/zfs_exporter/issues to have this property supported !!!`
	propertyUnsupportedMsg  = `Unsupported dataset property, results are likely to be undesirable`
	helpIssue               = `Please file an issue at https://github.com/pdf/zfs_exporter/issues`
)
var (
	// collectorStates tracks every registered collector's flag-backed state,
	// keyed by collector name. Populated at init time via registerCollector.
	collectorStates = make(map[string]State)
	// Descriptor for the per-collector scrape duration metric.
	scrapeDurationDescName = prometheus.BuildFQName(namespace, `scrape`, `collector_duration_seconds`)
	scrapeDurationDesc     = prometheus.NewDesc(
		scrapeDurationDescName,
		`zfs_exporter: Duration of a collector scrape.`,
		[]string{`collector`},
		nil,
	)
	// Descriptor for the per-collector scrape success metric.
	scrapeSuccessDescName = prometheus.BuildFQName(namespace, `scrape`, `collector_success`)
	scrapeSuccessDesc     = prometheus.NewDesc(
		scrapeSuccessDescName,
		`zfs_exporter: Whether a collector succeeded.`,
		[]string{`collector`},
		nil,
	)
	// errUnsupportedProperty signals a property with no registered definition.
	errUnsupportedProperty = errors.New(`unsupported property`)
)
// factoryFunc constructs a Collector from a logger, a ZFS client, and the
// list of property names the collector should gather.
type factoryFunc func(l *slog.Logger, c zfs.Client, properties []string) (Collector, error)

// transformFunc converts a raw ZFS property string into a float64 metric value.
type transformFunc func(string) (float64, error)
// State holds metadata for managing collector status
type State struct {
	Name       string  // collector name, e.g. `pool` or `dataset-filesystem`
	Enabled    *bool   // backed by the collector.<name> kingpin flag
	Properties *string // comma-separated list, backed by properties.<name>
	factory    factoryFunc
}
// Collector defines the minimum functionality for registering a collector
type Collector interface {
	// update collects metrics for the given pools, sending them on ch and
	// skipping items matched by excludes.
	update(ch chan<- metric, pools []string, excludes regexpCollection) error
	// describe sends the descriptors of all metrics the collector may produce.
	describe(ch chan<- *prometheus.Desc)
}
// metric pairs a prometheus metric with an expanded name used as a cache key.
type metric struct {
	name       string // see expandMetricName
	prometheus prometheus.Metric
}
// property describes how one ZFS property maps onto a prometheus metric.
type property struct {
	name      string           // fully-qualified metric name
	desc      *prometheus.Desc // prometheus descriptor for this metric
	transform transformFunc    // raw string value -> float64 conversion
	kind      prometheus.ValueType
}
// push converts the raw ZFS value via the property's transform and emits it
// on ch as a const metric, keyed by an expanded name incorporating the label
// values. Returns the transform error, if any.
func (p property) push(ch chan<- metric, value string, labelValues ...string) error {
	parsed, err := p.transform(value)
	if err != nil {
		return err
	}
	key := expandMetricName(p.name, labelValues...)
	m := prometheus.MustNewConstMetric(p.desc, p.kind, parsed, labelValues...)
	ch <- metric{name: key, prometheus: m}
	return nil
}
// propertyStore maps ZFS property names to metric definitions; the defaults
// are used to build fallback definitions for unknown properties (see find).
type propertyStore struct {
	defaultSubsystem string
	defaultLabels    []string
	store            map[string]property
}
// find returns the metric definition for name. Unknown names yield a
// fallback numeric gauge (flagged unsupported in its help text) together
// with errUnsupportedProperty, so callers can warn yet still emit a value.
func (p *propertyStore) find(name string) (property, error) {
	if prop, ok := p.store[name]; ok {
		return prop, nil
	}
	fallback := newProperty(
		p.defaultSubsystem,
		name,
		propertyUnsupportedDesc,
		transformNumeric,
		prometheus.GaugeValue,
		p.defaultLabels...,
	)
	return fallback, errUnsupportedProperty
}
// registerCollector makes a collector configurable via command-line flags and
// records its State for later instantiation. It is called from package init()
// functions, before kingpin parses the command line, and defines two flags:
//
//	collector.<name>  — enable/disable the collector (default isDefaultEnabled)
//	properties.<name> — comma-separated properties to collect (default defaultProps)
func registerCollector(collector string, isDefaultEnabled bool, defaultProps string, factory factoryFunc) {
	helpDefaultState := helpDefaultStateDisabled
	if isDefaultEnabled {
		helpDefaultState = helpDefaultStateEnabled
	}
	enabledFlagName := fmt.Sprintf("collector.%s", collector)
	enabledFlagHelp := fmt.Sprintf("Enable the %s collector (default: %s)", collector, helpDefaultState)
	enabledDefaultValue := strconv.FormatBool(isDefaultEnabled)
	propsFlagName := fmt.Sprintf("properties.%s", collector)
	propsFlagHelp := fmt.Sprintf("Properties to include for the %s collector, comma-separated.", collector)
	enabledFlag := kingpin.Flag(enabledFlagName, enabledFlagHelp).Default(enabledDefaultValue).Bool()
	propsFlag := kingpin.Flag(propsFlagName, propsFlagHelp).Default(defaultProps).String()
	collectorStates[collector] = State{
		// Fix: Name was previously left as the zero value; populate it so the
		// State is self-describing (tests construct State with Name set).
		Name:       collector,
		Enabled:    enabledFlag,
		Properties: propsFlag,
		factory:    factory,
	}
}
// expandMetricName builds a unique cache key for a metric by joining the
// label-value context and the metric name with `-` (context first, name last).
func expandMetricName(prefix string, context ...string) string {
	parts := make([]string, 0, len(context)+1)
	parts = append(parts, context...)
	parts = append(parts, prefix)
	return strings.Join(parts, `-`)
}
// newProperty assembles a property definition: the fully-qualified metric
// name (namespace_subsystem_metricName), its descriptor, and the transform
// used to parse raw ZFS values.
func newProperty(subsystem, metricName, helpText string, transform transformFunc, kind prometheus.ValueType, labels ...string) property {
	fqName := prometheus.BuildFQName(namespace, subsystem, metricName)
	p := property{
		name:      fqName,
		transform: transform,
		kind:      kind,
	}
	p.desc = prometheus.NewDesc(fqName, helpText, labels, nil)
	return p
}
================================================
FILE: collector/collector_test.go
================================================
package collector
import (
"bytes"
"context"
"io"
"log/slog"
"time"
"github.com/pdf/zfs_exporter/v2/zfs"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil"
)
// logger discards all output; these tests do not assert on log content.
var logger = slog.New(slog.NewTextHandler(io.Discard, nil))
// callCollector runs testutil.CollectAndCompare in a goroutine so the
// comparison can be abandoned when ctx is cancelled (e.g. gomock's deadline).
func callCollector(ctx context.Context, collector prometheus.Collector, metricResults []byte, metricNames []string) error {
	done := make(chan error)
	go func() {
		expected := bytes.NewBuffer(metricResults)
		done <- testutil.CollectAndCompare(collector, expected, metricNames...)
	}()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-done:
		return err
	}
}
// defaultConfig returns a baseline ZFSConfig for tests: exporter-internal
// metrics disabled, a generous deadline, the shared discard logger, and the
// supplied (mock) ZFS client.
func defaultConfig(z zfs.Client) ZFSConfig {
	cfg := ZFSConfig{
		DisableMetrics: true,
		Deadline:       5 * time.Minute,
		Logger:         logger,
		ZFSClient:      z,
	}
	return cfg
}
// stringPointer returns a pointer to a copy of s (test helper).
func stringPointer(s string) *string {
	v := s
	return &v
}
// boolPointer returns a pointer to a copy of b (test helper).
func boolPointer(b bool) *bool {
	v := b
	return &v
}
================================================
FILE: collector/dataset.go
================================================
package collector
import (
"fmt"
"log/slog"
"sync"
"github.com/pdf/zfs_exporter/v2/zfs"
"github.com/prometheus/client_golang/prometheus"
)
const (
	// Default property sets (comma-separated) for each dataset collector;
	// overridable at runtime via the properties.dataset-* flags.
	defaultFilesystemProps = `available,logicalused,quota,referenced,used,usedbydataset,written`
	defaultSnapshotProps   = `logicalused,referenced,used,written`
	defaultVolumeProps     = `available,logicalused,referenced,used,usedbydataset,volsize,written`
)
var (
	// datasetLabels are attached to every dataset metric, in the order their
	// values are supplied by updateDatasetMetrics.
	datasetLabels = []string{`name`, `pool`, `type`}
	// datasetProperties maps supported ZFS dataset property names to metric
	// definitions. Properties absent from the store fall back to an
	// "unsupported" numeric gauge via propertyStore.find.
	datasetProperties = propertyStore{
		defaultSubsystem: subsystemDataset,
		defaultLabels:    datasetLabels,
		store: map[string]property{
			`available`: newProperty(
				subsystemDataset,
				`available_bytes`,
				`The amount of space in bytes available to the dataset and all its children.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`compressratio`: newProperty(
				subsystemDataset,
				`compression_ratio`,
				`The ratio of compressed size vs uncompressed size for this dataset.`,
				transformMultiplier,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`logicalused`: newProperty(
				subsystemDataset,
				`logical_used_bytes`,
				`The amount of space in bytes that is "logically" consumed by this dataset and all its descendents. See the "used_bytes" property.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`logicalreferenced`: newProperty(
				subsystemDataset,
				`logical_referenced_bytes`,
				`The amount of space that is "logically" accessible by this dataset. See the "referenced_bytes" property.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`quota`: newProperty(
				subsystemDataset,
				`quota_bytes`,
				`The maximum amount of space in bytes this dataset and its descendents can consume.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`refcompressratio`: newProperty(
				subsystemDataset,
				`referenced_compression_ratio`,
				`The ratio of compressed size vs uncompressed size for the referenced space of this dataset. See also the "compression_ratio" property.`,
				transformMultiplier,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`referenced`: newProperty(
				subsystemDataset,
				`referenced_bytes`,
				`The amount of data in bytes that is accessible by this dataset, which may or may not be shared with other datasets in the pool.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`refquota`: newProperty(
				subsystemDataset,
				`referenced_quota_bytes`,
				`The maximum amount of space in bytes this dataset can consume.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`refreservation`: newProperty(
				subsystemDataset,
				`referenced_reservation_bytes`,
				`The minimum amount of space in bytes guaranteed to this dataset.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`reservation`: newProperty(
				subsystemDataset,
				`reservation_bytes`,
				`The minimum amount of space in bytes guaranteed to a dataset and its descendants.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`snapshot_count`: newProperty(
				subsystemDataset,
				`snapshot_count_total`,
				`The total number of snapshots that exist under this location in the dataset tree. This value is only available when a snapshot_limit has been set somewhere in the tree under which the dataset resides.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`snapshot_limit`: newProperty(
				subsystemDataset,
				`snapshot_limit_total`,
				`The total limit on the number of snapshots that can be created on a dataset and its descendents.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`used`: newProperty(
				subsystemDataset,
				`used_bytes`,
				`The amount of space in bytes consumed by this dataset and all its descendents.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`usedbychildren`: newProperty(
				subsystemDataset,
				`used_by_children_bytes`,
				`The amount of space in bytes used by children of this dataset, which would be freed if all the dataset's children were destroyed.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`usedbydataset`: newProperty(
				subsystemDataset,
				`used_by_dataset_bytes`,
				`The amount of space in bytes used by this dataset itself, which would be freed if the dataset were destroyed.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`usedbyrefreservation`: newProperty(
				subsystemDataset,
				`used_by_referenced_reservation_bytes`,
				`The amount of space in bytes used by a refreservation set on this dataset, which would be freed if the refreservation was removed.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`usedbysnapshots`: newProperty(
				subsystemDataset,
				`used_by_snapshot_bytes`,
				`The amount of space in bytes consumed by snapshots of this dataset.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`volsize`: newProperty(
				subsystemDataset,
				`volume_size_bytes`,
				`The logical size in bytes of this volume.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`written`: newProperty(
				subsystemDataset,
				`written_bytes`,
				`The amount of referenced space in bytes written to this dataset since the previous snapshot.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
			`creation`: newProperty(
				subsystemDataset,
				`creation_timestamp`,
				`The unix timestamp when this dataset was created.`,
				transformNumeric,
				prometheus.GaugeValue,
				datasetLabels...,
			),
		},
	}
)
// init registers the dataset collectors with their default property sets.
// Filesystem and volume collectors default to enabled; snapshot to disabled.
func init() {
	registerCollector(`dataset-filesystem`, defaultEnabled, defaultFilesystemProps, newFilesystemCollector)
	registerCollector(`dataset-snapshot`, defaultDisabled, defaultSnapshotProps, newSnapshotCollector)
	registerCollector(`dataset-volume`, defaultEnabled, defaultVolumeProps, newVolumeCollector)
}
// datasetCollector collects metrics for one dataset kind (filesystem,
// snapshot, or volume) across all selected pools.
type datasetCollector struct {
	kind   zfs.DatasetKind
	log    *slog.Logger
	client zfs.Client
	props  []string // ZFS property names to collect
}
// describe emits the descriptor for each configured property, warning on and
// skipping any property that has no registered definition.
func (c *datasetCollector) describe(ch chan<- *prometheus.Desc) {
	for _, name := range c.props {
		prop, err := datasetProperties.find(name)
		if err != nil {
			c.log.Warn(propertyUnsupportedMsg, `help`, helpIssue, `collector`, c.kind, `property`, name, `err`, err)
			continue
		}
		ch <- prop.desc
	}
}
// update implements Collector, collecting each pool concurrently. If any
// per-pool collection fails, the first error received is returned after all
// goroutines finish (errChan is buffered to len(pools) so none block).
func (c *datasetCollector) update(ch chan<- metric, pools []string, excludes regexpCollection) error {
	errChan := make(chan error, len(pools))
	var wg sync.WaitGroup
	wg.Add(len(pools))
	for _, pool := range pools {
		go func(name string) {
			defer wg.Done()
			if err := c.updatePoolMetrics(ch, name, excludes); err != nil {
				errChan <- err
			}
		}(pool)
	}
	wg.Wait()
	select {
	case err := <-errChan:
		return err
	default:
	}
	return nil
}
// updatePoolMetrics queries all datasets of c.kind in pool for the configured
// properties and emits metrics for each, skipping names matching excludes.
func (c *datasetCollector) updatePoolMetrics(ch chan<- metric, pool string, excludes regexpCollection) error {
	props, err := c.client.Datasets(pool, c.kind).Properties(c.props...)
	if err != nil {
		return err
	}
	for _, ds := range props {
		if excludes.MatchString(ds.DatasetName()) {
			continue
		}
		if err := c.updateDatasetMetrics(ch, pool, ds); err != nil {
			return err
		}
	}
	return nil
}
// updateDatasetMetrics emits one metric per returned property for a single
// dataset. Unknown properties are warned about but still emitted via the
// fallback definition returned by find.
func (c *datasetCollector) updateDatasetMetrics(ch chan<- metric, pool string, dataset zfs.DatasetProperties) error {
	labels := []string{dataset.DatasetName(), pool, string(c.kind)}
	for name, raw := range dataset.Properties() {
		prop, findErr := datasetProperties.find(name)
		if findErr != nil {
			c.log.Warn(propertyUnsupportedMsg, `help`, helpIssue, `collector`, c.kind, `property`, name, `err`, findErr)
		}
		if err := prop.push(ch, raw, labels...); err != nil {
			return err
		}
	}
	return nil
}
// newDatasetCollector builds a datasetCollector for a known dataset kind;
// unrecognized kinds produce an error.
func newDatasetCollector(kind zfs.DatasetKind, l *slog.Logger, c zfs.Client, props []string) (Collector, error) {
	if kind != zfs.DatasetFilesystem && kind != zfs.DatasetSnapshot && kind != zfs.DatasetVolume {
		return nil, fmt.Errorf("unknown dataset type: %s", kind)
	}
	return &datasetCollector{kind: kind, log: l, client: c, props: props}, nil
}
// newFilesystemCollector is the factoryFunc for the dataset-filesystem collector.
func newFilesystemCollector(l *slog.Logger, c zfs.Client, props []string) (Collector, error) {
	return newDatasetCollector(zfs.DatasetFilesystem, l, c, props)
}
// newSnapshotCollector is the factoryFunc for the dataset-snapshot collector.
func newSnapshotCollector(l *slog.Logger, c zfs.Client, props []string) (Collector, error) {
	return newDatasetCollector(zfs.DatasetSnapshot, l, c, props)
}
// newVolumeCollector is the factoryFunc for the dataset-volume collector.
func newVolumeCollector(l *slog.Logger, c zfs.Client, props []string) (Collector, error) {
	return newDatasetCollector(zfs.DatasetVolume, l, c, props)
}
================================================
FILE: collector/dataset_test.go
================================================
package collector
import (
"context"
"strings"
"testing"
"github.com/pdf/zfs_exporter/v2/zfs"
"github.com/pdf/zfs_exporter/v2/zfs/mock_zfs"
"go.uber.org/mock/gomock"
)
// datasetResults models one dataset's mocked property query result.
type datasetResults struct {
	name    string            // dataset name, e.g. `testpool/test`
	results map[string]string // property name -> raw ZFS value
}
// TestDatsetMetrics (sic — the name's existing typo is kept so external test
// filters keep matching) exercises the dataset collectors against a mocked
// ZFS client: the full supported property set, multiple pools, explicit pool
// selection, multiple dataset kinds, and the unsupported-property fallback.
func TestDatsetMetrics(t *testing.T) {
	testCases := []struct {
		name           string
		kinds          []zfs.DatasetKind
		pools          []string // pools reported by the client's PoolNames()
		explicitPools  []string // when non-nil, restricts collection via ZFSConfig.Pools
		propsRequested []string
		metricNames    []string // names passed to CollectAndCompare
		propsResults   map[string][]datasetResults
		metricResults  string // expected exposition-format output
	}{
		{
			name:           `all metrics`,
			kinds:          []zfs.DatasetKind{zfs.DatasetFilesystem},
			pools:          []string{`testpool`},
			propsRequested: []string{`available`, `compressratio`, `logicalused`, `logicalreferenced`, `quota`, `refcompressratio`, `referenced`, `refquota`, `refreservation`, `reservation`, `snapshot_count`, `snapshot_limit`, `used`, `usedbychildren`, `usedbydataset`, `usedbyrefreservation`, `usedbysnapshots`, `volsize`, `written`, `creation`},
			// NOTE(review): several names below appear misspelled (`zfs_datset_...`,
			// `used_by_datset`); they match no produced metric, so those metrics are
			// effectively excluded from the comparison — confirm this is intended.
			metricNames: []string{`zfs_dataset_available_bytes`, `zfs_dataset_compression_ratio`, `zfs_dataset_logical_used_bytes`, `zfs_dataset_logical_referenced_bytes`, `zfs_dataset_quota_bytes`, `zfs_dataset_referenced_compression_ratio`, `zfs_dataset_referenced_bytes`, `zfs_dataset_referenced_quota_bytes`, `zfs_dataset_reservation_bytes`, `zfs_dataset_snapshot_count_total`, `zfs_datset_snapshot_limit_total`, `zfs_dataset_used_bytes`, `zfs_dataset_used_by_children_bytes`, `zfs_dataset_used_by_datset_bytes`, `zfs_datset_used_by_referenced_reservation_bytes`, `zfs_dataset_used_by_snapshot_bytes`, `zfs_dataset_volume_size_bytes`, `zfs_dataset_written_bytes`},
			propsResults: map[string][]datasetResults{
				`testpool`: {
					{
						name: `testpool/test`,
						results: map[string]string{
							`available`:            `1024`,
							`compressratio`:        `2.50`,
							`logicalused`:          `1024`,
							`logicalreferenced`:    `512`,
							`quota`:                `512`,
							`refcompressratio`:     `24.00`,
							`referenced`:           `1024`,
							`refreservation`:       `1024`,
							`reservation`:          `1024`,
							`snapshot_count`:       `12`,
							`snapshot_limit`:       `24`,
							`used`:                 `1024`,
							`usedbychildren`:       `1024`,
							`usedbydataset`:        `1024`,
							`usedbyrefreservation`: `1024`,
							`usedbysnapshots`:      `1024`,
							`volsize`:              `1024`,
							`written`:              `1024`,
							`creation`:             `1756033110`,
						},
					},
				},
			},
			metricResults: `# HELP zfs_dataset_available_bytes The amount of space in bytes available to the dataset and all its children.
# TYPE zfs_dataset_available_bytes gauge
zfs_dataset_available_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024
# HELP zfs_dataset_compression_ratio The ratio of compressed size vs uncompressed size for this dataset.
# TYPE zfs_dataset_compression_ratio gauge
zfs_dataset_compression_ratio{name="testpool/test",pool="testpool",type="filesystem"} 0.4
# HELP zfs_dataset_logical_used_bytes The amount of space in bytes that is "logically" consumed by this dataset and all its descendents. See the "used_bytes" property.
# TYPE zfs_dataset_logical_used_bytes gauge
zfs_dataset_logical_used_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024
# HELP zfs_dataset_logical_referenced_bytes The amount of space that is "logically" accessible by this dataset. See the "referenced_bytes" property.
# TYPE zfs_dataset_logical_referenced_bytes gauge
zfs_dataset_logical_referenced_bytes{name="testpool/test",pool="testpool",type="filesystem"} 512
# HELP zfs_dataset_quota_bytes The maximum amount of space in bytes this dataset and its descendents can consume.
# TYPE zfs_dataset_quota_bytes gauge
zfs_dataset_quota_bytes{name="testpool/test",pool="testpool",type="filesystem"} 512
# HELP zfs_dataset_referenced_bytes The amount of data in bytes that is accessible by this dataset, which may or may not be shared with other datasets in the pool.
# TYPE zfs_dataset_referenced_bytes gauge
zfs_dataset_referenced_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024
# HELP zfs_dataset_referenced_compression_ratio The ratio of compressed size vs uncompressed size for the referenced space of this dataset. See also the "compression_ratio" property.
# TYPE zfs_dataset_referenced_compression_ratio gauge
zfs_dataset_referenced_compression_ratio{name="testpool/test",pool="testpool",type="filesystem"} 0.041666666666666664
# HELP zfs_dataset_reservation_bytes The minimum amount of space in bytes guaranteed to a dataset and its descendants.
# TYPE zfs_dataset_reservation_bytes gauge
zfs_dataset_reservation_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024
# HELP zfs_dataset_snapshot_count_total The total number of snapshots that exist under this location in the dataset tree. This value is only available when a snapshot_limit has been set somewhere in the tree under which the dataset resides.
# TYPE zfs_dataset_snapshot_count_total gauge
zfs_dataset_snapshot_count_total{name="testpool/test",pool="testpool",type="filesystem"} 12
# HELP zfs_dataset_used_by_children_bytes The amount of space in bytes used by children of this dataset, which would be freed if all the dataset's children were destroyed.
# TYPE zfs_dataset_used_by_children_bytes gauge
zfs_dataset_used_by_children_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024
# HELP zfs_dataset_used_by_snapshot_bytes The amount of space in bytes consumed by snapshots of this dataset.
# TYPE zfs_dataset_used_by_snapshot_bytes gauge
zfs_dataset_used_by_snapshot_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024
# HELP zfs_dataset_used_bytes The amount of space in bytes consumed by this dataset and all its descendents.
# TYPE zfs_dataset_used_bytes gauge
zfs_dataset_used_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024
# HELP zfs_dataset_volume_size_bytes The logical size in bytes of this volume.
# TYPE zfs_dataset_volume_size_bytes gauge
zfs_dataset_volume_size_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024
# HELP zfs_dataset_written_bytes The amount of referenced space in bytes written to this dataset since the previous snapshot.
# TYPE zfs_dataset_written_bytes gauge
zfs_dataset_written_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024
# HELP zfs_dataset_creation_timestamp The unix timestamp when this dataset was created.
# TYPE zfs_dataset_creation_timestamp gauge
zfs_dataset_creation_timestamp{name="testpool/test",pool="testpool",type="filesystem"} 1756033110
`,
		},
		{
			name:           `multiple pools`,
			kinds:          []zfs.DatasetKind{zfs.DatasetFilesystem},
			pools:          []string{`testpool1`, `testpool2`},
			propsRequested: []string{`available`},
			metricNames:    []string{`zfs_dataset_available_bytes`},
			propsResults: map[string][]datasetResults{
				`testpool1`: {
					{
						name: `testpool1/test`,
						results: map[string]string{
							`available`: `1024`,
						},
					},
				},
				`testpool2`: {
					{
						name: `testpool2/test`,
						results: map[string]string{
							`available`: `1024`,
						},
					},
				},
			},
			metricResults: `# HELP zfs_dataset_available_bytes The amount of space in bytes available to the dataset and all its children.
# TYPE zfs_dataset_available_bytes gauge
zfs_dataset_available_bytes{name="testpool1/test",pool="testpool1",type="filesystem"} 1024
zfs_dataset_available_bytes{name="testpool2/test",pool="testpool2",type="filesystem"} 1024
`,
		},
		{
			name:           `explicit pools`,
			kinds:          []zfs.DatasetKind{zfs.DatasetFilesystem},
			pools:          []string{`testpool1`, `testpool2`},
			explicitPools:  []string{`testpool1`},
			propsRequested: []string{`available`},
			metricNames:    []string{`zfs_dataset_available_bytes`},
			propsResults: map[string][]datasetResults{
				`testpool1`: {
					{
						name: `testpool1/test`,
						results: map[string]string{
							`available`: `1024`,
						},
					},
				},
				`testpool2`: {
					{
						name: `testpool2/test`,
						results: map[string]string{
							`available`: `1024`,
						},
					},
				},
			},
			metricResults: `# HELP zfs_dataset_available_bytes The amount of space in bytes available to the dataset and all its children.
# TYPE zfs_dataset_available_bytes gauge
zfs_dataset_available_bytes{name="testpool1/test",pool="testpool1",type="filesystem"} 1024
`,
		},
		{
			name:           `multiple collectors`,
			kinds:          []zfs.DatasetKind{zfs.DatasetFilesystem, zfs.DatasetSnapshot, zfs.DatasetVolume},
			pools:          []string{`testpool`},
			propsRequested: []string{`available`},
			metricNames:    []string{`zfs_dataset_available_bytes`},
			propsResults: map[string][]datasetResults{
				`testpool`: {
					{
						name: `testpool/test`,
						results: map[string]string{
							`available`: `1024`,
						},
					},
				},
			},
			metricResults: `# HELP zfs_dataset_available_bytes The amount of space in bytes available to the dataset and all its children.
# TYPE zfs_dataset_available_bytes gauge
zfs_dataset_available_bytes{name="testpool/test",pool="testpool",type="filesystem"} 1024
zfs_dataset_available_bytes{name="testpool/test",pool="testpool",type="snapshot"} 1024
zfs_dataset_available_bytes{name="testpool/test",pool="testpool",type="volume"} 1024
`,
		},
		{
			name:           `unsupported metric`,
			kinds:          []zfs.DatasetKind{zfs.DatasetFilesystem},
			pools:          []string{`testpool`},
			propsRequested: []string{`unsupported`},
			metricNames:    []string{`zfs_dataset_unsupported`},
			propsResults: map[string][]datasetResults{
				`testpool`: {
					{
						name: `testpool/test`,
						results: map[string]string{
							`unsupported`: `1024`,
						},
					},
				},
			},
			metricResults: `# HELP zfs_dataset_unsupported !!! This property is unsupported, results are likely to be undesirable, please file an issue at https://github.com/pdf/zfs_exporter/issues to have this property supported !!!
# TYPE zfs_dataset_unsupported gauge
zfs_dataset_unsupported{name="testpool/test",pool="testpool",type="filesystem"} 1024
`,
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			ctrl, ctx := gomock.WithContext(context.Background(), t)
			zfsClient := mock_zfs.NewMockClient(ctrl)
			config := defaultConfig(zfsClient)
			if tc.explicitPools != nil {
				config.Pools = tc.explicitPools
			}
			zfsClient.EXPECT().PoolNames().Return(tc.pools, nil).Times(1)
			collector, err := NewZFS(config)
			if err != nil {
				t.Fatal(err)
			}
			// Install only the collectors under test, with the requested properties.
			collector.Collectors = make(map[string]State)
			for _, kind := range tc.kinds {
				switch kind {
				case zfs.DatasetFilesystem:
					collector.Collectors[`dataset-filesystem`] = State{
						Name:       "dataset-filesystem",
						Enabled:    boolPointer(true),
						Properties: stringPointer(strings.Join(tc.propsRequested, `,`)),
						factory:    newFilesystemCollector,
					}
				case zfs.DatasetSnapshot:
					collector.Collectors[`dataset-snapshot`] = State{
						Name:       "dataset-snapshot",
						Enabled:    boolPointer(true),
						Properties: stringPointer(strings.Join(tc.propsRequested, `,`)),
						factory:    newSnapshotCollector,
					}
				case zfs.DatasetVolume:
					collector.Collectors[`dataset-volume`] = State{
						Name:       "dataset-volume",
						Enabled:    boolPointer(true),
						Properties: stringPointer(strings.Join(tc.propsRequested, `,`)),
						factory:    newVolumeCollector,
					}
				}
				// Set mock expectations for each pool this collector should query.
				for _, pool := range tc.pools {
					if tc.explicitPools != nil {
						wanted := false
						for _, explicit := range tc.explicitPools {
							if pool == explicit {
								wanted = true
								// Fix: the break was previously unconditional, so only
								// the first explicit pool was ever compared.
								break
							}
						}
						if !wanted {
							continue
						}
					}
					zfsDatasetResults := make([]zfs.DatasetProperties, len(tc.propsResults[pool]))
					for i, propResults := range tc.propsResults[pool] {
						zfsDatasetProperties := mock_zfs.NewMockDatasetProperties(ctrl)
						zfsDatasetProperties.EXPECT().DatasetName().Return(propResults.name).Times(2)
						zfsDatasetProperties.EXPECT().Properties().Return(propResults.results).Times(1)
						zfsDatasetResults[i] = zfsDatasetProperties
					}
					zfsDatasets := mock_zfs.NewMockDatasets(ctrl)
					zfsDatasets.EXPECT().Properties(tc.propsRequested).Return(zfsDatasetResults, nil).Times(1)
					zfsClient.EXPECT().Datasets(pool, kind).Return(zfsDatasets).Times(1)
				}
			}
			if err = callCollector(ctx, collector, []byte(tc.metricResults), tc.metricNames); err != nil {
				t.Fatal(err)
			}
		})
	}
}
================================================
FILE: collector/pool.go
================================================
package collector
import (
"fmt"
"log/slog"
"sync"
"github.com/pdf/zfs_exporter/v2/zfs"
"github.com/prometheus/client_golang/prometheus"
)
const (
	// defaultPoolProps is the default comma-separated property set for the
	// pool collector; overridable via the properties.pool flag.
	defaultPoolProps = `allocated,dedupratio,fragmentation,free,freeing,health,leaked,readonly,size`
)
var (
	// poolLabels are attached to every pool metric.
	poolLabels = []string{`pool`}
	// poolProperties maps supported zpool property names to metric
	// definitions. Properties absent from the store fall back to an
	// "unsupported" numeric gauge via propertyStore.find.
	poolProperties = propertyStore{
		defaultSubsystem: subsystemPool,
		defaultLabels:    poolLabels,
		store: map[string]property{
			`allocated`: newProperty(
				subsystemPool,
				`allocated_bytes`,
				`Amount of storage in bytes used within the pool.`,
				transformNumeric,
				prometheus.GaugeValue,
				poolLabels...,
			),
			`dedupratio`: newProperty(
				subsystemPool,
				`deduplication_ratio`,
				`The ratio of deduplicated size vs undeduplicated size for data in this pool.`,
				transformMultiplier,
				prometheus.GaugeValue,
				poolLabels...,
			),
			`capacity`: newProperty(
				subsystemPool,
				`capacity_ratio`,
				`Ratio of pool space used.`,
				transformPercentage,
				prometheus.GaugeValue,
				poolLabels...,
			),
			`expandsize`: newProperty(
				subsystemPool,
				`expand_size_bytes`,
				`Amount of uninitialized space within the pool or device that can be used to increase the total capacity of the pool.`,
				transformNumeric,
				prometheus.GaugeValue,
				poolLabels...,
			),
			`fragmentation`: newProperty(
				subsystemPool,
				`fragmentation_ratio`,
				`The fragmentation ratio of the pool.`,
				transformPercentage,
				prometheus.GaugeValue,
				poolLabels...,
			),
			`free`: newProperty(
				subsystemPool,
				`free_bytes`,
				`The amount of free space in bytes available in the pool.`,
				transformNumeric,
				prometheus.GaugeValue,
				poolLabels...,
			),
			`freeing`: newProperty(
				subsystemPool,
				`freeing_bytes`,
				`The amount of space in bytes remaining to be freed following the destruction of a file system or snapshot.`,
				transformNumeric,
				prometheus.GaugeValue,
				poolLabels...,
			),
			// health is encoded as an integer code; the mapping is documented in
			// the metric help text below.
			`health`: newProperty(
				subsystemPool,
				`health`,
				fmt.Sprintf("Health status code for the pool [%d: %s, %d: %s, %d: %s, %d: %s, %d: %s, %d: %s, %d: %s].",
					poolOnline, zfs.PoolOnline,
					poolDegraded, zfs.PoolDegraded,
					poolFaulted, zfs.PoolFaulted,
					poolOffline, zfs.PoolOffline,
					poolUnavail, zfs.PoolUnavail,
					poolRemoved, zfs.PoolRemoved,
					poolSuspended, zfs.PoolSuspended,
				),
				transformHealthCode,
				prometheus.GaugeValue,
				poolLabels...,
			),
			`leaked`: newProperty(
				subsystemPool,
				`leaked_bytes`,
				`Number of leaked bytes in the pool.`,
				transformNumeric,
				prometheus.GaugeValue,
				poolLabels...,
			),
			`readonly`: newProperty(
				subsystemPool,
				`readonly`,
				`Read-only status of the pool [0: read-write, 1: read-only].`,
				transformBool,
				prometheus.GaugeValue,
				poolLabels...,
			),
			`size`: newProperty(
				subsystemPool,
				`size_bytes`,
				`Total size in bytes of the storage pool.`,
				transformNumeric,
				prometheus.GaugeValue,
				poolLabels...,
			),
		},
	}
)
// init registers the pool collector (enabled by default) with its default
// property set.
func init() {
	registerCollector(`pool`, defaultEnabled, defaultPoolProps, newPoolCollector)
}
// poolCollector collects zpool-level metrics for all selected pools.
type poolCollector struct {
	log    *slog.Logger
	client zfs.Client
	props  []string // zpool property names to collect
}
// describe emits the descriptor for each configured pool property, warning on
// and skipping any property that has no registered definition.
func (c *poolCollector) describe(ch chan<- *prometheus.Desc) {
	for _, name := range c.props {
		prop, err := poolProperties.find(name)
		if err != nil {
			c.log.Warn(propertyUnsupportedMsg, `help`, helpIssue, `collector`, `pool`, `property`, name, `err`, err)
			continue
		}
		ch <- prop.desc
	}
}
// update implements Collector, collecting each pool concurrently and
// returning the first error received, if any. The excludes parameter is not
// used by the pool collector; it exists to satisfy the Collector interface.
func (c *poolCollector) update(ch chan<- metric, pools []string, excludes regexpCollection) error {
	errChan := make(chan error, len(pools))
	var wg sync.WaitGroup
	wg.Add(len(pools))
	for _, pool := range pools {
		go func(name string) {
			defer wg.Done()
			if err := c.updatePoolMetrics(ch, name); err != nil {
				errChan <- err
			}
		}(pool)
	}
	wg.Wait()
	select {
	case err := <-errChan:
		return err
	default:
	}
	return nil
}
// updatePoolMetrics queries the configured properties for one pool and emits
// a metric per returned property. Unknown properties are warned about but
// still emitted via the fallback definition returned by find.
func (c *poolCollector) updatePoolMetrics(ch chan<- metric, pool string) error {
	props, err := c.client.Pool(pool).Properties(c.props...)
	if err != nil {
		return err
	}
	labels := []string{pool}
	for name, raw := range props.Properties() {
		prop, findErr := poolProperties.find(name)
		if findErr != nil {
			c.log.Warn(propertyUnsupportedMsg, `help`, helpIssue, `collector`, `pool`, `property`, name, `err`, findErr)
		}
		if err := prop.push(ch, raw, labels...); err != nil {
			return err
		}
	}
	return nil
}
// newPoolCollector is the factoryFunc for the pool collector.
func newPoolCollector(l *slog.Logger, c zfs.Client, props []string) (Collector, error) {
	collector := poolCollector{log: l, client: c, props: props}
	return &collector, nil
}
================================================
FILE: collector/pool_test.go
================================================
package collector
import (
"context"
"strings"
"testing"
"github.com/pdf/zfs_exporter/v2/zfs/mock_zfs"
"go.uber.org/mock/gomock"
)
// TestPoolMetrics drives the pool collector against a mocked zfs.Client and
// compares the rendered Prometheus exposition output against expectations.
//
// Fix: the explicit-pool membership loop previously had an unconditional
// `break` after the first iteration, so only the first entry of
// tc.explicitPools was ever compared; the break now fires only on a match.
func TestPoolMetrics(t *testing.T) {
	testCases := []struct {
		name           string
		pools          []string            // pools reported by PoolNames()
		explicitPools  []string            // optional explicit pool selection
		propsRequested []string            // properties passed to the collector
		metricNames    []string            // metric families to gather
		propsResults   map[string]map[string]string // mocked per-pool property values
		metricResults  string              // expected exposition-format output
	}{
		{
			name:           `all metrics`,
			pools:          []string{`testpool`},
			propsRequested: []string{`allocated`, `dedupratio`, `capacity`, `expandsize`, `fragmentation`, `free`, `freeing`, `health`, `leaked`, `readonly`, `size`},
			metricNames:    []string{`zfs_pool_allocated_bytes`, `zfs_pool_deduplication_ratio`, `zfs_pool_capacity_ratio`, `zfs_pool_expand_size_bytes`, `zfs_pool_fragmentation_ratio`, `zfs_pool_free_bytes`, `zfs_pool_freeing_bytes`, `zfs_pool_health`, `zfs_pool_leaked_bytes`, `zfs_pool_readonly`, `zfs_pool_size_bytes`},
			propsResults: map[string]map[string]string{
				`testpool`: {
					`allocated`:     `1024`,
					`dedupratio`:    `2.50`,
					`capacity`:      `50`,
					`expandsize`:    `2048`,
					`fragmentation`: `25`,
					`free`:          `1024`,
					`freeing`:       `0`,
					`health`:        `ONLINE`,
					`leaked`:        `1`,
					`readonly`:      `off`,
					`size`:          `2048`,
				},
			},
			metricResults: `# HELP zfs_pool_allocated_bytes Amount of storage in bytes used within the pool.
# TYPE zfs_pool_allocated_bytes gauge
zfs_pool_allocated_bytes{pool="testpool"} 1024
# HELP zfs_pool_capacity_ratio Ratio of pool space used.
# TYPE zfs_pool_capacity_ratio gauge
zfs_pool_capacity_ratio{pool="testpool"} 0.5
# HELP zfs_pool_deduplication_ratio The ratio of deduplicated size vs undeduplicated size for data in this pool.
# TYPE zfs_pool_deduplication_ratio gauge
zfs_pool_deduplication_ratio{pool="testpool"} 0.4
# HELP zfs_pool_expand_size_bytes Amount of uninitialized space within the pool or device that can be used to increase the total capacity of the pool.
# TYPE zfs_pool_expand_size_bytes gauge
zfs_pool_expand_size_bytes{pool="testpool"} 2048
# HELP zfs_pool_fragmentation_ratio The fragmentation ratio of the pool.
# TYPE zfs_pool_fragmentation_ratio gauge
zfs_pool_fragmentation_ratio{pool="testpool"} 0.25
# HELP zfs_pool_free_bytes The amount of free space in bytes available in the pool.
# TYPE zfs_pool_free_bytes gauge
zfs_pool_free_bytes{pool="testpool"} 1024
# HELP zfs_pool_freeing_bytes The amount of space in bytes remaining to be freed following the destruction of a file system or snapshot.
# TYPE zfs_pool_freeing_bytes gauge
zfs_pool_freeing_bytes{pool="testpool"} 0
# HELP zfs_pool_health Health status code for the pool [0: ONLINE, 1: DEGRADED, 2: FAULTED, 3: OFFLINE, 4: UNAVAIL, 5: REMOVED, 6: SUSPENDED].
# TYPE zfs_pool_health gauge
zfs_pool_health{pool="testpool"} 0
# HELP zfs_pool_leaked_bytes Number of leaked bytes in the pool.
# TYPE zfs_pool_leaked_bytes gauge
zfs_pool_leaked_bytes{pool="testpool"} 1
# HELP zfs_pool_readonly Read-only status of the pool [0: read-write, 1: read-only].
# TYPE zfs_pool_readonly gauge
zfs_pool_readonly{pool="testpool"} 0
# HELP zfs_pool_size_bytes Total size in bytes of the storage pool.
# TYPE zfs_pool_size_bytes gauge
zfs_pool_size_bytes{pool="testpool"} 2048
`,
		},
		{
			name:           `multiple pools`,
			pools:          []string{`testpool1`, `testpool2`},
			propsRequested: []string{`allocated`},
			metricNames:    []string{`zfs_pool_allocated_bytes`},
			propsResults: map[string]map[string]string{
				`testpool1`: {
					`allocated`: `1024`,
				},
				`testpool2`: {
					`allocated`: `2048`,
				},
			},
			metricResults: `# HELP zfs_pool_allocated_bytes Amount of storage in bytes used within the pool.
# TYPE zfs_pool_allocated_bytes gauge
zfs_pool_allocated_bytes{pool="testpool1"} 1024
zfs_pool_allocated_bytes{pool="testpool2"} 2048
`,
		},
		{
			name:           `explicit pools`,
			pools:          []string{`testpool1`, `testpool2`},
			explicitPools:  []string{`testpool1`},
			propsRequested: []string{`allocated`},
			metricNames:    []string{`zfs_pool_allocated_bytes`},
			propsResults: map[string]map[string]string{
				`testpool1`: {
					`allocated`: `1024`,
				},
				`testpool2`: {
					`allocated`: `2048`,
				},
			},
			metricResults: `# HELP zfs_pool_allocated_bytes Amount of storage in bytes used within the pool.
# TYPE zfs_pool_allocated_bytes gauge
zfs_pool_allocated_bytes{pool="testpool1"} 1024
`,
		},
		{
			name:           `health status`,
			pools:          []string{`onlinepool`, `degradedpool`, `faultedpool`, `offlinepool`, `unavailpool`, `removedpool`, `suspendedpool`},
			propsRequested: []string{`health`},
			metricNames:    []string{`zfs_pool_health`},
			propsResults: map[string]map[string]string{
				`onlinepool`: {
					`health`: `ONLINE`,
				},
				`degradedpool`: {
					`health`: `DEGRADED`,
				},
				`faultedpool`: {
					`health`: `FAULTED`,
				},
				`offlinepool`: {
					`health`: `OFFLINE`,
				},
				`unavailpool`: {
					`health`: `UNAVAIL`,
				},
				`removedpool`: {
					`health`: `REMOVED`,
				},
				`suspendedpool`: {
					`health`: `SUSPENDED`,
				},
			},
			metricResults: `# HELP zfs_pool_health Health status code for the pool [0: ONLINE, 1: DEGRADED, 2: FAULTED, 3: OFFLINE, 4: UNAVAIL, 5: REMOVED, 6: SUSPENDED].
# TYPE zfs_pool_health gauge
zfs_pool_health{pool="onlinepool"} 0
zfs_pool_health{pool="degradedpool"} 1
zfs_pool_health{pool="faultedpool"} 2
zfs_pool_health{pool="offlinepool"} 3
zfs_pool_health{pool="unavailpool"} 4
zfs_pool_health{pool="removedpool"} 5
zfs_pool_health{pool="suspendedpool"} 6
`,
		},
		{
			name:           `unsupported metric`,
			pools:          []string{`testpool`},
			propsRequested: []string{`unsupported`},
			metricNames:    []string{`zfs_pool_unsupported`},
			propsResults: map[string]map[string]string{
				`testpool`: {
					`unsupported`: `1024`,
				},
			},
			metricResults: `# HELP zfs_pool_unsupported !!! This property is unsupported, results are likely to be undesirable, please file an issue at https://github.com/pdf/zfs_exporter/issues to have this property supported !!!
# TYPE zfs_pool_unsupported gauge
zfs_pool_unsupported{pool="testpool"} 1024
`,
		},
		{
			name:           `legacy fragmentation/dedupratio`,
			pools:          []string{`testpool`},
			propsRequested: []string{`fragmentation`, `dedupratio`},
			metricNames:    []string{`zfs_pool_fragmentation_ratio`, `zfs_pool_deduplication_ratio`},
			propsResults: map[string]map[string]string{
				`testpool`: {
					`fragmentation`: `5%`,
					`dedupratio`:    `2.50x`,
				},
			},
			metricResults: `# HELP zfs_pool_fragmentation_ratio The fragmentation ratio of the pool.
# TYPE zfs_pool_fragmentation_ratio gauge
zfs_pool_fragmentation_ratio{pool="testpool"} 0.05
# HELP zfs_pool_deduplication_ratio The ratio of deduplicated size vs undeduplicated size for data in this pool.
# TYPE zfs_pool_deduplication_ratio gauge
zfs_pool_deduplication_ratio{pool="testpool"} 0.4
`,
		},
	}
	for _, tc := range testCases {
		tc := tc // capture range variable for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			ctrl, ctx := gomock.WithContext(context.Background(), t)
			zfsClient := mock_zfs.NewMockClient(ctrl)
			config := defaultConfig(zfsClient)
			if tc.explicitPools != nil {
				config.Pools = tc.explicitPools
			}
			zfsClient.EXPECT().PoolNames().Return(tc.pools, nil).Times(1)
			for _, pool := range tc.pools {
				if tc.explicitPools != nil {
					// Only set expectations for pools the config selects.
					wanted := false
					for _, explicit := range tc.explicitPools {
						if pool == explicit {
							wanted = true
							break // FIX: break only once a match is found
						}
					}
					if !wanted {
						continue
					}
				}
				zfsPoolProperties := mock_zfs.NewMockPoolProperties(ctrl)
				zfsPoolProperties.EXPECT().Properties().Return(tc.propsResults[pool]).Times(1)
				zfsPool := mock_zfs.NewMockPool(ctrl)
				zfsPool.EXPECT().Properties(tc.propsRequested).Return(zfsPoolProperties, nil).Times(1)
				zfsClient.EXPECT().Pool(pool).Return(zfsPool).Times(1)
			}
			collector, err := NewZFS(config)
			if err != nil {
				t.Fatal(err)
			}
			collector.Collectors = map[string]State{
				`pool`: {
					Name:       "pool",
					Enabled:    boolPointer(true),
					Properties: stringPointer(strings.Join(tc.propsRequested, `,`)),
					factory:    newPoolCollector,
				},
			}
			if err = callCollector(ctx, collector, []byte(tc.metricResults), tc.metricNames); err != nil {
				t.Fatal(err)
			}
		})
	}
}
================================================
FILE: collector/transform.go
================================================
package collector
import (
"fmt"
"strconv"
"github.com/pdf/zfs_exporter/v2/zfs"
)
// poolHealthCode is the numeric encoding of a zpool health status, as
// exported by the zfs_pool_health metric.
type poolHealthCode int

// Values must stay in this order — they are the documented metric values
// [0: ONLINE, 1: DEGRADED, 2: FAULTED, 3: OFFLINE, 4: UNAVAIL, 5: REMOVED, 6: SUSPENDED].
const (
	poolOnline poolHealthCode = iota
	poolDegraded
	poolFaulted
	poolOffline
	poolUnavail
	poolRemoved
	poolSuspended
)
// transformNumeric parses a ZFS numeric property value. The sentinel values
// `-` and `none` (unset properties) are treated as zero.
func transformNumeric(value string) (float64, error) {
	switch value {
	case `-`, `none`:
		return 0, nil
	default:
		return strconv.ParseFloat(value, 64)
	}
}
// transformHealthCode maps a zpool health status string (e.g. `ONLINE`) to
// its numeric poolHealthCode for export. Unrecognized statuses return -1
// and an error.
//
// Fix: corrected typo in the error message ("heath" -> "health").
func transformHealthCode(status string) (float64, error) {
	var result poolHealthCode
	switch zfs.PoolStatus(status) {
	case zfs.PoolOnline:
		result = poolOnline
	case zfs.PoolDegraded:
		result = poolDegraded
	case zfs.PoolFaulted:
		result = poolFaulted
	case zfs.PoolOffline:
		result = poolOffline
	case zfs.PoolUnavail:
		result = poolUnavail
	case zfs.PoolRemoved:
		result = poolRemoved
	case zfs.PoolSuspended:
		result = poolSuspended
	default:
		return -1, fmt.Errorf(`unknown pool health status: %s`, status)
	}
	return float64(result), nil
}
// transformBool converts the boolean-ish strings emitted by ZFS properties
// into 1 (truthy) or 0 (falsy, including the unset sentinel `-`); any other
// value yields -1 and an error.
func transformBool(value string) (float64, error) {
	for _, truthy := range []string{`on`, `yes`, `enabled`, `active`} {
		if value == truthy {
			return 1, nil
		}
	}
	for _, falsy := range []string{`off`, `no`, `disabled`, `inactive`, `-`} {
		if value == falsy {
			return 0, nil
		}
	}
	return -1, fmt.Errorf(`could not convert '%s' to bool`, value)
}
// transformPercentage converts a 0-100 percentage (optionally with a legacy
// trailing '%', e.g. `5%`) into a 0-1 ratio.
func transformPercentage(value string) (float64, error) {
	// Strip a legacy trailing '%' if present.
	if n := len(value); n > 0 && value[n-1] == '%' {
		value = value[:n-1]
	}
	parsed, err := transformNumeric(value)
	if err != nil {
		return -1, err
	}
	return parsed / 100, nil
}
// transformMultiplier converts a multiplier value (optionally with a legacy
// trailing 'x', e.g. `2.50x`) into its inverse ratio (2.50 -> 0.4).
func transformMultiplier(value string) (float64, error) {
	// Strip a legacy trailing 'x' if present.
	if n := len(value); n > 0 && value[n-1] == 'x' {
		value = value[:n-1]
	}
	parsed, err := transformNumeric(value)
	if err != nil {
		return -1, err
	}
	return 1 / parsed, nil
}
================================================
FILE: collector/zfs.go
================================================
package collector
import (
"context"
"log/slog"
"regexp"
"sort"
"strings"
"sync"
"time"
"github.com/pdf/zfs_exporter/v2/zfs"
"github.com/prometheus/client_golang/prometheus"
)
type regexpCollection []*regexp.Regexp
func (c regexpCollection) MatchString(input string) bool {
for _, r := range c {
if r.MatchString(input) {
return true
}
}
return false
}
// ZFSConfig configures a ZFS collector
type ZFSConfig struct {
	DisableMetrics bool          // suppress per-collector duration/success metrics
	Deadline       time.Duration // per-scrape timeout for Collect
	Pools          []string      // pools to export; empty means all available pools
	Excludes       []string      // regexp patterns compiled and passed to collectors as exclusions
	Logger         *slog.Logger
	ZFSClient      zfs.Client
}
// ZFS collector
type ZFS struct {
	Pools          []string
	Collectors     map[string]State
	client         zfs.Client
	disableMetrics bool
	deadline       time.Duration
	cache          *metricCache  // last complete metric set, served when busy or delayed
	ready          chan struct{} // 1-buffered token gating concurrent Collect calls
	logger         *slog.Logger
	excludes       regexpCollection
}
// Describe implements the prometheus.Collector interface.
func (c *ZFS) Describe(ch chan<- *prometheus.Desc) {
	if !c.disableMetrics {
		ch <- scrapeDurationDesc
		ch <- scrapeSuccessDesc
	}
	// Instantiate each enabled collector just to obtain its descriptors;
	// factories that fail are silently skipped here.
	for _, state := range c.Collectors {
		if !*state.Enabled {
			continue
		}
		props := strings.Split(*state.Properties, `,`)
		coll, err := state.factory(c.logger, c.client, props)
		if err != nil {
			continue
		}
		coll.describe(ch)
	}
}
// Collect implements the prometheus.Collector interface.
//
// Scrapes are serialized via the c.ready token channel: if another scrape is
// already in flight, the previous complete cache is served instead. Enabled
// collectors run concurrently, streaming metrics through a proxy channel so
// they can be cached as they are emitted; if the deadline expires, cached
// values are sent for any metrics not yet reported during this scrape.
func (c *ZFS) Collect(ch chan<- prometheus.Metric) {
	// Non-blocking readiness check: fall back to the cache when busy.
	select {
	case <-c.ready:
	default:
		c.sendCached(ch, make(map[string]struct{}))
		return
	}
	ctx, cancel := context.WithTimeout(context.Background(), c.deadline)
	defer cancel()
	cache := newMetricCache()
	proxy := make(chan metric)
	// Synchronize on collector completion.
	wg := sync.WaitGroup{}
	wg.Add(len(c.Collectors))
	// Synchronize after timeout event, ensuring no writers are still active when we return control.
	timeout := make(chan struct{})
	finalized := make(chan struct{})
	// finalize is an idempotent close of the finalized channel.
	finalize := func() {
		select {
		case <-finalized:
		default:
			close(finalized)
		}
	}
	// Close the proxy channel upon collector completion.
	go func() {
		wg.Wait()
		close(proxy)
	}()
	// Cache metrics as they come in via the proxy channel, and ship them out if we've not exceeded the deadline.
	go func() {
		for metric := range proxy {
			cache.add(metric)
			select {
			case <-timeout:
				// Deadline already hit: stop writing to ch, keep caching.
				finalize()
			default:
				ch <- metric.prometheus
			}
		}
		// Signal completion and update full cache.
		c.cache.replace(cache)
		cancel()
		// Notify next collection that we're ready to collect again
		c.ready <- struct{}{}
	}()
	pools, poolErr := c.getPools(c.Pools)
	for name, state := range c.Collectors {
		if !*state.Enabled {
			wg.Done()
			continue
		}
		if poolErr != nil {
			// Pool enumeration failed: publish failure metrics for this collector.
			c.publishCollectorMetrics(ctx, name, poolErr, 0, proxy)
			wg.Done()
			continue
		}
		collector, err := state.factory(c.logger, c.client, strings.Split(*state.Properties, `,`))
		if err != nil {
			c.logger.Error("Error instantiating collector", "collector", name, "err", err)
			wg.Done()
			continue
		}
		go func(name string, collector Collector) {
			c.execute(ctx, name, collector, proxy, pools)
			wg.Done()
		}(name, collector)
	}
	// Wait for completion or timeout
	<-ctx.Done()
	err := ctx.Err()
	if err == context.Canceled {
		finalize()
	} else if err != nil {
		// Upon exceeding deadline, send cached data for any metrics that have not already been reported.
		close(timeout) // assert timeout for flow control in other goroutines
		c.cache.merge(cache)
		cacheIndex := cache.index()
		c.sendCached(ch, cacheIndex)
	}
	// Ensure there are no in-flight writes to the upstream channel
	<-finalized
}
// sendCached emits every cached metric whose name is NOT present in
// cacheIndex (i.e. not already reported during the current scrape).
func (c *ZFS) sendCached(ch chan<- prometheus.Metric, cacheIndex map[string]struct{}) {
	c.cache.RLock()
	defer c.cache.RUnlock()
	for name, cached := range c.cache.cache {
		if _, reported := cacheIndex[name]; !reported {
			ch <- cached
		}
	}
}
// getPools resolves the configured pool list against the pools actually
// present on the host. With no explicit configuration, all pools are
// returned; configured-but-missing pools are logged and skipped.
func (c *ZFS) getPools(pools []string) ([]string, error) {
	available, err := c.client.PoolNames()
	if err != nil {
		return nil, err
	}
	// Return all pools if not explicitly configured.
	if len(pools) == 0 {
		return available, nil
	}
	result := make([]string, 0, len(pools))
	for _, want := range pools {
		found := false
		for _, name := range available {
			if name == want {
				found = true
				break
			}
		}
		if found {
			result = append(result, want)
		} else {
			c.logger.Warn("Pool unavailable", "pool", want)
		}
	}
	return result, nil
}
// execute runs a single collector against the resolved pool list, timing
// the run, then publishes the per-collector duration/success metrics.
func (c *ZFS) execute(ctx context.Context, name string, collector Collector, ch chan<- metric, pools []string) {
	begin := time.Now()
	err := collector.update(ch, pools, c.excludes)
	duration := time.Since(begin)
	c.publishCollectorMetrics(ctx, name, err, duration, ch)
}
// publishCollectorMetrics logs the outcome of a collector run and, unless
// metrics are disabled, emits the scrape duration and success gauges.
// Success is 0 if the collector errored or the deadline expired before it
// finished; context cancellation (the normal completion path — see Collect)
// still counts as success.
func (c *ZFS) publishCollectorMetrics(ctx context.Context, name string, err error, duration time.Duration, ch chan<- metric) {
	var success float64
	if err != nil {
		c.logger.Error("Executing collector", "status", "error", "collector", name, "durationSeconds", duration.Seconds(), "err", err)
		success = 0
	} else {
		// Collector succeeded: check whether the context expired meanwhile.
		select {
		case <-ctx.Done():
			err = ctx.Err()
		default:
			err = nil
		}
		if err != nil && err != context.Canceled {
			// Deadline exceeded: the run completed, but too late.
			c.logger.Warn("Executing collector", "status", "delayed", "collector", name, "durationSeconds", duration.Seconds(), "err", ctx.Err())
			success = 0
		} else {
			c.logger.Debug("Executing collector", "status", "ok", "collector", name, "durationSeconds", duration.Seconds())
			success = 1
		}
	}
	if c.disableMetrics {
		return
	}
	ch <- metric{
		name:       scrapeDurationDescName,
		prometheus: prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name),
	}
	ch <- metric{
		name:       scrapeSuccessDescName,
		prometheus: prometheus.MustNewConstMetric(scrapeSuccessDesc, prometheus.GaugeValue, success, name),
	}
}
// NewZFS instantiates a ZFS collector with the provided ZFSConfig
func NewZFS(config ZFSConfig) (*ZFS, error) {
	sort.Strings(config.Pools)
	sort.Strings(config.Excludes)
	// Pre-compile exclusion patterns; invalid patterns panic via MustCompile.
	excludes := make(regexpCollection, 0, len(config.Excludes))
	for _, pattern := range config.Excludes {
		excludes = append(excludes, regexp.MustCompile(pattern))
	}
	// Seed the readiness token so the first Collect proceeds immediately.
	ready := make(chan struct{}, 1)
	ready <- struct{}{}
	z := &ZFS{
		disableMetrics: config.DisableMetrics,
		client:         config.ZFSClient,
		deadline:       config.Deadline,
		Pools:          config.Pools,
		Collectors:     collectorStates,
		excludes:       excludes,
		cache:          newMetricCache(),
		ready:          ready,
		logger:         config.Logger,
	}
	return z, nil
}
================================================
FILE: collector/zfs_test.go
================================================
package collector
import (
"context"
"errors"
"testing"
"github.com/pdf/zfs_exporter/v2/zfs/mock_zfs"
"go.uber.org/mock/gomock"
)
// TestZFSCollectInvalidPools verifies that a PoolNames() failure is surfaced
// as a zero success gauge for the pool collector.
//
// Fix: the NewZFS error was previously checked AFTER assigning
// collector.Collectors — a constructor failure would have caused a nil
// pointer dereference instead of a clean t.Fatal. The check now happens
// immediately after construction.
func TestZFSCollectInvalidPools(t *testing.T) {
	const result = `# HELP zfs_scrape_collector_duration_seconds zfs_exporter: Duration of a collector scrape.
# TYPE zfs_scrape_collector_duration_seconds gauge
zfs_scrape_collector_duration_seconds{collector="pool"} 0
# HELP zfs_scrape_collector_success zfs_exporter: Whether a collector succeeded.
# TYPE zfs_scrape_collector_success gauge
zfs_scrape_collector_success{collector="pool"} 0
`
	ctrl, ctx := gomock.WithContext(context.Background(), t)
	zfsClient := mock_zfs.NewMockClient(ctrl)
	zfsClient.EXPECT().PoolNames().Return(nil, errors.New(`Error returned from PoolNames()`)).Times(1)
	config := defaultConfig(zfsClient)
	config.DisableMetrics = false
	collector, err := NewZFS(config)
	if err != nil {
		t.Fatal(err)
	}
	collector.Collectors = map[string]State{
		`pool`: {
			Name:       "pool",
			Enabled:    boolPointer(true),
			Properties: stringPointer(``),
			factory:    newPoolCollector,
		},
	}
	if err = callCollector(ctx, collector, []byte(result), []string{`zfs_scrape_collector_duration_seconds`, `zfs_scrape_collector_success`}); err != nil {
		t.Fatal(err)
	}
}
================================================
FILE: go.mod
================================================
module github.com/pdf/zfs_exporter/v2
go 1.24.0
toolchain go1.24.2
require (
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
github.com/prometheus/client_golang v1.23.2
github.com/prometheus/common v0.67.4
golang.org/x/sys v0.38.0 // indirect
)
require (
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/prometheus/exporter-toolkit v0.15.0
go.uber.org/mock v0.6.0
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.6.0 // indirect
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mdlayher/socket v0.5.1 // indirect
github.com/mdlayher/vsock v1.2.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/procfs v0.19.2 // indirect
github.com/rogpeppe/go-internal v1.11.0 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.33.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.38.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
)
tool go.uber.org/mock/mockgen
================================================
FILE: go.sum
================================================
github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos=
github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ=
github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
github.com/prometheus/exporter-toolkit v0.15.0 h1:Pcle5sSViwR1x0gdPd0wtYrPQENBieQAM7TmT0qtb2U=
github.com/prometheus/exporter-toolkit v0.15.0/go.mod h1:OyRWd2iTo6Xge9Kedvv0IhCrJSBu36JCfJ2yVniRIYk=
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
================================================
FILE: zfs/dataset.go
================================================
package zfs
import (
"strings"
)
// DatasetKind enum of supported dataset types
//
// Values are the literal type names accepted by `zfs get -t`.
type DatasetKind string

const (
	// DatasetFilesystem enum entry
	DatasetFilesystem DatasetKind = `filesystem`
	// DatasetVolume enum entry
	DatasetVolume DatasetKind = `volume`
	// DatasetSnapshot enum entry
	DatasetSnapshot DatasetKind = `snapshot`
)
// datasetsImpl provides access to datasets of a single kind within one pool.
type datasetsImpl struct {
	pool string      // pool the datasets belong to
	kind DatasetKind // dataset type queried (filesystem/volume/snapshot)
}

// Pool returns the name of the pool this accessor is scoped to.
func (d datasetsImpl) Pool() string {
	return d.pool
}

// Kind returns the dataset type this accessor queries.
func (d datasetsImpl) Kind() DatasetKind {
	return d.kind
}
// Properties shells out to `zfs get` for the requested property names on all
// datasets of this kind in the pool, returning one entry per dataset.
func (d datasetsImpl) Properties(props ...string) ([]DatasetProperties, error) {
	h := newDatasetHandler()
	err := execute(d.pool, h, `zfs`, `get`, `-Hprt`, string(d.kind), `-o`, `name,property,value`, strings.Join(props, `,`))
	if err != nil {
		return nil, err
	}
	return h.datasets(), nil
}
// datasetPropertiesImpl holds the parsed property values for one dataset.
type datasetPropertiesImpl struct {
	datasetName string            // fully-qualified dataset name
	properties  map[string]string // property name -> raw string value
}

// DatasetName returns the fully-qualified name of the dataset.
func (p *datasetPropertiesImpl) DatasetName() string {
	return p.datasetName
}

// Properties returns the raw property name/value map for the dataset.
func (p *datasetPropertiesImpl) Properties() map[string]string {
	return p.properties
}
// datasetHandler handles parsing of the data returned from the CLI into Dataset structs
type datasetHandler struct {
	store map[string]*datasetPropertiesImpl // keyed by dataset name
}
// processLine implements the handler interface: each CLI output line must be
// exactly name/property/value with the name rooted at the given pool.
func (h *datasetHandler) processLine(pool string, line []string) error {
	if len(line) != 3 || !strings.HasPrefix(line[0], pool) {
		return ErrInvalidOutput
	}
	name, property, value := line[0], line[1], line[2]
	props, ok := h.store[name]
	if !ok {
		props = newDatasetPropertiesImpl(name)
		h.store[name] = props
	}
	props.properties[property] = value
	return nil
}
// datasets returns the accumulated per-dataset properties in map iteration
// order (unordered).
func (h *datasetHandler) datasets() []DatasetProperties {
	result := make([]DatasetProperties, 0, len(h.store))
	for _, props := range h.store {
		result = append(result, props)
	}
	return result
}
// newDatasetPropertiesImpl allocates an empty property map for the named dataset.
func newDatasetPropertiesImpl(name string) *datasetPropertiesImpl {
	return &datasetPropertiesImpl{
		datasetName: name,
		properties:  make(map[string]string),
	}
}

// newDatasetsImpl returns a dataset accessor scoped to one pool and kind.
func newDatasetsImpl(pool string, kind DatasetKind) datasetsImpl {
	return datasetsImpl{
		pool: pool,
		kind: kind,
	}
}

// newDatasetHandler returns a handler with an empty dataset store.
func newDatasetHandler() *datasetHandler {
	return &datasetHandler{
		store: make(map[string]*datasetPropertiesImpl),
	}
}
================================================
FILE: zfs/mock_zfs/mock_zfs.go
================================================
// Code generated by MockGen. DO NOT EDIT.
// Source: zfs.go
//
// Generated by this command:
//
// mockgen -source=zfs.go -destination=mock_zfs/mock_zfs.go -package=mock_zfs
//
// Package mock_zfs is a generated GoMock package.
package mock_zfs
import (
reflect "reflect"
zfs "github.com/pdf/zfs_exporter/v2/zfs"
gomock "go.uber.org/mock/gomock"
)
// NOTE(review): this file is generated by MockGen (see file header); do not
// hand-edit — regenerate with `mockgen -source=zfs.go` instead.

// MockClient is a mock of Client interface.
type MockClient struct {
	ctrl     *gomock.Controller
	recorder *MockClientMockRecorder
	isgomock struct{}
}

// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
	mock *MockClient
}

// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
	mock := &MockClient{ctrl: ctrl}
	mock.recorder = &MockClientMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
	return m.recorder
}

// Datasets mocks base method.
func (m *MockClient) Datasets(pool string, kind zfs.DatasetKind) zfs.Datasets {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Datasets", pool, kind)
	ret0, _ := ret[0].(zfs.Datasets)
	return ret0
}

// Datasets indicates an expected call of Datasets.
func (mr *MockClientMockRecorder) Datasets(pool, kind any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Datasets", reflect.TypeOf((*MockClient)(nil).Datasets), pool, kind)
}

// Pool mocks base method.
func (m *MockClient) Pool(name string) zfs.Pool {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Pool", name)
	ret0, _ := ret[0].(zfs.Pool)
	return ret0
}

// Pool indicates an expected call of Pool.
func (mr *MockClientMockRecorder) Pool(name any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pool", reflect.TypeOf((*MockClient)(nil).Pool), name)
}

// PoolNames mocks base method.
func (m *MockClient) PoolNames() ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PoolNames")
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PoolNames indicates an expected call of PoolNames.
func (mr *MockClientMockRecorder) PoolNames() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PoolNames", reflect.TypeOf((*MockClient)(nil).PoolNames))
}
// NOTE(review): generated by MockGen — regenerate rather than hand-editing.

// MockPool is a mock of Pool interface.
type MockPool struct {
	ctrl     *gomock.Controller
	recorder *MockPoolMockRecorder
	isgomock struct{}
}

// MockPoolMockRecorder is the mock recorder for MockPool.
type MockPoolMockRecorder struct {
	mock *MockPool
}

// NewMockPool creates a new mock instance.
func NewMockPool(ctrl *gomock.Controller) *MockPool {
	mock := &MockPool{ctrl: ctrl}
	mock.recorder = &MockPoolMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockPool) EXPECT() *MockPoolMockRecorder {
	return m.recorder
}

// Name mocks base method.
func (m *MockPool) Name() string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Name")
	ret0, _ := ret[0].(string)
	return ret0
}

// Name indicates an expected call of Name.
func (mr *MockPoolMockRecorder) Name() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockPool)(nil).Name))
}

// Properties mocks base method.
func (m *MockPool) Properties(props ...string) (zfs.PoolProperties, error) {
	m.ctrl.T.Helper()
	varargs := []any{}
	for _, a := range props {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "Properties", varargs...)
	ret0, _ := ret[0].(zfs.PoolProperties)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Properties indicates an expected call of Properties.
func (mr *MockPoolMockRecorder) Properties(props ...any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Properties", reflect.TypeOf((*MockPool)(nil).Properties), props...)
}
// MockPoolProperties is a mock of PoolProperties interface.
// NOTE(review): mockgen-generated code — regenerate rather than hand-edit.
type MockPoolProperties struct {
	ctrl     *gomock.Controller
	recorder *MockPoolPropertiesMockRecorder
	// mockgen-inserted marker field.
	isgomock struct{}
}

// MockPoolPropertiesMockRecorder is the mock recorder for MockPoolProperties.
type MockPoolPropertiesMockRecorder struct {
	mock *MockPoolProperties
}

// NewMockPoolProperties creates a new mock instance.
func NewMockPoolProperties(ctrl *gomock.Controller) *MockPoolProperties {
	mock := &MockPoolProperties{ctrl: ctrl}
	mock.recorder = &MockPoolPropertiesMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockPoolProperties) EXPECT() *MockPoolPropertiesMockRecorder {
	return m.recorder
}

// Properties mocks base method. Returns nil if no map return was configured.
func (m *MockPoolProperties) Properties() map[string]string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Properties")
	ret0, _ := ret[0].(map[string]string)
	return ret0
}

// Properties indicates an expected call of Properties.
func (mr *MockPoolPropertiesMockRecorder) Properties() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Properties", reflect.TypeOf((*MockPoolProperties)(nil).Properties))
}
// MockDatasets is a mock of Datasets interface.
// NOTE(review): mockgen-generated code — regenerate rather than hand-edit.
type MockDatasets struct {
	ctrl     *gomock.Controller
	recorder *MockDatasetsMockRecorder
	// mockgen-inserted marker field.
	isgomock struct{}
}

// MockDatasetsMockRecorder is the mock recorder for MockDatasets.
type MockDatasetsMockRecorder struct {
	mock *MockDatasets
}

// NewMockDatasets creates a new mock instance.
func NewMockDatasets(ctrl *gomock.Controller) *MockDatasets {
	mock := &MockDatasets{ctrl: ctrl}
	mock.recorder = &MockDatasetsMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDatasets) EXPECT() *MockDatasetsMockRecorder {
	return m.recorder
}

// Kind mocks base method.
func (m *MockDatasets) Kind() zfs.DatasetKind {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Kind")
	ret0, _ := ret[0].(zfs.DatasetKind)
	return ret0
}

// Kind indicates an expected call of Kind.
func (mr *MockDatasetsMockRecorder) Kind() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Kind", reflect.TypeOf((*MockDatasets)(nil).Kind))
}

// Pool mocks base method.
func (m *MockDatasets) Pool() string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Pool")
	ret0, _ := ret[0].(string)
	return ret0
}

// Pool indicates an expected call of Pool.
func (mr *MockDatasetsMockRecorder) Pool() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pool", reflect.TypeOf((*MockDatasets)(nil).Pool))
}

// Properties mocks base method. Variadic args are flattened into a []any so
// each prop is matched individually by the recorded expectation.
func (m *MockDatasets) Properties(props ...string) ([]zfs.DatasetProperties, error) {
	m.ctrl.T.Helper()
	varargs := []any{}
	for _, a := range props {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "Properties", varargs...)
	ret0, _ := ret[0].([]zfs.DatasetProperties)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Properties indicates an expected call of Properties.
func (mr *MockDatasetsMockRecorder) Properties(props ...any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Properties", reflect.TypeOf((*MockDatasets)(nil).Properties), props...)
}
// MockDatasetProperties is a mock of DatasetProperties interface.
// NOTE(review): mockgen-generated code — regenerate rather than hand-edit.
type MockDatasetProperties struct {
	ctrl     *gomock.Controller
	recorder *MockDatasetPropertiesMockRecorder
	// mockgen-inserted marker field.
	isgomock struct{}
}

// MockDatasetPropertiesMockRecorder is the mock recorder for MockDatasetProperties.
type MockDatasetPropertiesMockRecorder struct {
	mock *MockDatasetProperties
}

// NewMockDatasetProperties creates a new mock instance.
func NewMockDatasetProperties(ctrl *gomock.Controller) *MockDatasetProperties {
	mock := &MockDatasetProperties{ctrl: ctrl}
	mock.recorder = &MockDatasetPropertiesMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDatasetProperties) EXPECT() *MockDatasetPropertiesMockRecorder {
	return m.recorder
}

// DatasetName mocks base method.
func (m *MockDatasetProperties) DatasetName() string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DatasetName")
	ret0, _ := ret[0].(string)
	return ret0
}

// DatasetName indicates an expected call of DatasetName.
func (mr *MockDatasetPropertiesMockRecorder) DatasetName() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DatasetName", reflect.TypeOf((*MockDatasetProperties)(nil).DatasetName))
}

// Properties mocks base method. Returns nil if no map return was configured.
func (m *MockDatasetProperties) Properties() map[string]string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Properties")
	ret0, _ := ret[0].(map[string]string)
	return ret0
}

// Properties indicates an expected call of Properties.
func (mr *MockDatasetPropertiesMockRecorder) Properties() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Properties", reflect.TypeOf((*MockDatasetProperties)(nil).Properties))
}
// Mockhandler is a mock of handler interface (the unexported line-processing
// interface in zfs.go).
// NOTE(review): mockgen-generated code — regenerate rather than hand-edit.
type Mockhandler struct {
	ctrl     *gomock.Controller
	recorder *MockhandlerMockRecorder
	// mockgen-inserted marker field.
	isgomock struct{}
}

// MockhandlerMockRecorder is the mock recorder for Mockhandler.
type MockhandlerMockRecorder struct {
	mock *Mockhandler
}

// NewMockhandler creates a new mock instance.
func NewMockhandler(ctrl *gomock.Controller) *Mockhandler {
	mock := &Mockhandler{ctrl: ctrl}
	mock.recorder = &MockhandlerMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *Mockhandler) EXPECT() *MockhandlerMockRecorder {
	return m.recorder
}

// processLine mocks base method.
func (m *Mockhandler) processLine(pool string, line []string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "processLine", pool, line)
	ret0, _ := ret[0].(error)
	return ret0
}

// processLine indicates an expected call of processLine.
func (mr *MockhandlerMockRecorder) processLine(pool, line any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "processLine", reflect.TypeOf((*Mockhandler)(nil).processLine), pool, line)
}
================================================
FILE: zfs/pool.go
================================================
package zfs
import (
"bufio"
"fmt"
"io"
"os/exec"
"strings"
)
// PoolStatus enum contains status text. The values mirror the health status
// strings emitted by the ZFS CLI (presumably consumed by the collector's
// health-code transform — see collector/transform.go).
type PoolStatus string

const (
	// PoolOnline enum entry (`ONLINE`).
	PoolOnline PoolStatus = `ONLINE`
	// PoolDegraded enum entry (`DEGRADED`).
	PoolDegraded PoolStatus = `DEGRADED`
	// PoolFaulted enum entry (`FAULTED`).
	PoolFaulted PoolStatus = `FAULTED`
	// PoolOffline enum entry (`OFFLINE`).
	PoolOffline PoolStatus = `OFFLINE`
	// PoolUnavail enum entry (`UNAVAIL`).
	PoolUnavail PoolStatus = `UNAVAIL`
	// PoolRemoved enum entry (`REMOVED`).
	PoolRemoved PoolStatus = `REMOVED`
	// PoolSuspended enum entry (`SUSPENDED`).
	PoolSuspended PoolStatus = `SUSPENDED`
)
// poolImpl is the CLI-backed implementation of the Pool interface; it is
// identified solely by the pool name and holds no other state.
type poolImpl struct {
	name string
}

// Name returns the pool's name.
func (p poolImpl) Name() string {
	return p.name
}
// Properties fetches the requested pool properties by running
// `zpool get -Hpo name,property,value <props> <pool>` and parsing its
// tab-separated output. The returned PoolProperties is non-nil even when an
// error occurs, holding whatever lines were parsed before the failure.
func (p poolImpl) Properties(props ...string) (PoolProperties, error) {
	collected := newPoolPropertiesImpl()
	err := execute(p.name, collected, `zpool`, `get`, `-Hpo`, `name,property,value`, strings.Join(props, `,`))
	return collected, err
}
// poolPropertiesImpl accumulates pool properties parsed from `zpool get`
// output, one key/value pair per processed line.
type poolPropertiesImpl struct {
	properties map[string]string
}

// Properties returns the property map collected so far.
func (p *poolPropertiesImpl) Properties() map[string]string {
	return p.properties
}

// processLine implements the handler interface. A valid line has exactly
// three fields (pool name, property, value) and must name the pool being
// queried; anything else is rejected with ErrInvalidOutput.
func (p *poolPropertiesImpl) processLine(pool string, line []string) error {
	if len(line) == 3 && line[0] == pool {
		p.properties[line[1]] = line[2]
		return nil
	}
	return ErrInvalidOutput
}
// poolNames returns a list of available pool names by running
// `zpool list -Ho name` and collecting one name per output line.
// Exposed to callers via Client.PoolNames.
func poolNames() ([]string, error) {
	pools := make([]string, 0)
	cmd := exec.Command(`zpool`, `list`, `-Ho`, `name`)
	out, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	scanner := bufio.NewScanner(out)
	if err = cmd.Start(); err != nil {
		return nil, fmt.Errorf("failed to start command '%s': %w", cmd.String(), err)
	}
	for scanner.Scan() {
		pools = append(pools, scanner.Text())
	}
	// bufio.Scanner swallows read errors during Scan; capture them explicitly
	// so a truncated read is not silently reported as a short pool list.
	// Wait() is still called first so the child is always reaped.
	scanErr := scanner.Err()
	stde, _ := io.ReadAll(stderr)
	if err = cmd.Wait(); err != nil {
		return nil, fmt.Errorf("failed to execute command '%s'; output: '%s' (%w)", cmd.String(), strings.TrimSpace(string(stde)), err)
	}
	if scanErr != nil {
		return nil, fmt.Errorf("failed reading output of command '%s': %w", cmd.String(), scanErr)
	}
	return pools, nil
}
// newPoolImpl constructs a poolImpl bound to the named pool.
func newPoolImpl(name string) poolImpl {
	return poolImpl{name: name}
}

// newPoolPropertiesImpl constructs an empty property collector, ready to
// receive parsed `zpool get` lines via processLine.
func newPoolPropertiesImpl() *poolPropertiesImpl {
	return &poolPropertiesImpl{properties: map[string]string{}}
}
================================================
FILE: zfs/zfs.go
================================================
//go:generate go tool go.uber.org/mock/mockgen -source=zfs.go -destination=mock_zfs/mock_zfs.go -package=mock_zfs
package zfs
import (
"encoding/csv"
"errors"
"fmt"
"io"
"os/exec"
"strings"
)
// ErrInvalidOutput is returned on unparseable CLI output — e.g. a line with
// an unexpected field count, or one naming a different pool than requested.
var ErrInvalidOutput = errors.New(`invalid output executing command`)
// Client is the primary entrypoint for querying ZFS state.
type Client interface {
	// PoolNames lists the names of all available pools.
	PoolNames() ([]string, error)
	// Pool returns a handle for querying the named pool.
	Pool(name string) Pool
	// Datasets returns a handle for querying datasets of the given kind
	// within a pool.
	Datasets(pool string, kind DatasetKind) Datasets
}

// Pool allows querying pool properties
type Pool interface {
	// Name returns the pool's name.
	Name() string
	// Properties fetches the named properties for this pool.
	Properties(props ...string) (PoolProperties, error)
}

// PoolProperties provides access to the properties for a pool
type PoolProperties interface {
	Properties() map[string]string
}

// Datasets allows querying properties for datasets in a pool
type Datasets interface {
	Pool() string
	Kind() DatasetKind
	// Properties fetches the named properties, one entry per dataset.
	Properties(props ...string) ([]DatasetProperties, error)
}

// DatasetProperties provides access to the properties for a dataset
type DatasetProperties interface {
	DatasetName() string
	Properties() map[string]string
}

// handler consumes one parsed line of CLI output at a time; implementations
// accumulate state across calls (see poolPropertiesImpl).
type handler interface {
	processLine(pool string, line []string) error
}
// clientImpl is the stateless, CLI-backed implementation of Client.
type clientImpl struct{}

// PoolNames lists available pool names via the package-level helper.
func (c clientImpl) PoolNames() ([]string, error) {
	return poolNames()
}

// Pool returns a CLI-backed handle for the named pool.
func (c clientImpl) Pool(name string) Pool {
	return newPoolImpl(name)
}

// Datasets returns a CLI-backed handle for datasets of the given kind in pool.
func (c clientImpl) Datasets(pool string, kind DatasetKind) Datasets {
	return newDatasetsImpl(pool, kind)
}
// execute runs `cmd args... pool`, parses its stdout as tab-separated records
// of exactly 3 fields, and feeds each record to h.processLine. stderr is
// captured and included in the error message when the command exits non-zero.
//
// Fix: the original returned early on a read/processLine error without
// reaping the child, leaking a zombie process and its pipe FDs; the child is
// now killed and waited on before returning.
func execute(pool string, h handler, cmd string, args ...string) error {
	c := exec.Command(cmd, append(args, pool)...)
	out, err := c.StdoutPipe()
	if err != nil {
		return err
	}
	stderr, err := c.StderrPipe()
	if err != nil {
		return err
	}
	r := csv.NewReader(out)
	r.Comma = '\t'
	r.LazyQuotes = true
	r.ReuseRecord = true
	r.FieldsPerRecord = 3
	if err = c.Start(); err != nil {
		return fmt.Errorf("failed to start command '%s': %w", c.String(), err)
	}
	for {
		line, err := r.Read()
		if errors.Is(err, io.EOF) {
			break
		}
		if err == nil {
			err = h.processLine(pool, line)
		}
		if err != nil {
			// Don't leak the child on early exit: kill it (we have stopped
			// draining stdout, so Wait alone could block) and reap it.
			_ = c.Process.Kill()
			_ = c.Wait()
			return err
		}
	}
	stde, _ := io.ReadAll(stderr)
	if err = c.Wait(); err != nil {
		return fmt.Errorf("failed to execute command '%s'; output: '%s' (%w)", c.String(), strings.TrimSpace(string(stde)), err)
	}
	return nil
}
// New instantiates a ZFS Client backed by the `zpool` command-line tooling.
func New() Client {
	return clientImpl{}
}
================================================
FILE: zfs_exporter.go
================================================
package main
import (
"net/http"
"os"
"strings"
"github.com/pdf/zfs_exporter/v2/collector"
"github.com/pdf/zfs_exporter/v2/zfs"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus/client_golang/prometheus"
versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/exporter-toolkit/web"
"github.com/prometheus/exporter-toolkit/web/kingpinflag"
"github.com/prometheus/common/promslog"
"github.com/prometheus/common/promslog/flag"
"github.com/prometheus/common/version"
)
// main wires together flag parsing, collector construction, Prometheus
// registration, and the HTTP server that exposes the metrics endpoint.
func main() {
	var (
		metricsPath             = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").String()
		metricsExporterDisabled = kingpin.Flag(`web.disable-exporter-metrics`, `Exclude metrics about the exporter itself (promhttp_*, process_*, go_*).`).Default(`false`).Bool()
		deadline                = kingpin.Flag("deadline", "Maximum duration that a collection should run before returning cached data. Should be set to a value shorter than your scrape timeout duration. The current collection run will continue and update the cache when complete (default: 8s)").Default("8s").Duration()
		pools                   = kingpin.Flag("pool", "Name of the pool(s) to collect, repeat for multiple pools (default: all pools).").Strings()
		excludes                = kingpin.Flag("exclude", "Exclude datasets/snapshots/volumes that match the provided regex (e.g. '^rpool/docker/'), may be specified multiple times.").Strings()
		toolkitFlags            = kingpinflag.AddFlags(kingpin.CommandLine, ":9134")
	)
	// Register logging flags and parse the command line before touching any
	// flag values.
	promslogConfig := &promslog.Config{}
	flag.AddFlags(kingpin.CommandLine, promslogConfig)
	kingpin.Version(version.Print("zfs_exporter"))
	kingpin.HelpFlag.Short('h')
	kingpin.Parse()
	logger := promslog.New(promslogConfig)

	logger.Info("Starting zfs_exporter", "version", version.Info())
	logger.Info("Build context", "context", version.BuildContext())

	// Construct the ZFS collector from the parsed configuration, backed by
	// the CLI-based zfs client.
	c, err := collector.NewZFS(collector.ZFSConfig{
		DisableMetrics: *metricsExporterDisabled,
		Deadline:       *deadline,
		Pools:          *pools,
		Excludes:       *excludes,
		Logger:         logger,
		ZFSClient:      zfs.New(),
	})
	if err != nil {
		logger.Error("Error creating an exporter", "err", err)
		os.Exit(1)
	}
	// Swapping in a fresh registry drops the default exporter self-metrics
	// (promhttp_*, process_*, go_*), honouring --web.disable-exporter-metrics.
	// Must happen before MustRegister below.
	if *metricsExporterDisabled {
		r := prometheus.NewRegistry()
		prometheus.DefaultRegisterer = r
		prometheus.DefaultGatherer = r
	}
	prometheus.MustRegister(c)
	prometheus.MustRegister(versioncollector.NewCollector("zfs_exporter"))

	// Log the effective pool and collector configuration for operator
	// visibility at startup.
	if len(c.Pools) > 0 {
		logger.Info("Enabling pools", "pools", strings.Join(c.Pools, ", "))
	} else {
		logger.Info("Enabling pools", "pools", "(all)")
	}

	collectorNames := make([]string, 0, len(c.Collectors))
	for n, c := range c.Collectors {
		if *c.Enabled {
			collectorNames = append(collectorNames, n)
		}
	}
	logger.Info("Enabling collectors", "collectors", strings.Join(collectorNames, ", "))

	http.Handle(*metricsPath, promhttp.Handler())
	// Serve an informational landing page at "/" unless metrics are already
	// mounted there.
	if *metricsPath != "/" {
		landingConfig := web.LandingConfig{
			Name:        "ZFS Exporter",
			Description: "Prometheus ZFS Exporter",
			Version:     version.Info(),
			Links: []web.LandingLinks{
				{
					Address: *metricsPath,
					Text:    "Metrics",
				},
			},
		}
		landingPage, err := web.NewLandingPage(landingConfig)
		if err != nil {
			logger.Error("Error creating landing page", "err", err)
			os.Exit(1)
		}
		http.Handle("/", landingPage)
	}

	// Listen address/TLS configuration comes from the exporter-toolkit flags;
	// ListenAndServe blocks until the server stops.
	server := &http.Server{}
	err = web.ListenAndServe(server, toolkitFlags, logger)
	if err != nil {
		logger.Error("Error starting HTTP server", "err", err)
		os.Exit(1)
	}
}
gitextract__6skz3q_/ ├── .github/ │ ├── CONTRIBUTING.md │ └── workflows/ │ ├── release.yml │ └── test.yml ├── .gitignore ├── .golangci.yml ├── .promu.yml ├── CHANGELOG.md ├── LICENSE ├── Makefile ├── Makefile.common ├── README.md ├── VERSION ├── collector/ │ ├── cache.go │ ├── collector.go │ ├── collector_test.go │ ├── dataset.go │ ├── dataset_test.go │ ├── pool.go │ ├── pool_test.go │ ├── transform.go │ ├── zfs.go │ └── zfs_test.go ├── go.mod ├── go.sum ├── zfs/ │ ├── dataset.go │ ├── mock_zfs/ │ │ └── mock_zfs.go │ ├── pool.go │ └── zfs.go └── zfs_exporter.go
SYMBOL INDEX (175 symbols across 15 files)
FILE: collector/cache.go
type metricCache (line 9) | type metricCache struct
method add (line 14) | func (c *metricCache) add(m metric) {
method merge (line 20) | func (c *metricCache) merge(other *metricCache) {
method replace (line 35) | func (c *metricCache) replace(other *metricCache) {
method index (line 41) | func (c *metricCache) index() map[string]struct{} {
function newMetricCache (line 52) | func newMetricCache() *metricCache {
FILE: collector/collector.go
constant defaultEnabled (line 16) | defaultEnabled = true
constant defaultDisabled (line 17) | defaultDisabled = false
constant namespace (line 18) | namespace = `zfs`
constant helpDefaultStateEnabled (line 19) | helpDefaultStateEnabled = `enabled`
constant helpDefaultStateDisabled (line 20) | helpDefaultStateDisabled = `disabled`
constant subsystemDataset (line 22) | subsystemDataset = `dataset`
constant subsystemPool (line 23) | subsystemPool = `pool`
constant propertyUnsupportedDesc (line 25) | propertyUnsupportedDesc = `!!! This property is unsupported, results are...
constant propertyUnsupportedMsg (line 26) | propertyUnsupportedMsg = `Unsupported dataset property, results are lik...
constant helpIssue (line 27) | helpIssue = `Please file an issue at https://github.com/pd...
type factoryFunc (line 50) | type factoryFunc
type transformFunc (line 52) | type transformFunc
type State (line 55) | type State struct
type Collector (line 63) | type Collector interface
type metric (line 68) | type metric struct
type property (line 73) | type property struct
method push (line 80) | func (p property) push(ch chan<- metric, value string, labelValues ......
type propertyStore (line 98) | type propertyStore struct
method find (line 104) | func (p *propertyStore) find(name string) (property, error) {
function registerCollector (line 120) | func registerCollector(collector string, isDefaultEnabled bool, defaultP...
function expandMetricName (line 143) | func expandMetricName(prefix string, context ...string) string {
function newProperty (line 147) | func newProperty(subsystem, metricName, helpText string, transform trans...
FILE: collector/collector_test.go
function callCollector (line 17) | func callCollector(ctx context.Context, collector prometheus.Collector, ...
function defaultConfig (line 31) | func defaultConfig(z zfs.Client) ZFSConfig {
function stringPointer (line 40) | func stringPointer(s string) *string {
function boolPointer (line 44) | func boolPointer(b bool) *bool {
FILE: collector/dataset.go
constant defaultFilesystemProps (line 13) | defaultFilesystemProps = `available,logicalused,quota,referenced,used,us...
constant defaultSnapshotProps (line 14) | defaultSnapshotProps = `logicalused,referenced,used,written`
constant defaultVolumeProps (line 15) | defaultVolumeProps = `available,logicalused,referenced,used,usedbyda...
function init (line 188) | func init() {
type datasetCollector (line 194) | type datasetCollector struct
method describe (line 201) | func (c *datasetCollector) describe(ch chan<- *prometheus.Desc) {
method update (line 212) | func (c *datasetCollector) update(ch chan<- metric, pools []string, ex...
method updatePoolMetrics (line 234) | func (c *datasetCollector) updatePoolMetrics(ch chan<- metric, pool st...
method updateDatasetMetrics (line 253) | func (c *datasetCollector) updateDatasetMetrics(ch chan<- metric, pool...
function newDatasetCollector (line 269) | func newDatasetCollector(kind zfs.DatasetKind, l *slog.Logger, c zfs.Cli...
function newFilesystemCollector (line 279) | func newFilesystemCollector(l *slog.Logger, c zfs.Client, props []string...
function newSnapshotCollector (line 283) | func newSnapshotCollector(l *slog.Logger, c zfs.Client, props []string) ...
function newVolumeCollector (line 287) | func newVolumeCollector(l *slog.Logger, c zfs.Client, props []string) (C...
FILE: collector/dataset_test.go
type datasetResults (line 13) | type datasetResults struct
function TestDatsetMetrics (line 18) | func TestDatsetMetrics(t *testing.T) {
FILE: collector/pool.go
constant defaultPoolProps (line 13) | defaultPoolProps = `allocated,dedupratio,fragmentation,free,freeing,heal...
function init (line 122) | func init() {
type poolCollector (line 126) | type poolCollector struct
method describe (line 132) | func (c *poolCollector) describe(ch chan<- *prometheus.Desc) {
method update (line 143) | func (c *poolCollector) update(ch chan<- metric, pools []string, exclu...
method updatePoolMetrics (line 165) | func (c *poolCollector) updatePoolMetrics(ch chan<- metric, pool strin...
function newPoolCollector (line 186) | func newPoolCollector(l *slog.Logger, c zfs.Client, props []string) (Col...
FILE: collector/pool_test.go
function TestPoolMetrics (line 12) | func TestPoolMetrics(t *testing.T) {
FILE: collector/transform.go
type poolHealthCode (line 10) | type poolHealthCode
constant poolOnline (line 13) | poolOnline poolHealthCode = iota
constant poolDegraded (line 14) | poolDegraded
constant poolFaulted (line 15) | poolFaulted
constant poolOffline (line 16) | poolOffline
constant poolUnavail (line 17) | poolUnavail
constant poolRemoved (line 18) | poolRemoved
constant poolSuspended (line 19) | poolSuspended
function transformNumeric (line 22) | func transformNumeric(value string) (float64, error) {
function transformHealthCode (line 29) | func transformHealthCode(status string) (float64, error) {
function transformBool (line 53) | func transformBool(value string) (float64, error) {
function transformPercentage (line 64) | func transformPercentage(value string) (float64, error) {
function transformMultiplier (line 76) | func transformMultiplier(value string) (float64, error) {
FILE: collector/zfs.go
type regexpCollection (line 16) | type regexpCollection
method MatchString (line 18) | func (c regexpCollection) MatchString(input string) bool {
type ZFSConfig (line 29) | type ZFSConfig struct
type ZFS (line 39) | type ZFS struct
method Describe (line 52) | func (c *ZFS) Describe(ch chan<- *prometheus.Desc) {
method Collect (line 72) | func (c *ZFS) Collect(ch chan<- prometheus.Metric) {
method sendCached (line 165) | func (c *ZFS) sendCached(ch chan<- prometheus.Metric, cacheIndex map[s...
method getPools (line 176) | func (c *ZFS) getPools(pools []string) ([]string, error) {
method execute (line 205) | func (c *ZFS) execute(ctx context.Context, name string, collector Coll...
method publishCollectorMetrics (line 213) | func (c *ZFS) publishCollectorMetrics(ctx context.Context, name string...
function NewZFS (line 249) | func NewZFS(config ZFSConfig) (*ZFS, error) {
FILE: collector/zfs_test.go
function TestZFSCollectInvalidPools (line 12) | func TestZFSCollectInvalidPools(t *testing.T) {
FILE: zfs/dataset.go
type DatasetKind (line 8) | type DatasetKind
constant DatasetFilesystem (line 12) | DatasetFilesystem DatasetKind = `filesystem`
constant DatasetVolume (line 14) | DatasetVolume DatasetKind = `volume`
constant DatasetSnapshot (line 16) | DatasetSnapshot DatasetKind = `snapshot`
type datasetsImpl (line 19) | type datasetsImpl struct
method Pool (line 24) | func (d datasetsImpl) Pool() string {
method Kind (line 28) | func (d datasetsImpl) Kind() DatasetKind {
method Properties (line 32) | func (d datasetsImpl) Properties(props ...string) ([]DatasetProperties...
type datasetPropertiesImpl (line 40) | type datasetPropertiesImpl struct
method DatasetName (line 45) | func (p *datasetPropertiesImpl) DatasetName() string {
method Properties (line 49) | func (p *datasetPropertiesImpl) Properties() map[string]string {
type datasetHandler (line 54) | type datasetHandler struct
method processLine (line 59) | func (h *datasetHandler) processLine(pool string, line []string) error {
method datasets (line 70) | func (h *datasetHandler) datasets() []DatasetProperties {
function newDatasetPropertiesImpl (line 80) | func newDatasetPropertiesImpl(name string) *datasetPropertiesImpl {
function newDatasetsImpl (line 87) | func newDatasetsImpl(pool string, kind DatasetKind) datasetsImpl {
function newDatasetHandler (line 94) | func newDatasetHandler() *datasetHandler {
FILE: zfs/mock_zfs/mock_zfs.go
type MockClient (line 20) | type MockClient struct
method EXPECT (line 39) | func (m *MockClient) EXPECT() *MockClientMockRecorder {
method Datasets (line 44) | func (m *MockClient) Datasets(pool string, kind zfs.DatasetKind) zfs.D...
method Pool (line 58) | func (m *MockClient) Pool(name string) zfs.Pool {
method PoolNames (line 72) | func (m *MockClient) PoolNames() ([]string, error) {
type MockClientMockRecorder (line 27) | type MockClientMockRecorder struct
method Datasets (line 52) | func (mr *MockClientMockRecorder) Datasets(pool, kind any) *gomock.Call {
method Pool (line 66) | func (mr *MockClientMockRecorder) Pool(name any) *gomock.Call {
method PoolNames (line 81) | func (mr *MockClientMockRecorder) PoolNames() *gomock.Call {
function NewMockClient (line 32) | func NewMockClient(ctrl *gomock.Controller) *MockClient {
type MockPool (line 87) | type MockPool struct
method EXPECT (line 106) | func (m *MockPool) EXPECT() *MockPoolMockRecorder {
method Name (line 111) | func (m *MockPool) Name() string {
method Properties (line 125) | func (m *MockPool) Properties(props ...string) (zfs.PoolProperties, er...
type MockPoolMockRecorder (line 94) | type MockPoolMockRecorder struct
method Name (line 119) | func (mr *MockPoolMockRecorder) Name() *gomock.Call {
method Properties (line 138) | func (mr *MockPoolMockRecorder) Properties(props ...any) *gomock.Call {
function NewMockPool (line 99) | func NewMockPool(ctrl *gomock.Controller) *MockPool {
type MockPoolProperties (line 144) | type MockPoolProperties struct
method EXPECT (line 163) | func (m *MockPoolProperties) EXPECT() *MockPoolPropertiesMockRecorder {
method Properties (line 168) | func (m *MockPoolProperties) Properties() map[string]string {
type MockPoolPropertiesMockRecorder (line 151) | type MockPoolPropertiesMockRecorder struct
method Properties (line 176) | func (mr *MockPoolPropertiesMockRecorder) Properties() *gomock.Call {
function NewMockPoolProperties (line 156) | func NewMockPoolProperties(ctrl *gomock.Controller) *MockPoolProperties {
type MockDatasets (line 182) | type MockDatasets struct
method EXPECT (line 201) | func (m *MockDatasets) EXPECT() *MockDatasetsMockRecorder {
method Kind (line 206) | func (m *MockDatasets) Kind() zfs.DatasetKind {
method Pool (line 220) | func (m *MockDatasets) Pool() string {
method Properties (line 234) | func (m *MockDatasets) Properties(props ...string) ([]zfs.DatasetPrope...
type MockDatasetsMockRecorder (line 189) | type MockDatasetsMockRecorder struct
method Kind (line 214) | func (mr *MockDatasetsMockRecorder) Kind() *gomock.Call {
method Pool (line 228) | func (mr *MockDatasetsMockRecorder) Pool() *gomock.Call {
method Properties (line 247) | func (mr *MockDatasetsMockRecorder) Properties(props ...any) *gomock.C...
function NewMockDatasets (line 194) | func NewMockDatasets(ctrl *gomock.Controller) *MockDatasets {
type MockDatasetProperties (line 253) | type MockDatasetProperties struct
method EXPECT (line 272) | func (m *MockDatasetProperties) EXPECT() *MockDatasetPropertiesMockRec...
method DatasetName (line 277) | func (m *MockDatasetProperties) DatasetName() string {
method Properties (line 291) | func (m *MockDatasetProperties) Properties() map[string]string {
type MockDatasetPropertiesMockRecorder (line 260) | type MockDatasetPropertiesMockRecorder struct
method DatasetName (line 285) | func (mr *MockDatasetPropertiesMockRecorder) DatasetName() *gomock.Call {
method Properties (line 299) | func (mr *MockDatasetPropertiesMockRecorder) Properties() *gomock.Call {
function NewMockDatasetProperties (line 265) | func NewMockDatasetProperties(ctrl *gomock.Controller) *MockDatasetPrope...
type Mockhandler (line 305) | type Mockhandler struct
method EXPECT (line 324) | func (m *Mockhandler) EXPECT() *MockhandlerMockRecorder {
method processLine (line 329) | func (m *Mockhandler) processLine(pool string, line []string) error {
type MockhandlerMockRecorder (line 312) | type MockhandlerMockRecorder struct
method processLine (line 337) | func (mr *MockhandlerMockRecorder) processLine(pool, line any) *gomock...
function NewMockhandler (line 317) | func NewMockhandler(ctrl *gomock.Controller) *Mockhandler {
FILE: zfs/pool.go
type PoolStatus (line 12) | type PoolStatus
constant PoolOnline (line 16) | PoolOnline PoolStatus = `ONLINE`
constant PoolDegraded (line 18) | PoolDegraded PoolStatus = `DEGRADED`
constant PoolFaulted (line 20) | PoolFaulted PoolStatus = `FAULTED`
constant PoolOffline (line 22) | PoolOffline PoolStatus = `OFFLINE`
constant PoolUnavail (line 24) | PoolUnavail PoolStatus = `UNAVAIL`
constant PoolRemoved (line 26) | PoolRemoved PoolStatus = `REMOVED`
constant PoolSuspended (line 28) | PoolSuspended PoolStatus = `SUSPENDED`
type poolImpl (line 31) | type poolImpl struct
method Name (line 35) | func (p poolImpl) Name() string {
method Properties (line 39) | func (p poolImpl) Properties(props ...string) (PoolProperties, error) {
type poolPropertiesImpl (line 47) | type poolPropertiesImpl struct
method Properties (line 51) | func (p *poolPropertiesImpl) Properties() map[string]string {
method processLine (line 56) | func (p *poolPropertiesImpl) processLine(pool string, line []string) e...
function poolNames (line 66) | func poolNames() ([]string, error) {
function newPoolImpl (line 95) | func newPoolImpl(name string) poolImpl {
function newPoolPropertiesImpl (line 101) | func newPoolPropertiesImpl() *poolPropertiesImpl {
FILE: zfs/zfs.go
type Client (line 18) | type Client interface
type Pool (line 25) | type Pool interface
type PoolProperties (line 31) | type PoolProperties interface
type Datasets (line 36) | type Datasets interface
type DatasetProperties (line 43) | type DatasetProperties interface
type handler (line 48) | type handler interface
type clientImpl (line 52) | type clientImpl struct
method PoolNames (line 54) | func (z clientImpl) PoolNames() ([]string, error) {
method Pool (line 58) | func (z clientImpl) Pool(name string) Pool {
method Datasets (line 62) | func (z clientImpl) Datasets(pool string, kind DatasetKind) Datasets {
function execute (line 66) | func execute(pool string, h handler, cmd string, args ...string) error {
function New (line 109) | func New() Client {
FILE: zfs_exporter.go
function main (line 23) | func main() {
Condensed preview — 29 files, each showing its path, character count, and a content snippet. Download the .json file or copy it to get the full structured content (124K chars).
[
{
"path": ".github/CONTRIBUTING.md",
"chars": 458,
"preview": "# Contributing\n\nWhen contributing to this repository, please open an issue with a description of the problem you wish to"
},
{
"path": ".github/workflows/release.yml",
"chars": 3416,
"preview": "# This is a basic workflow to help you get started with Actions\nname: Release\n\n# Controls when the action will run. Trig"
},
{
"path": ".github/workflows/test.yml",
"chars": 988,
"preview": "name: Test\n\n# Controls when the action will run. Triggers the workflow on push or pull request\n# events but only for the"
},
{
"path": ".gitignore",
"chars": 32,
"preview": "zfs_exporter\n.build/\n.tarballs/\n"
},
{
"path": ".golangci.yml",
"chars": 1364,
"preview": "version: \"2\"\nlinters:\n enable:\n - errorlint\n - misspell\n - perfsprint\n - revive\n - testifylint\n setting"
},
{
"path": ".promu.yml",
"chars": 715,
"preview": "go:\n # Whenever the Go version is updated here,\n # .circle/config.yml should also be updated.\n version: 1.23\nreposito"
},
{
"path": "CHANGELOG.md",
"chars": 8668,
"preview": "## [2.3.12](https://github.com/pdf/zfs_exporter/compare/v2.3.11...v2.3.12) (2026-04-04)\n\n\n### Bug Fixes\n\n* **docs:** Upd"
},
{
"path": "LICENSE",
"chars": 1066,
"preview": "MIT License\n\nCopyright (c) 2018 Peter Fern\n\nPermission is hereby granted, free of charge, to any person obtaining a copy"
},
{
"path": "Makefile",
"chars": 862,
"preview": "# Copyright 2015 The Prometheus Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not "
},
{
"path": "Makefile.common",
"chars": 9330,
"preview": "# Copyright 2018 The Prometheus Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not "
},
{
"path": "README.md",
"chars": 6242,
"preview": "# ZFS Exporter\n\n[](https://github.com/p"
},
{
"path": "VERSION",
"chars": 7,
"preview": "2.3.12\n"
},
{
"path": "collector/cache.go",
"chars": 914,
"preview": "package collector\n\nimport (\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\ntype metricCache struct {\n\tcac"
},
{
"path": "collector/collector.go",
"chars": 4352,
"preview": "package collector\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/alecthomas/kingpin/v2\"\n\t\"gi"
},
{
"path": "collector/collector_test.go",
"chars": 921,
"preview": "package collector\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"io\"\n\t\"log/slog\"\n\t\"time\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs\"\n\t\"githu"
},
{
"path": "collector/dataset.go",
"chars": 8795,
"preview": "package collector\n\nimport (\n\t\"fmt\"\n\t\"log/slog\"\n\t\"sync\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs\"\n\t\"github.com/prometheus/cl"
},
{
"path": "collector/dataset_test.go",
"chars": 12776,
"preview": "package collector\n\nimport (\n\t\"context\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs\"\n\t\"github.com/pdf/zfs"
},
{
"path": "collector/pool.go",
"chars": 4615,
"preview": "package collector\n\nimport (\n\t\"fmt\"\n\t\"log/slog\"\n\t\"sync\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs\"\n\t\"github.com/prometheus/cl"
},
{
"path": "collector/pool_test.go",
"chars": 8475,
"preview": "package collector\n\nimport (\n\t\"context\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs/mock_zfs\"\n\t\"go.uber.o"
},
{
"path": "collector/transform.go",
"chars": 1683,
"preview": "package collector\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs\"\n)\n\ntype poolHealthCode int\n\nconst ("
},
{
"path": "collector/zfs.go",
"chars": 6675,
"preview": "package collector\n\nimport (\n\t\"context\"\n\t\"log/slog\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/pdf/zfs_ex"
},
{
"path": "collector/zfs_test.go",
"chars": 1276,
"preview": "package collector\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/pdf/zfs_exporter/v2/zfs/mock_zfs\"\n\t\"go.uber.or"
},
{
"path": "go.mod",
"chars": 1653,
"preview": "module github.com/pdf/zfs_exporter/v2\n\ngo 1.24.0\n\ntoolchain go1.24.2\n\nrequire (\n\tgithub.com/alecthomas/units v0.0.0-2024"
},
{
"path": "go.sum",
"chars": 8322,
"preview": "github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=\ngithub.com/alecthomas/kingpin/v2"
},
{
"path": "zfs/dataset.go",
"chars": 2230,
"preview": "package zfs\n\nimport (\n\t\"strings\"\n)\n\n// DatasetKind enum of supported dataset types\ntype DatasetKind string\n\nconst (\n\t// "
},
{
"path": "zfs/mock_zfs/mock_zfs.go",
"chars": 10286,
"preview": "// Code generated by MockGen. DO NOT EDIT.\n// Source: zfs.go\n//\n// Generated by this command:\n//\n//\tmockgen -source=zfs."
},
{
"path": "zfs/pool.go",
"chars": 2312,
"preview": "package zfs\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os/exec\"\n\t\"strings\"\n)\n\n// PoolStatus enum contains status text\ntype PoolSt"
},
{
"path": "zfs/zfs.go",
"chars": 2396,
"preview": "//go:generate go tool go.uber.org/mock/mockgen -source=zfs.go -destination=mock_zfs/mock_zfs.go -package=mock_zfs\n\npacka"
},
{
"path": "zfs_exporter.go",
"chars": 3663,
"preview": "package main\n\nimport (\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/pdf/zfs_exporter/v2/collector\"\n\t\"github.com/pdf/zfs_ex"
}
]
About this extraction
This page contains the full source code of the pdf/zfs_exporter GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 29 files (111.8 KB), approximately 34.0k tokens, and a symbol index with 175 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.