[
  {
    "path": ".dockerignore",
    "content": "# Created by .ignore support plugin (hsz.mobi)\n.idea/\n*.iml\n.git/"
  },
  {
    "path": ".gitignore",
    "content": "# Editor files\n*~\n.idea/\n\n# Test binary, build with `go test -c`\n*.test\n\n# Binaries\npostfix_exporter\n\n*.iml\nvendor/\n"
  },
  {
    "path": ".travis.yml",
    "content": "language: go\n\nmatrix:\n  include:\n    - go: 1.16.x\n      env: VET=1 GO111MODULE=on\n    - go: 1.16.x\n      env: RACE=1 GO111MODULE=on\n    - go: 1.16.x\n      env: RUN386=1\n    - go: 1.15.x\n      env: VET=1 GO111MODULE=on\n    - go: 1.15.x\n      env: RACE=1 GO111MODULE=on\n    - go: 1.15.x\n      env: RUN386=1\n    - go: 1.14.x\n      env: VET=1 GO111MODULE=on\n    - go: 1.14.x\n      env: RACE=1 GO111MODULE=on\n    - go: 1.14.x\n      env: RUN386=1\n    - go: 1.13.x\n      env: VET=1 GO111MODULE=on\n    - go: 1.13.x\n      env: RACE=1 GO111MODULE=on\n    - go: 1.13.x\n      env: RUN386=1\n    - go: 1.12.x\n      env: GO111MODULE=on\n    - go: 1.11.x\n      env: GO111MODULE=on\n    - go: stable\n\naddons:\n  apt:\n    packages:\n      - libsystemd-dev\n\nenv:\n  global:\n    GO111MODULE: on\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "## 0.1.3 / 2021-05-02\n\n* [BUGFIX] Fix default for mail log path (/var/log/mail.log)\n\n## 0.1.2 / 2018-05-04\n\n* [ENHANCEMENT] Build tag for systemd\n\n## 0.1.1 / 2018-04-19\n\n* [BUGFIX] Non-updating metrics from systemd-journal fix\n\n## 0.1.0 / 2018-02-23\n\n* [ENHANCEMENT] Initial release, add changelog\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM golang:1.16 AS builder\nWORKDIR /src\n\n# avoid downloading the dependencies on successive builds\nRUN apt-get update -qq && apt-get install -qqy \\\n  build-essential \\\n  libsystemd-dev\n\nCOPY go.mod go.sum ./\nRUN go mod download\nRUN go mod verify\n\nCOPY . .\n\n# Force the go compiler to use modules\nENV GO111MODULE=on\nRUN go test\nRUN go build -o /bin/postfix_exporter\n\nFROM debian:latest\nEXPOSE 9154\nWORKDIR /\nCOPY --from=builder /bin/postfix_exporter /bin/\nENTRYPOINT [\"/bin/postfix_exporter\"]\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "# Prometheus Postfix exporter\n\nPrometheus metrics exporter for [the Postfix mail server](http://www.postfix.org/).\nThis exporter provides histogram metrics for the size and age of messages stored in\nthe mail queue. It extracts these metrics from Postfix by connecting to\na UNIX socket under `/var/spool`. It also counts events by parsing Postfix's\nlog entries, using regular expression matching. The log entries are retrieved from\nthe systemd journal, the Docker logs, or from a log file.\n\n## Options\n\nThese options can be used when starting the `postfix_exporter`\n\n| Flag                     | Description                                          | Default                           |\n|--------------------------|------------------------------------------------------|-----------------------------------|\n| `--web.listen-address`   | Address to listen on for web interface and telemetry | `9154`                            |\n| `--web.telemetry-path`   | Path under which to expose metrics                   | `/metrics`                        |\n| `--postfix.showq_path`   | Path at which Postfix places its showq socket        | `/var/spool/postfix/public/showq` |\n| `--postfix.logfile_path` | Path where Postfix writes log entries                | `/var/log/mail.log`               |\n| `--log.unsupported`      | Log all unsupported lines                            | `false`                           |\n| `--docker.enable`        | Read from the Docker logs instead of a file          | `false`                           |\n| `--docker.container.id`  | The container to read Docker logs from               | `postfix`                         |\n| `--systemd.enable`       | Read from the systemd journal instead of file        | `false`                           |\n| `--systemd.unit`         | Name of the Postfix systemd unit                     | `postfix.service`                 |\n| `--systemd.slice`        | Name of the Postfix systemd slice.           
        | `\"\"`                              |\n| `--systemd.journal_path` | Path to the systemd journal                          | `\"\"`                              |\n\n## Events from Docker\n\nPostfix servers running in a [Docker](https://www.docker.com/)\ncontainer can be monitored using the `--docker.enable` flag. The\ndefault container ID is `postfix`, but can be customized with the\n`--docker.container.id` flag.\n\nThe default is to connect to the local Docker, but this can be\ncustomized using [the `DOCKER_HOST` and\nsimilar](https://pkg.go.dev/github.com/docker/docker/client?tab=doc#NewEnvClient)\nenvironment variables.\n\n## Events from log file\n\nThe log file is tailed when processed. Rotating the log files while the exporter\nis running is OK. The path to the log file is specified with the\n`--postfix.logfile_path` flag.\n\n## Events from systemd\n\nRetrieval from the systemd journal is enabled with the `--systemd.enable` flag.\nThis overrides the log file setting.\nIt is possible to specify the unit (with `--systemd.unit`) or slice (with `--systemd.slice`).\nAdditionally, it is possible to read the journal from a directory with the `--systemd.journal_path` flag.\n\n## Build options\n\nBy default the exporter is built with systemd journal functionality (but it is disabled by default).\nBecause the systemd headers are required for building with systemd, there is\nan option to build the exporter without systemd. Use the build tag `nosystemd`.\n\n```\ngo build -tags nosystemd\n```\n"
  },
  {
    "path": "build_static.sh",
    "content": "#!/bin/sh\n\ndocker run -i -v `pwd`:/postfix_exporter golang:1.16 /bin/sh << 'EOF'\nset -ex\n\n# Install prerequisites for the build process.\napt-get update -q\napt-get install -yq libsystemd-dev\n\ncd /postfix_exporter\n\ngo get -d ./...\ngo build -a -tags static_all\nstrip postfix_exporter\nEOF\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/kumina/postfix_exporter\n\ngo 1.16\n\nrequire (\n\tgithub.com/Microsoft/go-winio v0.5.0 // indirect\n\tgithub.com/alecthomas/kingpin v2.2.6+incompatible\n\tgithub.com/coreos/go-systemd/v22 v22.0.0\n\tgithub.com/docker/distribution v2.7.1+incompatible // indirect\n\tgithub.com/docker/docker v1.13.1\n\tgithub.com/docker/go-connections v0.4.0 // indirect\n\tgithub.com/docker/go-units v0.4.0 // indirect\n\tgithub.com/nxadm/tail v1.4.8\n\tgithub.com/opencontainers/go-digest v1.0.0 // indirect\n\tgithub.com/prometheus/client_golang v1.4.1\n\tgithub.com/prometheus/client_model v0.2.0\n\tgithub.com/stretchr/testify v1.4.0\n)\n"
  },
  {
    "path": "go.sum",
    "content": "github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=\ngithub.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=\ngithub.com/alecthomas/kingpin v2.2.6+incompatible h1:5svnBTFgJjZvGKyYBtMB0+m5wvrbUHiqye8wRJMlnYI=\ngithub.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=\ngithub.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=\ngithub.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=\ngithub.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=\ngithub.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=\ngithub.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=\ngithub.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=\ngithub.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=\ngithub.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=\ngithub.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=\ngithub.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28=\ngithub.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=\ngithub.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=\ngithub.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=\ngithub.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=\ngithub.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=\ngithub.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=\ngithub.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=\ngithub.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=\ngithub.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=\ngithub.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=\ngithub.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=\ngithub.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=\ngithub.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=\ngithub.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=\ngithub.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=\ngithub.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=\ngithub.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=\ngithub.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=\ngithub.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=\ngithub.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=\ngithub.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=\ngithub.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=\ngithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.1/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=\ngithub.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=\ngithub.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=\ngithub.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=\ngithub.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=\ngithub.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=\ngithub.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=\ngithub.com/prometheus/client_golang v1.4.1 h1:FFSuS004yOQEtDdTq+TAOLP5xUq63KqAFYyOi8zA+Y8=\ngithub.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=\ngithub.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=\ngithub.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=\ngithub.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=\ngithub.com/prometheus/common v0.9.1 
h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=\ngithub.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=\ngithub.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=\ngithub.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=\ngithub.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=\ngithub.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=\ngithub.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=\ngithub.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=\ngithub.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=\ngithub.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=\ngolang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=\ngolang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=\ngolang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=\ngopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=\ngopkg.in/yaml.v2 
v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=\ngopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\n"
  },
  {
    "path": "logsource.go",
    "content": "package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/alecthomas/kingpin\"\n)\n\n// A LogSourceFactory provides a repository of log sources that can be\n// instantiated from command line flags.\ntype LogSourceFactory interface {\n\t// Init adds the factory's struct fields as flags in the\n\t// application.\n\tInit(*kingpin.Application)\n\n\t// New attempts to create a new log source. This is called after\n\t// flags have been parsed. Returning `nil, nil`, means the user\n\t// didn't want this log source.\n\tNew(context.Context) (LogSourceCloser, error)\n}\n\ntype LogSourceCloser interface {\n\tio.Closer\n\tLogSource\n}\n\nvar logSourceFactories []LogSourceFactory\n\n// RegisterLogSourceFactory can be called from module `init` functions\n// to register factories.\nfunc RegisterLogSourceFactory(lsf LogSourceFactory) {\n\tlogSourceFactories = append(logSourceFactories, lsf)\n}\n\n// InitLogSourceFactories runs Init on all factories. The\n// initialization order is arbitrary, except `fileLogSourceFactory` is\n// always last (the fallback). The file log source must be last since\n// it's enabled by default.\nfunc InitLogSourceFactories(app *kingpin.Application) {\n\tRegisterLogSourceFactory(&fileLogSourceFactory{})\n\n\tfor _, f := range logSourceFactories {\n\t\tf.Init(app)\n\t}\n}\n\n// NewLogSourceFromFactories iterates through the factories and\n// attempts to instantiate a log source. The first factory to return\n// success wins.\nfunc NewLogSourceFromFactories(ctx context.Context) (LogSourceCloser, error) {\n\tfor _, f := range logSourceFactories {\n\t\tsrc, err := f.New(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif src != nil {\n\t\t\treturn src, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"no log source configured\")\n}\n"
  },
  {
    "path": "logsource_docker.go",
    "content": "// +build !nodocker\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"io\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com/alecthomas/kingpin\"\n\t\"github.com/docker/docker/api/types\"\n\t\"github.com/docker/docker/client\"\n)\n\n// A DockerLogSource reads log records from the given Docker\n// journal.\ntype DockerLogSource struct {\n\tclient      DockerClient\n\tcontainerID string\n\treader      *bufio.Reader\n}\n\n// A DockerClient is the client interface that client.Client\n// provides. See https://pkg.go.dev/github.com/docker/docker/client\ntype DockerClient interface {\n\tio.Closer\n\tContainerLogs(context.Context, string, types.ContainerLogsOptions) (io.ReadCloser, error)\n}\n\n// NewDockerLogSource returns a log source for reading Docker logs.\nfunc NewDockerLogSource(ctx context.Context, c DockerClient, containerID string) (*DockerLogSource, error) {\n\tr, err := c.ContainerLogs(ctx, containerID, types.ContainerLogsOptions{\n\t\tShowStdout: true,\n\t\tShowStderr: true,\n\t\tFollow:     true,\n\t\tTail:       \"0\",\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogSrc := &DockerLogSource{\n\t\tclient:      c,\n\t\tcontainerID: containerID,\n\t\treader:      bufio.NewReader(r),\n\t}\n\n\treturn logSrc, nil\n}\n\nfunc (s *DockerLogSource) Close() error {\n\treturn s.client.Close()\n}\n\nfunc (s *DockerLogSource) Path() string {\n\treturn \"docker:\" + s.containerID\n}\n\nfunc (s *DockerLogSource) Read(ctx context.Context) (string, error) {\n\tline, err := s.reader.ReadString('\\n')\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(line), nil\n}\n\n// A dockerLogSourceFactory is a factory that can create\n// DockerLogSources from command line flags.\ntype dockerLogSourceFactory struct {\n\tenable      bool\n\tcontainerID string\n}\n\nfunc (f *dockerLogSourceFactory) Init(app *kingpin.Application) {\n\tapp.Flag(\"docker.enable\", \"Read from Docker logs. 
Environment variable DOCKER_HOST can be used to change the address. See https://pkg.go.dev/github.com/docker/docker/client?tab=doc#NewEnvClient for more information.\").Default(\"false\").BoolVar(&f.enable)\n\tapp.Flag(\"docker.container.id\", \"ID/name of the Postfix Docker container.\").Default(\"postfix\").StringVar(&f.containerID)\n}\n\nfunc (f *dockerLogSourceFactory) New(ctx context.Context) (LogSourceCloser, error) {\n\tif !f.enable {\n\t\treturn nil, nil\n\t}\n\n\tlog.Println(\"Reading log events from Docker\")\n\tc, err := client.NewEnvClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewDockerLogSource(ctx, c, f.containerID)\n}\n\nfunc init() {\n\tRegisterLogSourceFactory(&dockerLogSourceFactory{})\n}\n"
  },
  {
    "path": "logsource_docker_test.go",
    "content": "// +build !nodocker\n\npackage main\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestNewDockerLogSource(t *testing.T) {\n\tctx := context.Background()\n\tc := &fakeDockerClient{}\n\tsrc, err := NewDockerLogSource(ctx, c, \"acontainer\")\n\tif err != nil {\n\t\tt.Fatalf(\"NewDockerLogSource failed: %v\", err)\n\t}\n\n\tassert.Equal(t, []string{\"acontainer\"}, c.containerLogsCalls, \"A call to ContainerLogs should be made.\")\n\n\tif err := src.Close(); err != nil {\n\t\tt.Fatalf(\"Close failed: %v\", err)\n\t}\n\n\tassert.Equal(t, 1, c.closeCalls, \"A call to Close should be made.\")\n}\n\nfunc TestDockerLogSource_Path(t *testing.T) {\n\tctx := context.Background()\n\tc := &fakeDockerClient{}\n\tsrc, err := NewDockerLogSource(ctx, c, \"acontainer\")\n\tif err != nil {\n\t\tt.Fatalf(\"NewDockerLogSource failed: %v\", err)\n\t}\n\tdefer src.Close()\n\n\tassert.Equal(t, \"docker:acontainer\", src.Path(), \"Path should be set by New.\")\n}\n\nfunc TestDockerLogSource_Read(t *testing.T) {\n\tctx := context.Background()\n\n\tc := &fakeDockerClient{\n\t\tlogsReader: ioutil.NopCloser(strings.NewReader(\"Feb 13 23:31:30 ahost anid[123]: aline\\n\")),\n\t}\n\tsrc, err := NewDockerLogSource(ctx, c, \"acontainer\")\n\tif err != nil {\n\t\tt.Fatalf(\"NewDockerLogSource failed: %v\", err)\n\t}\n\tdefer src.Close()\n\n\ts, err := src.Read(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Read failed: %v\", err)\n\t}\n\tassert.Equal(t, \"Feb 13 23:31:30 ahost anid[123]: aline\", s, \"Read should get data from the journal entry.\")\n}\n\ntype fakeDockerClient struct {\n\tlogsReader io.ReadCloser\n\n\tcontainerLogsCalls []string\n\tcloseCalls         int\n}\n\nfunc (c *fakeDockerClient) ContainerLogs(ctx context.Context, containerID string, opts types.ContainerLogsOptions) (io.ReadCloser, error) {\n\tc.containerLogsCalls = 
append(c.containerLogsCalls, containerID)\n\treturn c.logsReader, nil\n}\n\nfunc (c *fakeDockerClient) Close() error {\n\tc.closeCalls++\n\treturn nil\n}\n"
  },
  {
    "path": "logsource_file.go",
    "content": "package main\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"log\"\n\n\t\"github.com/alecthomas/kingpin\"\n\t\"github.com/nxadm/tail\"\n)\n\n// A FileLogSource can read lines from a file.\ntype FileLogSource struct {\n\ttailer *tail.Tail\n}\n\n// NewFileLogSource creates a new log source, tailing the given file.\nfunc NewFileLogSource(path string) (*FileLogSource, error) {\n\ttailer, err := tail.TailFile(path, tail.Config{\n\t\tReOpen:    true,                               // reopen the file if it's rotated\n\t\tMustExist: true,                               // fail immediately if the file is missing or has incorrect permissions\n\t\tFollow:    true,                               // run in follow mode\n\t\tLocation:  &tail.SeekInfo{Whence: io.SeekEnd}, // seek to end of file\n\t\tLogger:    tail.DiscardingLogger,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &FileLogSource{tailer}, nil\n}\n\nfunc (s *FileLogSource) Close() error {\n\tdefer s.tailer.Cleanup()\n\tgo func() {\n\t\t// Stop() waits for the tailer goroutine to shut down, but it\n\t\t// can be blocking on sending on the Lines channel...\n\t\tfor range s.tailer.Lines {\n\t\t}\n\t}()\n\treturn s.tailer.Stop()\n}\n\nfunc (s *FileLogSource) Path() string {\n\treturn s.tailer.Filename\n}\n\nfunc (s *FileLogSource) Read(ctx context.Context) (string, error) {\n\tselect {\n\tcase line, ok := <-s.tailer.Lines:\n\t\tif !ok {\n\t\t\treturn \"\", io.EOF\n\t\t}\n\t\treturn line.Text, nil\n\tcase <-ctx.Done():\n\t\treturn \"\", ctx.Err()\n\t}\n}\n\n// A fileLogSourceFactory is a factory than can create log sources\n// from command line flags.\n//\n// Because this factory is enabled by default, it must always be\n// registered last.\ntype fileLogSourceFactory struct {\n\tpath string\n}\n\nfunc (f *fileLogSourceFactory) Init(app *kingpin.Application) {\n\tapp.Flag(\"postfix.logfile_path\", \"Path where Postfix writes log entries.\").Default(\"/var/log/mail.log\").StringVar(&f.path)\n}\n\nfunc (f 
*fileLogSourceFactory) New(ctx context.Context) (LogSourceCloser, error) {\n\tif f.path == \"\" {\n\t\treturn nil, nil\n\t}\n\tlog.Printf(\"Reading log events from %s\", f.path)\n\treturn NewFileLogSource(f.path)\n}\n"
  },
  {
    "path": "logsource_file_test.go",
    "content": "package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestFileLogSource_Path(t *testing.T) {\n\tpath, close, err := setupFakeLogFile()\n\tif err != nil {\n\t\tt.Fatalf(\"setupFakeTailer failed: %v\", err)\n\t}\n\tdefer close()\n\n\tsrc, err := NewFileLogSource(path)\n\tif err != nil {\n\t\tt.Fatalf(\"NewFileLogSource failed: %v\", err)\n\t}\n\tdefer src.Close()\n\n\tassert.Equal(t, path, src.Path(), \"Path should be set by New.\")\n}\n\nfunc TestFileLogSource_Read(t *testing.T) {\n\tctx := context.Background()\n\n\tpath, close, err := setupFakeLogFile()\n\tif err != nil {\n\t\tt.Fatalf(\"setupFakeTailer failed: %v\", err)\n\t}\n\tdefer close()\n\n\tsrc, err := NewFileLogSource(path)\n\tif err != nil {\n\t\tt.Fatalf(\"NewFileLogSource failed: %v\", err)\n\t}\n\tdefer src.Close()\n\n\ts, err := src.Read(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Read failed: %v\", err)\n\t}\n\tassert.Equal(t, \"Feb 13 23:31:30 ahost anid[123]: aline\", s, \"Read should get data from the journal entry.\")\n}\n\nfunc setupFakeLogFile() (string, func(), error) {\n\tf, err := ioutil.TempFile(\"\", \"filelogsource\")\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tvar wg sync.WaitGroup\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdefer os.Remove(f.Name())\n\t\tdefer f.Close()\n\n\t\tfor {\n\t\t\t// The tailer seeks to the end and then does a\n\t\t\t// follow. Keep writing lines so we know it wakes up and\n\t\t\t// returns lines.\n\t\t\tfmt.Fprintln(f, \"Feb 13 23:31:30 ahost anid[123]: aline\")\n\n\t\t\tselect {\n\t\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\t\t// continue\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn f.Name(), func() {\n\t\tcancel()\n\t\twg.Wait()\n\t}, nil\n}\n"
  },
  {
    "path": "logsource_systemd.go",
    "content": "// +build !nosystemd,linux\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/alecthomas/kingpin\"\n\t\"github.com/coreos/go-systemd/v22/sdjournal\"\n)\n\n// timeNow is a test fake injection point.\nvar timeNow = time.Now\n\n// A SystemdLogSource reads log records from the given Systemd\n// journal.\ntype SystemdLogSource struct {\n\tjournal SystemdJournal\n\tpath    string\n}\n\n// A SystemdJournal is the journal interface that sdjournal.Journal\n// provides. See https://pkg.go.dev/github.com/coreos/go-systemd/sdjournal?tab=doc\ntype SystemdJournal interface {\n\tio.Closer\n\tAddMatch(match string) error\n\tGetEntry() (*sdjournal.JournalEntry, error)\n\tNext() (uint64, error)\n\tSeekRealtimeUsec(usec uint64) error\n\tWait(timeout time.Duration) int\n}\n\n// NewSystemdLogSource returns a log source for reading Systemd\n// journal entries. `unit` and `slice` provide filtering if non-empty\n// (with `slice` taking precedence).\nfunc NewSystemdLogSource(j SystemdJournal, path, unit, slice string) (*SystemdLogSource, error) {\n\tlogSrc := &SystemdLogSource{journal: j, path: path}\n\n\tvar err error\n\tif slice != \"\" {\n\t\terr = logSrc.journal.AddMatch(\"_SYSTEMD_SLICE=\" + slice)\n\t} else if unit != \"\" {\n\t\terr = logSrc.journal.AddMatch(\"_SYSTEMD_UNIT=\" + unit)\n\t}\n\tif err != nil {\n\t\tlogSrc.journal.Close()\n\t\treturn nil, err\n\t}\n\n\t// Start at end of journal\n\tif err := logSrc.journal.SeekRealtimeUsec(uint64(timeNow().UnixNano() / 1000)); err != nil {\n\t\tlogSrc.journal.Close()\n\t\treturn nil, err\n\t}\n\n\tif r := logSrc.journal.Wait(1 * time.Second); r < 0 {\n\t\tlogSrc.journal.Close()\n\t\treturn nil, err\n\t}\n\n\treturn logSrc, nil\n}\n\nfunc (s *SystemdLogSource) Close() error {\n\treturn s.journal.Close()\n}\n\nfunc (s *SystemdLogSource) Path() string {\n\treturn s.path\n}\n\nfunc (s *SystemdLogSource) Read(ctx context.Context) (string, error) {\n\tc, err := 
s.journal.Next()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif c == 0 {\n\t\treturn \"\", io.EOF\n\t}\n\n\te, err := s.journal.GetEntry()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tts := time.Unix(0, int64(e.RealtimeTimestamp)*int64(time.Microsecond))\n\n\treturn fmt.Sprintf(\n\t\t\"%s %s %s[%s]: %s\",\n\t\tts.Format(time.Stamp),\n\t\te.Fields[\"_HOSTNAME\"],\n\t\te.Fields[\"SYSLOG_IDENTIFIER\"],\n\t\te.Fields[\"_PID\"],\n\t\te.Fields[\"MESSAGE\"],\n\t), nil\n}\n\n// A systemdLogSourceFactory is a factory that can create\n// SystemdLogSources from command line flags.\ntype systemdLogSourceFactory struct {\n\tenable            bool\n\tunit, slice, path string\n}\n\nfunc (f *systemdLogSourceFactory) Init(app *kingpin.Application) {\n\tapp.Flag(\"systemd.enable\", \"Read from the systemd journal instead of log\").Default(\"false\").BoolVar(&f.enable)\n\tapp.Flag(\"systemd.unit\", \"Name of the Postfix systemd unit.\").Default(\"postfix.service\").StringVar(&f.unit)\n\tapp.Flag(\"systemd.slice\", \"Name of the Postfix systemd slice. Overrides the systemd unit.\").Default(\"\").StringVar(&f.slice)\n\tapp.Flag(\"systemd.journal_path\", \"Path to the systemd journal\").Default(\"\").StringVar(&f.path)\n}\n\nfunc (f *systemdLogSourceFactory) New(ctx context.Context) (LogSourceCloser, error) {\n\tif !f.enable {\n\t\treturn nil, nil\n\t}\n\n\tlog.Println(\"Reading log events from systemd\")\n\tj, path, err := newSystemdJournal(f.path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewSystemdLogSource(j, path, f.unit, f.slice)\n}\n\n// newSystemdJournal creates a journal handle. It returns the handle\n// and a string representation of it. 
If `path` is empty, it connects\n// to the local journald.\nfunc newSystemdJournal(path string) (*sdjournal.Journal, string, error) {\n\tif path != \"\" {\n\t\tj, err := sdjournal.NewJournalFromDir(path)\n\t\treturn j, path, err\n\t}\n\n\tj, err := sdjournal.NewJournal()\n\treturn j, \"journald\", err\n}\n\nfunc init() {\n\tRegisterLogSourceFactory(&systemdLogSourceFactory{})\n}\n"
  },
  {
    "path": "logsource_systemd_test.go",
    "content": "// +build !nosystemd,linux\n\npackage main\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/coreos/go-systemd/v22/sdjournal\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestNewSystemdLogSource(t *testing.T) {\n\tj := &fakeSystemdJournal{}\n\tsrc, err := NewSystemdLogSource(j, \"apath\", \"aunit\", \"aslice\")\n\tif err != nil {\n\t\tt.Fatalf(\"NewSystemdLogSource failed: %v\", err)\n\t}\n\n\tassert.Equal(t, []string{\"_SYSTEMD_SLICE=aslice\"}, j.addMatchCalls, \"A match should be added for slice.\")\n\tassert.Equal(t, []uint64{1234567890000000}, j.seekRealtimeUsecCalls, \"A call to SeekRealtimeUsec should be made.\")\n\tassert.Equal(t, []time.Duration{1 * time.Second}, j.waitCalls, \"A call to Wait should be made.\")\n\n\tif err := src.Close(); err != nil {\n\t\tt.Fatalf(\"Close failed: %v\", err)\n\t}\n\n\tassert.Equal(t, 1, j.closeCalls, \"A call to Close should be made.\")\n}\n\nfunc TestSystemdLogSource_Path(t *testing.T) {\n\tj := &fakeSystemdJournal{}\n\tsrc, err := NewSystemdLogSource(j, \"apath\", \"aunit\", \"aslice\")\n\tif err != nil {\n\t\tt.Fatalf(\"NewSystemdLogSource failed: %v\", err)\n\t}\n\tdefer src.Close()\n\n\tassert.Equal(t, \"apath\", src.Path(), \"Path should be set by New.\")\n}\n\nfunc TestSystemdLogSource_Read(t *testing.T) {\n\tctx := context.Background()\n\n\tj := &fakeSystemdJournal{\n\t\tgetEntryValues: []sdjournal.JournalEntry{\n\t\t\t{\n\t\t\t\tFields: map[string]string{\n\t\t\t\t\t\"_HOSTNAME\":         \"ahost\",\n\t\t\t\t\t\"SYSLOG_IDENTIFIER\": \"anid\",\n\t\t\t\t\t\"_PID\":              \"123\",\n\t\t\t\t\t\"MESSAGE\":           \"aline\",\n\t\t\t\t},\n\t\t\t\tRealtimeTimestamp: 1234567890000000,\n\t\t\t},\n\t\t},\n\t\tnextValues: []uint64{1},\n\t}\n\tsrc, err := NewSystemdLogSource(j, \"apath\", \"aunit\", \"aslice\")\n\tif err != nil {\n\t\tt.Fatalf(\"NewSystemdLogSource failed: %v\", err)\n\t}\n\tdefer src.Close()\n\n\ts, err := src.Read(ctx)\n\tif err != 
nil {\n\t\tt.Fatalf(\"Read failed: %v\", err)\n\t}\n\tassert.Equal(t, \"Feb 13 23:31:30 ahost anid[123]: aline\", s, \"Read should get data from the journal entry.\")\n}\n\nfunc TestSystemdLogSource_ReadEOF(t *testing.T) {\n\tctx := context.Background()\n\n\tj := &fakeSystemdJournal{\n\t\tnextValues: []uint64{0},\n\t}\n\tsrc, err := NewSystemdLogSource(j, \"apath\", \"aunit\", \"aslice\")\n\tif err != nil {\n\t\tt.Fatalf(\"NewSystemdLogSource failed: %v\", err)\n\t}\n\tdefer src.Close()\n\n\t_, err = src.Read(ctx)\n\tassert.Equal(t, io.EOF, err, \"Should interpret Next 0 as EOF.\")\n}\n\nfunc TestMain(m *testing.M) {\n\t// We compare Unix timestamps to date strings, so make it deterministic.\n\tos.Setenv(\"TZ\", \"UTC\")\n\ttimeNow = func() time.Time { return time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC) }\n\tdefer func() {\n\t\ttimeNow = time.Now\n\t}()\n\n\tos.Exit(m.Run())\n}\n\ntype fakeSystemdJournal struct {\n\tgetEntryValues []sdjournal.JournalEntry\n\tgetEntryError  error\n\tnextValues     []uint64\n\tnextError      error\n\n\taddMatchCalls         []string\n\tcloseCalls            int\n\tseekRealtimeUsecCalls []uint64\n\twaitCalls             []time.Duration\n}\n\nfunc (j *fakeSystemdJournal) AddMatch(match string) error {\n\tj.addMatchCalls = append(j.addMatchCalls, match)\n\treturn nil\n}\n\nfunc (j *fakeSystemdJournal) Close() error {\n\tj.closeCalls++\n\treturn nil\n}\n\nfunc (j *fakeSystemdJournal) GetEntry() (*sdjournal.JournalEntry, error) {\n\tif len(j.getEntryValues) == 0 {\n\t\treturn nil, j.getEntryError\n\t}\n\te := j.getEntryValues[0]\n\tj.getEntryValues = j.getEntryValues[1:]\n\treturn &e, nil\n}\n\nfunc (j *fakeSystemdJournal) Next() (uint64, error) {\n\tif len(j.nextValues) == 0 {\n\t\treturn 0, j.nextError\n\t}\n\tv := j.nextValues[0]\n\tj.nextValues = j.nextValues[1:]\n\treturn v, nil\n}\n\nfunc (j *fakeSystemdJournal) SeekRealtimeUsec(usec uint64) error {\n\tj.seekRealtimeUsecCalls = append(j.seekRealtimeUsecCalls, usec)\n\treturn 
nil\n}\n\nfunc (j *fakeSystemdJournal) Wait(timeout time.Duration) int {\n\tj.waitCalls = append(j.waitCalls, timeout)\n\treturn 0\n}\n"
  },
  {
    "path": "main.go",
    "content": "package main\n\nimport (\n\t\"context\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\n\t\"github.com/alecthomas/kingpin\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n)\n\nfunc main() {\n\tvar (\n\t\tctx                 = context.Background()\n\t\tapp                 = kingpin.New(\"postfix_exporter\", \"Prometheus metrics exporter for postfix\")\n\t\tlistenAddress       = app.Flag(\"web.listen-address\", \"Address to listen on for web interface and telemetry.\").Default(\":9154\").String()\n\t\tmetricsPath         = app.Flag(\"web.telemetry-path\", \"Path under which to expose metrics.\").Default(\"/metrics\").String()\n\t\tpostfixShowqPath    = app.Flag(\"postfix.showq_path\", \"Path at which Postfix places its showq socket.\").Default(\"/var/spool/postfix/public/showq\").String()\n\t\tlogUnsupportedLines = app.Flag(\"log.unsupported\", \"Log all unsupported lines.\").Bool()\n\t)\n\n\tInitLogSourceFactories(app)\n\tkingpin.MustParse(app.Parse(os.Args[1:]))\n\n\tlogSrc, err := NewLogSourceFromFactories(ctx)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error opening log source: %s\", err)\n\t}\n\tdefer logSrc.Close()\n\n\texporter, err := NewPostfixExporter(\n\t\t*postfixShowqPath,\n\t\tlogSrc,\n\t\t*logUnsupportedLines,\n\t)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create PostfixExporter: %s\", err)\n\t}\n\tprometheus.MustRegister(exporter)\n\n\thttp.Handle(*metricsPath, promhttp.Handler())\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\t_, err = w.Write([]byte(`\n\t\t\t<html>\n\t\t\t<head><title>Postfix Exporter</title></head>\n\t\t\t<body>\n\t\t\t<h1>Postfix Exporter</h1>\n\t\t\t<p><a href='` + *metricsPath + `'>Metrics</a></p>\n\t\t\t</body>\n\t\t\t</html>`))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t})\n\tctx, cancelFunc := context.WithCancel(ctx)\n\tdefer cancelFunc()\n\tgo exporter.StartMetricCollection(ctx)\n\tlog.Print(\"Listening on \", 
*listenAddress)\n\tlog.Fatal(http.ListenAndServe(*listenAddress, nil))\n}\n"
  },
  {
    "path": "mock/HistogramVecMock.go",
    "content": "package mock\n\nimport \"github.com/prometheus/client_golang/prometheus\"\n\ntype HistorgramVecMock struct {\n\tmock HistogramMock\n}\n\nfunc (m *HistorgramVecMock) Describe(chan<- *prometheus.Desc) {\n\tpanic(\"implement me\")\n}\n\nfunc (m *HistorgramVecMock) GetMetricWith(prometheus.Labels) (prometheus.Observer, error) {\n\tpanic(\"implement me\")\n}\n\nfunc (m *HistorgramVecMock) GetMetricWithLabelValues(lvs ...string) (prometheus.Observer, error) {\n\tpanic(\"implement me\")\n}\n\nfunc (m *HistorgramVecMock) With(prometheus.Labels) prometheus.Observer {\n\tpanic(\"implement me\")\n}\n\nfunc (m *HistorgramVecMock) WithLabelValues(...string) prometheus.Observer {\n\treturn m.mock\n}\n\nfunc (m *HistorgramVecMock) CurryWith(prometheus.Labels) (prometheus.ObserverVec, error) {\n\tpanic(\"implement me\")\n}\n\nfunc (m *HistorgramVecMock) MustCurryWith(prometheus.Labels) prometheus.ObserverVec {\n\tpanic(\"implement me\")\n}\n\nfunc (m *HistorgramVecMock) Collect(chan<- prometheus.Metric) {\n\tpanic(\"implement me\")\n}\nfunc (m *HistorgramVecMock) GetSum() float64 {\n\treturn *m.mock.sum\n}\n\nfunc NewHistogramVecMock() *HistorgramVecMock {\n\treturn &HistorgramVecMock{mock: *NewHistogramMock()}\n}\n"
  },
  {
    "path": "mock/HistorgramMock.go",
    "content": "package mock\n\nimport (\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_model/go\"\n)\n\ntype HistogramMock struct {\n\tsum *float64\n}\n\nfunc NewHistogramMock() *HistogramMock {\n\treturn &HistogramMock{sum: new(float64)}\n}\n\nfunc (HistogramMock) Desc() *prometheus.Desc {\n\tpanic(\"implement me\")\n}\n\nfunc (HistogramMock) Write(*io_prometheus_client.Metric) error {\n\tpanic(\"implement me\")\n}\n\nfunc (HistogramMock) Describe(chan<- *prometheus.Desc) {\n\tpanic(\"implement me\")\n}\n\nfunc (HistogramMock) Collect(chan<- prometheus.Metric) {\n\tpanic(\"implement me\")\n}\n\nfunc (h HistogramMock) Observe(value float64) {\n\t*h.sum += value\n}\n"
  },
  {
    "path": "postfix_exporter.go",
    "content": "// Copyright 2017 Kumina, https://kumina.nl/\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar (\n\tpostfixUpDesc = prometheus.NewDesc(\n\t\tprometheus.BuildFQName(\"postfix\", \"\", \"up\"),\n\t\t\"Whether scraping Postfix's metrics was successful.\",\n\t\t[]string{\"path\"}, nil)\n)\n\n// PostfixExporter holds the state that should be preserved by the\n// Postfix Prometheus metrics exporter across scrapes.\ntype PostfixExporter struct {\n\tshowqPath           string\n\tlogSrc              LogSource\n\tlogUnsupportedLines bool\n\n\t// Metrics that should persist after refreshes, based on logs.\n\tcleanupProcesses                prometheus.Counter\n\tcleanupRejects                  prometheus.Counter\n\tcleanupNotAccepted              prometheus.Counter\n\tlmtpDelays                      *prometheus.HistogramVec\n\tpipeDelays                      *prometheus.HistogramVec\n\tqmgrInsertsNrcpt                prometheus.Histogram\n\tqmgrInsertsSize                 prometheus.Histogram\n\tqmgrRemoves                     prometheus.Counter\n\tqmgrExpires                     prometheus.Counter\n\tsmtpDelays                      *prometheus.HistogramVec\n\tsmtpTLSConnects                 
*prometheus.CounterVec\n\tsmtpConnectionTimedOut          prometheus.Counter\n\tsmtpProcesses                    *prometheus.CounterVec\n\t// should be the same as smtpProcesses{status=deferred}, kept for compatibility, but this doesn't work !\n\tsmtpDeferreds                   prometheus.Counter\n\tsmtpdConnects                   prometheus.Counter\n\tsmtpdDisconnects                prometheus.Counter\n\tsmtpdFCrDNSErrors               prometheus.Counter\n\tsmtpdLostConnections            *prometheus.CounterVec\n\tsmtpdProcesses                  *prometheus.CounterVec\n\tsmtpdRejects                    *prometheus.CounterVec\n\tsmtpdSASLAuthenticationFailures prometheus.Counter\n\tsmtpdTLSConnects                *prometheus.CounterVec\n\tunsupportedLogEntries           *prometheus.CounterVec\n\t// same as smtpProcesses{status=deferred}, kept for compatibility\n\tsmtpStatusDeferred              prometheus.Counter\n\topendkimSignatureAdded          *prometheus.CounterVec\n\tbounceNonDelivery               prometheus.Counter\n\tvirtualDelivered                prometheus.Counter\n}\n\n// A LogSource is an interface to read log lines.\ntype LogSource interface {\n\t// Path returns a representation of the log location.\n\tPath() string\n\n\t// Read returns the next log line. Returns `io.EOF` at the end of\n\t// the log.\n\tRead(context.Context) (string, error)\n}\n\n// CollectShowqFromReader parses the output of Postfix's 'showq' command\n// and turns it into metrics.\n//\n// The output format of this command depends on the version of Postfix\n// used. Postfix 2.x uses a textual format, identical to the output of\n// the 'mailq' command. Postfix 3.x uses a binary format, where entries\n// are terminated using null bytes. 
Auto-detect the format by scanning\n// for null bytes in the first 128 bytes of output.\nfunc CollectShowqFromReader(file io.Reader, ch chan<- prometheus.Metric) error {\n\treader := bufio.NewReader(file)\n\tbuf, err := reader.Peek(128)\n\tif err != nil && err != io.EOF {\n\t\tlog.Printf(\"Could not read postfix output, %v\", err)\n\t}\n\tif bytes.IndexByte(buf, 0) >= 0 {\n\t\treturn CollectBinaryShowqFromReader(reader, ch)\n\t}\n\treturn CollectTextualShowqFromReader(reader, ch)\n}\n\n// CollectTextualShowqFromReader parses Postfix's textual showq output.\nfunc CollectTextualShowqFromReader(file io.Reader, ch chan<- prometheus.Metric) error {\n\n\t// Histograms tracking the messages by size and age.\n\tsizeHistogram := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"showq_message_size_bytes\",\n\t\t\tHelp:      \"Size of messages in Postfix's message queue, in bytes\",\n\t\t\tBuckets:   []float64{1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9},\n\t\t},\n\t\t[]string{\"queue\"})\n\tageHistogram := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"showq_message_age_seconds\",\n\t\t\tHelp:      \"Age of messages in Postfix's message queue, in seconds\",\n\t\t\tBuckets:   []float64{1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8},\n\t\t},\n\t\t[]string{\"queue\"})\n\n\terr := CollectTextualShowqFromScanner(sizeHistogram, ageHistogram, file)\n\n\tsizeHistogram.Collect(ch)\n\tageHistogram.Collect(ch)\n\treturn err\n}\n\nfunc CollectTextualShowqFromScanner(sizeHistogram prometheus.ObserverVec, ageHistogram prometheus.ObserverVec, file io.Reader) error {\n\tscanner := bufio.NewScanner(file)\n\tscanner.Split(bufio.ScanLines)\n\t// Initialize all queue buckets to zero.\n\tfor _, q := range []string{\"active\", \"hold\", \"other\"} {\n\t\tsizeHistogram.WithLabelValues(q)\n\t\tageHistogram.WithLabelValues(q)\n\t}\n\n\tlocation, err := time.LoadLocation(\"Local\")\n\tif err != 
nil {\n\t\tlog.Println(err)\n\t}\n\n\t// Regular expression for matching postqueue's output. Example:\n\t// \"A07A81514      5156 Tue Feb 14 13:13:54  MAILER-DAEMON\"\n\tmessageLine := regexp.MustCompile(`^[0-9A-F]+([\\*!]?) +(\\d+) (\\w{3} \\w{3} +\\d+ +\\d+:\\d{2}:\\d{2}) +`)\n\n\tfor scanner.Scan() {\n\t\ttext := scanner.Text()\n\t\tmatches := messageLine.FindStringSubmatch(text)\n\t\tif matches == nil {\n\t\t\tcontinue\n\t\t}\n\t\tqueueMatch := matches[1]\n\t\tsizeMatch := matches[2]\n\t\tdateMatch := matches[3]\n\n\t\t// Derive the name of the message queue.\n\t\tqueue := \"other\"\n\t\tif queueMatch == \"*\" {\n\t\t\tqueue = \"active\"\n\t\t} else if queueMatch == \"!\" {\n\t\t\tqueue = \"hold\"\n\t\t}\n\n\t\t// Parse the message size.\n\t\tsize, err := strconv.ParseFloat(sizeMatch, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Parse the message date. Unfortunately, the\n\t\t// output contains no year number. Assume it\n\t\t// applies to the last year for which the\n\t\t// message date doesn't exceed time.Now().\n\t\tdate, err := time.ParseInLocation(\"Mon Jan 2 15:04:05\", dateMatch, location)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnow := time.Now()\n\t\tdate = date.AddDate(now.Year(), 0, 0)\n\t\tif date.After(now) {\n\t\t\tdate = date.AddDate(-1, 0, 0)\n\t\t}\n\n\t\tsizeHistogram.WithLabelValues(queue).Observe(size)\n\t\tageHistogram.WithLabelValues(queue).Observe(now.Sub(date).Seconds())\n\t}\n\treturn scanner.Err()\n}\n\n// ScanNullTerminatedEntries is a splitting function for bufio.Scanner\n// to split entries by null bytes.\nfunc ScanNullTerminatedEntries(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\tif i := bytes.IndexByte(data, 0); i >= 0 {\n\t\t// Valid record found.\n\t\treturn i + 1, data[0:i], nil\n\t} else if atEOF && len(data) != 0 {\n\t\t// Data at the end of the file without a null terminator.\n\t\treturn 0, nil, errors.New(\"Expected null byte terminator\")\n\t} else {\n\t\t// Request more 
data.\n\t\treturn 0, nil, nil\n\t}\n}\n\n// CollectBinaryShowqFromReader parses Postfix's binary showq format.\nfunc CollectBinaryShowqFromReader(file io.Reader, ch chan<- prometheus.Metric) error {\n\tscanner := bufio.NewScanner(file)\n\tscanner.Split(ScanNullTerminatedEntries)\n\n\t// Histograms tracking the messages by size and age.\n\tsizeHistogram := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"showq_message_size_bytes\",\n\t\t\tHelp:      \"Size of messages in Postfix's message queue, in bytes\",\n\t\t\tBuckets:   []float64{1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9},\n\t\t},\n\t\t[]string{\"queue\"})\n\tageHistogram := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"showq_message_age_seconds\",\n\t\t\tHelp:      \"Age of messages in Postfix's message queue, in seconds\",\n\t\t\tBuckets:   []float64{1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8},\n\t\t},\n\t\t[]string{\"queue\"})\n\n\t// Initialize all queue buckets to zero.\n\tfor _, q := range []string{\"active\", \"deferred\", \"hold\", \"incoming\", \"maildrop\"} {\n\t\tsizeHistogram.WithLabelValues(q)\n\t\tageHistogram.WithLabelValues(q)\n\t}\n\n\tnow := float64(time.Now().UnixNano()) / 1e9\n\tqueue := \"unknown\"\n\tfor scanner.Scan() {\n\t\t// Parse a key/value entry.\n\t\tkey := scanner.Text()\n\t\tif len(key) == 0 {\n\t\t\t// Empty key means a record separator.\n\t\t\tqueue = \"unknown\"\n\t\t\tcontinue\n\t\t}\n\t\tif !scanner.Scan() {\n\t\t\treturn fmt.Errorf(\"key %q does not have a value\", key)\n\t\t}\n\t\tvalue := scanner.Text()\n\n\t\tif key == \"queue_name\" {\n\t\t\t// The name of the message queue.\n\t\t\tqueue = value\n\t\t} else if key == \"size\" {\n\t\t\t// Message size in bytes.\n\t\t\tsize, err := strconv.ParseFloat(value, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsizeHistogram.WithLabelValues(queue).Observe(size)\n\t\t} else if key == \"time\" {\n\t\t\t// 
Message time as a UNIX timestamp.\n\t\t\tutime, err := strconv.ParseFloat(value, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tageHistogram.WithLabelValues(queue).Observe(now - utime)\n\t\t}\n\t}\n\n\tsizeHistogram.Collect(ch)\n\tageHistogram.Collect(ch)\n\treturn scanner.Err()\n}\n\n// CollectShowqFromSocket collects Postfix queue statistics from a socket.\nfunc CollectShowqFromSocket(path string, ch chan<- prometheus.Metric) error {\n\tfd, err := net.Dial(\"unix\", path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fd.Close()\n\treturn CollectShowqFromReader(fd, ch)\n}\n\n// Patterns for parsing log messages.\nvar (\n\tlogLine                             = regexp.MustCompile(` ?(postfix|opendkim)(/(\\w+))?\\[\\d+\\]: ((?:(warning|error|fatal|panic): )?.*)`)\n\tlmtpPipeSMTPLine                    = regexp.MustCompile(`, relay=(\\S+), .*, delays=([0-9\\.]+)/([0-9\\.]+)/([0-9\\.]+)/([0-9\\.]+), `)\n\tqmgrInsertLine                      = regexp.MustCompile(`:.*, size=(\\d+), nrcpt=(\\d+) `)\n\tqmgrExpiredLine                     = regexp.MustCompile(`:.*, status=(expired|force-expired), returned to sender`)\n\tsmtpStatusLine                      = regexp.MustCompile(`, status=(\\w+) `)\n\tsmtpTLSLine                         = regexp.MustCompile(`^(\\S+) TLS connection established to \\S+: (\\S+) with cipher (\\S+) \\((\\d+)/(\\d+) bits\\)`)\n\tsmtpConnectionTimedOut              = regexp.MustCompile(`^connect\\s+to\\s+(.*)\\[(.*)\\]:(\\d+):\\s+(Connection timed out)$`)\n\tsmtpdFCrDNSErrorsLine               = regexp.MustCompile(`^warning: hostname \\S+ does not resolve to address `)\n\tsmtpdProcessesSASLLine              = regexp.MustCompile(`: client=.*, sasl_method=(\\S+)`)\n\tsmtpdRejectsLine                    = regexp.MustCompile(`^NOQUEUE: reject: RCPT from \\S+: ([0-9]+) `)\n\tsmtpdLostConnectionLine             = regexp.MustCompile(`^lost connection after (\\w+) from `)\n\tsmtpdSASLAuthenticationFailuresLine = 
regexp.MustCompile(`^warning: \\S+: SASL \\S+ authentication failed: `)\n\tsmtpdTLSLine                        = regexp.MustCompile(`^(\\S+) TLS connection established from \\S+: (\\S+) with cipher (\\S+) \\((\\d+)/(\\d+) bits\\)`)\n\topendkimSignatureAdded              = regexp.MustCompile(`^[\\w\\d]+: DKIM-Signature field added \\(s=(\\w+), d=(.*)\\)$`)\n\tbounceNonDeliveryLine               = regexp.MustCompile(`: sender non-delivery notification: `)\n)\n\n// CollectFromLogLine collects metrics from a Postfix log line.\nfunc (e *PostfixExporter) CollectFromLogLine(line string) {\n\t// Strip off timestamp, hostname, etc.\n\tlogMatches := logLine.FindStringSubmatch(line)\n\n\tif logMatches == nil {\n\t\t// Unknown log entry format.\n\t\te.addToUnsupportedLine(line, \"\", \"\")\n\t\treturn\n\t}\n\tprocess := logMatches[1]\n\tlevel := logMatches[5]\n\tremainder := logMatches[4]\n\tswitch process {\n\tcase \"postfix\":\n\t\t// Group patterns to check by Postfix service.\n\t\tsubprocess := logMatches[3]\n\t\tswitch subprocess {\n\t\tcase \"cleanup\":\n\t\t\tif strings.Contains(remainder, \": message-id=<\") {\n\t\t\t\te.cleanupProcesses.Inc()\n\t\t\t} else if strings.Contains(remainder, \": reject: \") {\n\t\t\t\te.cleanupRejects.Inc()\n\t\t\t} else {\n\t\t\t\te.addToUnsupportedLine(line, subprocess, level)\n\t\t\t}\n\t\tcase \"lmtp\":\n\t\t\tif lmtpMatches := lmtpPipeSMTPLine.FindStringSubmatch(remainder); lmtpMatches != nil {\n\t\t\t\taddToHistogramVec(e.lmtpDelays, lmtpMatches[2], \"LMTP pdelay\", \"before_queue_manager\")\n\t\t\t\taddToHistogramVec(e.lmtpDelays, lmtpMatches[3], \"LMTP adelay\", \"queue_manager\")\n\t\t\t\taddToHistogramVec(e.lmtpDelays, lmtpMatches[4], \"LMTP sdelay\", \"connection_setup\")\n\t\t\t\taddToHistogramVec(e.lmtpDelays, lmtpMatches[5], \"LMTP xdelay\", \"transmission\")\n\t\t\t} else {\n\t\t\t\te.addToUnsupportedLine(line, subprocess, level)\n\t\t\t}\n\t\tcase \"pipe\":\n\t\t\tif pipeMatches := 
lmtpPipeSMTPLine.FindStringSubmatch(remainder); pipeMatches != nil {\n\t\t\t\taddToHistogramVec(e.pipeDelays, pipeMatches[2], \"PIPE pdelay\", pipeMatches[1], \"before_queue_manager\")\n\t\t\t\taddToHistogramVec(e.pipeDelays, pipeMatches[3], \"PIPE adelay\", pipeMatches[1], \"queue_manager\")\n\t\t\t\taddToHistogramVec(e.pipeDelays, pipeMatches[4], \"PIPE sdelay\", pipeMatches[1], \"connection_setup\")\n\t\t\t\taddToHistogramVec(e.pipeDelays, pipeMatches[5], \"PIPE xdelay\", pipeMatches[1], \"transmission\")\n\t\t\t} else {\n\t\t\t\te.addToUnsupportedLine(line, subprocess, level)\n\t\t\t}\n\t\tcase \"qmgr\":\n\t\t\tif qmgrInsertMatches := qmgrInsertLine.FindStringSubmatch(remainder); qmgrInsertMatches != nil {\n\t\t\t\taddToHistogram(e.qmgrInsertsSize, qmgrInsertMatches[1], \"QMGR size\")\n\t\t\t\taddToHistogram(e.qmgrInsertsNrcpt, qmgrInsertMatches[2], \"QMGR nrcpt\")\n\t\t\t} else if strings.HasSuffix(remainder, \": removed\") {\n\t\t\t\te.qmgrRemoves.Inc()\n\t\t\t} else if qmgrExpired := qmgrExpiredLine.FindStringSubmatch(remainder); qmgrExpired != nil {\n\t\t\t\te.qmgrExpires.Inc()\n\t\t\t} else {\n\t\t\t\te.addToUnsupportedLine(line, subprocess, level)\n\t\t\t}\n\t\tcase \"smtp\":\n\t\t\tif smtpMatches := lmtpPipeSMTPLine.FindStringSubmatch(remainder); smtpMatches != nil {\n\t\t\t\taddToHistogramVec(e.smtpDelays, smtpMatches[2], \"before_queue_manager\", \"\")\n\t\t\t\taddToHistogramVec(e.smtpDelays, smtpMatches[3], \"queue_manager\", \"\")\n\t\t\t\taddToHistogramVec(e.smtpDelays, smtpMatches[4], \"connection_setup\", \"\")\n\t\t\t\taddToHistogramVec(e.smtpDelays, smtpMatches[5], \"transmission\", \"\")\n\t\t\t\tif smtpStatusMatches := smtpStatusLine.FindStringSubmatch(remainder); smtpStatusMatches != nil {\n\t\t\t\t\te.smtpProcesses.WithLabelValues(smtpStatusMatches[1]).Inc()\n\t\t\t\t\tif smtpStatusMatches[1] == \"deferred\" {\n\t\t\t\t\t\te.smtpStatusDeferred.Inc()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if smtpTLSMatches := 
smtpTLSLine.FindStringSubmatch(remainder); smtpTLSMatches != nil {\n\t\t\t\te.smtpTLSConnects.WithLabelValues(smtpTLSMatches[1:]...).Inc()\n\t\t\t} else if smtpMatches := smtpConnectionTimedOut.FindStringSubmatch(remainder); smtpMatches != nil {\n\t\t\t\te.smtpConnectionTimedOut.Inc()\n\t\t\t} else {\n\t\t\t\te.addToUnsupportedLine(line, subprocess, level)\n\t\t\t}\n\t\tcase \"smtpd\":\n\t\t\tif strings.HasPrefix(remainder, \"connect from \") {\n\t\t\t\te.smtpdConnects.Inc()\n\t\t\t} else if strings.HasPrefix(remainder, \"disconnect from \") {\n\t\t\t\te.smtpdDisconnects.Inc()\n\t\t\t} else if smtpdFCrDNSErrorsLine.MatchString(remainder) {\n\t\t\t\te.smtpdFCrDNSErrors.Inc()\n\t\t\t} else if smtpdLostConnectionMatches := smtpdLostConnectionLine.FindStringSubmatch(remainder); smtpdLostConnectionMatches != nil {\n\t\t\t\te.smtpdLostConnections.WithLabelValues(smtpdLostConnectionMatches[1]).Inc()\n\t\t\t} else if smtpdProcessesSASLMatches := smtpdProcessesSASLLine.FindStringSubmatch(remainder); smtpdProcessesSASLMatches != nil {\n\t\t\t\te.smtpdProcesses.WithLabelValues(smtpdProcessesSASLMatches[1]).Inc()\n\t\t\t} else if strings.Contains(remainder, \": client=\") {\n\t\t\t\te.smtpdProcesses.WithLabelValues(\"\").Inc()\n\t\t\t} else if smtpdRejectsMatches := smtpdRejectsLine.FindStringSubmatch(remainder); smtpdRejectsMatches != nil {\n\t\t\t\te.smtpdRejects.WithLabelValues(smtpdRejectsMatches[1]).Inc()\n\t\t\t} else if smtpdSASLAuthenticationFailuresLine.MatchString(remainder) {\n\t\t\t\te.smtpdSASLAuthenticationFailures.Inc()\n\t\t\t} else if smtpdTLSMatches := smtpdTLSLine.FindStringSubmatch(remainder); smtpdTLSMatches != nil {\n\t\t\t\te.smtpdTLSConnects.WithLabelValues(smtpdTLSMatches[1:]...).Inc()\n\t\t\t} else {\n\t\t\t\te.addToUnsupportedLine(line, subprocess, level)\n\t\t\t}\n\t\tcase \"bounce\":\n\t\t\tif bounceMatches := bounceNonDeliveryLine.FindStringSubmatch(remainder); bounceMatches != nil {\n\t\t\t\te.bounceNonDelivery.Inc()\n\t\t\t} else 
{\n\t\t\t\te.addToUnsupportedLine(line, process, level)\n\t\t\t}\n\t\tcase \"virtual\":\n\t\t\tif strings.HasSuffix(remainder, \", status=sent (delivered to maildir)\") {\n\t\t\t\te.virtualDelivered.Inc()\n\t\t\t} else {\n\t\t\t\te.addToUnsupportedLine(line, process, level)\n\t\t\t}\n\t\tdefault:\n\t\t\te.addToUnsupportedLine(line, subprocess, level)\n\t\t}\n\tcase \"opendkim\":\n\t\tif opendkimMatches := opendkimSignatureAdded.FindStringSubmatch(remainder); opendkimMatches != nil {\n\t\t\te.opendkimSignatureAdded.WithLabelValues(opendkimMatches[1], opendkimMatches[2]).Inc()\n\t\t} else {\n\t\t\te.addToUnsupportedLine(line, process, level)\n\t\t}\n\tdefault:\n\t\t// Unknown log entry format.\n\t\te.addToUnsupportedLine(line, process, level)\n\t}\n}\n\nfunc (e *PostfixExporter) addToUnsupportedLine(line string, subprocess string, level string) {\n\tif e.logUnsupportedLines {\n\t\tlog.Printf(\"Unsupported Line: %v\", line)\n\t}\n\te.unsupportedLogEntries.WithLabelValues(subprocess, level).Inc()\n}\n\nfunc addToHistogram(h prometheus.Histogram, value, fieldName string) {\n\tfloat, err := strconv.ParseFloat(value, 64)\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't convert value '%s' for %v: %v\", value, fieldName, err)\n\t}\n\th.Observe(float)\n}\nfunc addToHistogramVec(h *prometheus.HistogramVec, value, fieldName string, labels ...string) {\n\tfloat, err := strconv.ParseFloat(value, 64)\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't convert value '%s' for %v: %v\", value, fieldName, err)\n\t}\n\th.WithLabelValues(labels...).Observe(float)\n}\n\n// NewPostfixExporter creates a new Postfix exporter instance.\nfunc NewPostfixExporter(showqPath string, logSrc LogSource, logUnsupportedLines bool) (*PostfixExporter, error) {\n\ttimeBuckets := []float64{1e-3, 1e-2, 1e-1, 1.0, 10, 1 * 60, 1 * 60 * 60, 24 * 60 * 60, 2 * 24 * 60 * 60}\n\treturn &PostfixExporter{\n\t\tlogUnsupportedLines: logUnsupportedLines,\n\t\tshowqPath:           showqPath,\n\t\tlogSrc:              
logSrc,\n\n\t\tcleanupProcesses: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"cleanup_messages_processed_total\",\n\t\t\tHelp:      \"Total number of messages processed by cleanup.\",\n\t\t}),\n\t\tcleanupRejects: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"cleanup_messages_rejected_total\",\n\t\t\tHelp:      \"Total number of messages rejected by cleanup.\",\n\t\t}),\n\t\tcleanupNotAccepted: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"cleanup_messages_not_accepted_total\",\n\t\t\tHelp:      \"Total number of messages not accepted by cleanup.\",\n\t\t}),\n\t\tlmtpDelays: prometheus.NewHistogramVec(\n\t\t\tprometheus.HistogramOpts{\n\t\t\t\tNamespace: \"postfix\",\n\t\t\t\tName:      \"lmtp_delivery_delay_seconds\",\n\t\t\t\tHelp:      \"LMTP message processing time in seconds.\",\n\t\t\t\tBuckets:   timeBuckets,\n\t\t\t},\n\t\t\t[]string{\"stage\"}),\n\t\tpipeDelays: prometheus.NewHistogramVec(\n\t\t\tprometheus.HistogramOpts{\n\t\t\t\tNamespace: \"postfix\",\n\t\t\t\tName:      \"pipe_delivery_delay_seconds\",\n\t\t\t\tHelp:      \"Pipe message processing time in seconds.\",\n\t\t\t\tBuckets:   timeBuckets,\n\t\t\t},\n\t\t\t[]string{\"relay\", \"stage\"}),\n\t\tqmgrInsertsNrcpt: prometheus.NewHistogram(prometheus.HistogramOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"qmgr_messages_inserted_receipients\",\n\t\t\tHelp:      \"Number of receipients per message inserted into the mail queues.\",\n\t\t\tBuckets:   []float64{1, 2, 4, 8, 16, 32, 64, 128},\n\t\t}),\n\t\tqmgrInsertsSize: prometheus.NewHistogram(prometheus.HistogramOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"qmgr_messages_inserted_size_bytes\",\n\t\t\tHelp:      \"Size of messages inserted into the mail queues in bytes.\",\n\t\t\tBuckets:   []float64{1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9},\n\t\t}),\n\t\tqmgrRemoves: 
prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"qmgr_messages_removed_total\",\n\t\t\tHelp:      \"Total number of messages removed from mail queues.\",\n\t\t}),\n\t\tqmgrExpires: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"qmgr_messages_expired_total\",\n\t\t\tHelp:      \"Total number of messages expired from mail queues.\",\n\t\t}),\n\t\tsmtpDelays: prometheus.NewHistogramVec(\n\t\t\tprometheus.HistogramOpts{\n\t\t\t\tNamespace: \"postfix\",\n\t\t\t\tName:      \"smtp_delivery_delay_seconds\",\n\t\t\t\tHelp:      \"SMTP message processing time in seconds.\",\n\t\t\t\tBuckets:   timeBuckets,\n\t\t\t},\n\t\t\t[]string{\"stage\"}),\n\t\tsmtpTLSConnects: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tNamespace: \"postfix\",\n\t\t\t\tName:      \"smtp_tls_connections_total\",\n\t\t\t\tHelp:      \"Total number of outgoing TLS connections.\",\n\t\t\t},\n\t\t\t[]string{\"trust\", \"protocol\", \"cipher\", \"secret_bits\", \"algorithm_bits\"}),\n\t\tsmtpDeferreds: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"smtp_deferred_messages_total\",\n\t\t\tHelp:      \"Total number of messages that have been deferred on SMTP.\",\n\t\t}),\n\t\tsmtpProcesses: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tNamespace: \"postfix\",\n\t\t\t\tName:      \"smtp_messages_processed_total\",\n\t\t\t\tHelp:      \"Total number of messages that have been processed by the smtp process.\",\n\t\t\t},\n\t\t\t[]string{\"status\"}),\n\t\tsmtpConnectionTimedOut: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"smtp_connection_timed_out_total\",\n\t\t\tHelp:      \"Total number of messages that have been deferred on SMTP.\",\n\t\t}),\n\t\tsmtpdConnects: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      
\"smtpd_connects_total\",\n\t\t\tHelp:      \"Total number of incoming connections.\",\n\t\t}),\n\t\tsmtpdDisconnects: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"smtpd_disconnects_total\",\n\t\t\tHelp:      \"Total number of incoming disconnections.\",\n\t\t}),\n\t\tsmtpdFCrDNSErrors: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"smtpd_forward_confirmed_reverse_dns_errors_total\",\n\t\t\tHelp:      \"Total number of connections for which forward-confirmed DNS cannot be resolved.\",\n\t\t}),\n\t\tsmtpdLostConnections: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tNamespace: \"postfix\",\n\t\t\t\tName:      \"smtpd_connections_lost_total\",\n\t\t\t\tHelp:      \"Total number of connections lost.\",\n\t\t\t},\n\t\t\t[]string{\"after_stage\"}),\n\t\tsmtpdProcesses: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tNamespace: \"postfix\",\n\t\t\t\tName:      \"smtpd_messages_processed_total\",\n\t\t\t\tHelp:      \"Total number of messages processed.\",\n\t\t\t},\n\t\t\t[]string{\"sasl_method\"}),\n\t\tsmtpdRejects: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tNamespace: \"postfix\",\n\t\t\t\tName:      \"smtpd_messages_rejected_total\",\n\t\t\t\tHelp:      \"Total number of NOQUEUE rejects.\",\n\t\t\t},\n\t\t\t[]string{\"code\"}),\n\t\tsmtpdSASLAuthenticationFailures: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"smtpd_sasl_authentication_failures_total\",\n\t\t\tHelp:      \"Total number of SASL authentication failures.\",\n\t\t}),\n\t\tsmtpdTLSConnects: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tNamespace: \"postfix\",\n\t\t\t\tName:      \"smtpd_tls_connections_total\",\n\t\t\t\tHelp:      \"Total number of incoming TLS connections.\",\n\t\t\t},\n\t\t\t[]string{\"trust\", \"protocol\", \"cipher\", \"secret_bits\", 
\"algorithm_bits\"}),\n\t\tunsupportedLogEntries: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tNamespace: \"postfix\",\n\t\t\t\tName:      \"unsupported_log_entries_total\",\n\t\t\t\tHelp:      \"Log entries that could not be processed.\",\n\t\t\t},\n\t\t\t[]string{\"service\", \"level\"}),\n\t\tsmtpStatusDeferred: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"smtp_status_deferred\",\n\t\t\tHelp:      \"Total number of messages deferred.\",\n\t\t}),\n\t\topendkimSignatureAdded: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tNamespace: \"opendkim\",\n\t\t\t\tName:      \"signatures_added_total\",\n\t\t\t\tHelp:      \"Total number of messages signed.\",\n\t\t\t},\n\t\t\t[]string{\"subject\", \"domain\"},\n\t\t),\n\t\tbounceNonDelivery: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"bounce_non_delivery_notification_total\",\n\t\t\tHelp:      \"Total number of non delivery notification sent by bounce.\",\n\t\t}),\n\t\tvirtualDelivered: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tName:      \"virtual_delivered_total\",\n\t\t\tHelp:      \"Total number of mail delivered to a virtual mailbox.\",\n\t\t}),\n\t}, nil\n}\n\n// Describe the Prometheus metrics that are going to be exported.\nfunc (e *PostfixExporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- postfixUpDesc\n\n\tif e.logSrc == nil {\n\t\treturn\n\t}\n\tch <- e.cleanupProcesses.Desc()\n\tch <- e.cleanupRejects.Desc()\n\tch <- e.cleanupNotAccepted.Desc()\n\te.lmtpDelays.Describe(ch)\n\te.pipeDelays.Describe(ch)\n\tch <- e.qmgrInsertsNrcpt.Desc()\n\tch <- e.qmgrInsertsSize.Desc()\n\tch <- e.qmgrRemoves.Desc()\n\tch <- e.qmgrExpires.Desc()\n\te.smtpDelays.Describe(ch)\n\te.smtpTLSConnects.Describe(ch)\n\tch <- e.smtpDeferreds.Desc()\n\te.smtpProcesses.Describe(ch)\n\tch <- e.smtpdConnects.Desc()\n\tch <- 
e.smtpdDisconnects.Desc()\n\tch <- e.smtpdFCrDNSErrors.Desc()\n\te.smtpdLostConnections.Describe(ch)\n\te.smtpdProcesses.Describe(ch)\n\te.smtpdRejects.Describe(ch)\n\tch <- e.smtpdSASLAuthenticationFailures.Desc()\n\te.smtpdTLSConnects.Describe(ch)\n\tch <- e.smtpStatusDeferred.Desc()\n\te.unsupportedLogEntries.Describe(ch)\n\te.smtpConnectionTimedOut.Describe(ch)\n\te.opendkimSignatureAdded.Describe(ch)\n\tch <- e.bounceNonDelivery.Desc()\n\tch <- e.virtualDelivered.Desc()\n}\n\nfunc (e *PostfixExporter) StartMetricCollection(ctx context.Context) {\n\tif e.logSrc == nil {\n\t\treturn\n\t}\n\n\tgaugeVec := prometheus.NewGaugeVec(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace: \"postfix\",\n\t\t\tSubsystem: \"\",\n\t\t\tName:      \"up\",\n\t\t\tHelp:      \"Whether scraping Postfix's metrics was successful.\",\n\t\t},\n\t\t[]string{\"path\"})\n\tgauge := gaugeVec.WithLabelValues(e.logSrc.Path())\n\tdefer gauge.Set(0)\n\n\tfor {\n\t\tline, err := e.logSrc.Read(ctx)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlog.Printf(\"Couldn't read journal: %v\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\te.CollectFromLogLine(line)\n\t\tgauge.Set(1)\n\t}\n}\n\n// Collect metrics from Postfix's showq socket and its log file.\nfunc (e *PostfixExporter) Collect(ch chan<- prometheus.Metric) {\n\terr := CollectShowqFromSocket(e.showqPath, ch)\n\tif err == nil {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tpostfixUpDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\t1.0,\n\t\t\te.showqPath)\n\t} else {\n\t\tlog.Printf(\"Failed to scrape showq socket: %s\", err)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tpostfixUpDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\t0.0,\n\t\t\te.showqPath)\n\t}\n\n\tif e.logSrc == nil {\n\t\treturn\n\t}\n\tch <- e.cleanupProcesses\n\tch <- e.cleanupRejects\n\tch <- e.cleanupNotAccepted\n\te.lmtpDelays.Collect(ch)\n\te.pipeDelays.Collect(ch)\n\tch <- e.qmgrInsertsNrcpt\n\tch <- e.qmgrInsertsSize\n\tch <- e.qmgrRemoves\n\tch <- 
e.qmgrExpires\n\te.smtpDelays.Collect(ch)\n\te.smtpTLSConnects.Collect(ch)\n\tch <- e.smtpDeferreds\n\te.smtpProcesses.Collect(ch)\n\tch <- e.smtpdConnects\n\tch <- e.smtpdDisconnects\n\tch <- e.smtpdFCrDNSErrors\n\te.smtpdLostConnections.Collect(ch)\n\te.smtpdProcesses.Collect(ch)\n\te.smtpdRejects.Collect(ch)\n\tch <- e.smtpdSASLAuthenticationFailures\n\te.smtpdTLSConnects.Collect(ch)\n\tch <- e.smtpStatusDeferred\n\te.unsupportedLogEntries.Collect(ch)\n\tch <- e.smtpConnectionTimedOut\n\te.opendkimSignatureAdded.Collect(ch)\n\tch <- e.bounceNonDelivery\n\tch <- e.virtualDelivered\n}\n"
  },
  {
    "path": "postfix_exporter_test.go",
    "content": "package main\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tio_prometheus_client \"github.com/prometheus/client_model/go\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestPostfixExporter_CollectFromLogline(t *testing.T) {\n\ttype fields struct {\n\t\tshowqPath                       string\n\t\tlogSrc                          LogSource\n\t\tcleanupProcesses                prometheus.Counter\n\t\tcleanupRejects                  prometheus.Counter\n\t\tcleanupNotAccepted              prometheus.Counter\n\t\tlmtpDelays                      *prometheus.HistogramVec\n\t\tpipeDelays                      *prometheus.HistogramVec\n\t\tqmgrInsertsNrcpt                prometheus.Histogram\n\t\tqmgrInsertsSize                 prometheus.Histogram\n\t\tqmgrRemoves                     prometheus.Counter\n\t\tqmgrExpires                     prometheus.Counter\n\t\tsmtpDelays                      *prometheus.HistogramVec\n\t\tsmtpTLSConnects                 *prometheus.CounterVec\n\t\tsmtpDeferreds                   prometheus.Counter\n\t\tsmtpStatusDeferred              prometheus.Counter\n\t\tsmtpProcesses                   *prometheus.CounterVec\n\t\tsmtpdConnects                   prometheus.Counter\n\t\tsmtpdDisconnects                prometheus.Counter\n\t\tsmtpdFCrDNSErrors               prometheus.Counter\n\t\tsmtpdLostConnections            *prometheus.CounterVec\n\t\tsmtpdProcesses                  *prometheus.CounterVec\n\t\tsmtpdRejects                    *prometheus.CounterVec\n\t\tsmtpdSASLAuthenticationFailures prometheus.Counter\n\t\tsmtpdTLSConnects                *prometheus.CounterVec\n\t\tbounceNonDelivery               prometheus.Counter\n\t\tvirtualDelivered                prometheus.Counter\n\t\tunsupportedLogEntries           *prometheus.CounterVec\n\t}\n\ttype args struct {\n\t\tline                   []string\n\t\tremovedCount           int\n\t\texpiredCount           int\n\t\tsaslFailedCount 
       int\n\t\toutgoingTLS            int\n\t\tsmtpdMessagesProcessed int\n\t\tsmtpMessagesProcessed  int\n\t\tbounceNonDelivery  int\n\t\tvirtualDelivered       int\n\t\tunsupportedLogEntries  []string\n\t}\n\ttests := []struct {\n\t\tname   string\n\t\tfields fields\n\t\targs   args\n\t}{\n\t\t{\n\t\t\tname: \"Single line\",\n\t\t\targs: args{\n\t\t\t\tline: []string{\n\t\t\t\t\t\"Feb 11 16:49:24 letterman postfix/qmgr[8204]: AAB4D259B1: removed\",\n\t\t\t\t},\n\t\t\t\tremovedCount:    1,\n\t\t\t\tsaslFailedCount: 0,\n\t\t\t},\n\t\t\tfields: fields{\n\t\t\t\tqmgrRemoves:           prometheus.NewCounter(prometheus.CounterOpts{}),\n\t\t\t\tunsupportedLogEntries: prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{\"service\", \"level\"}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Multiple lines\",\n\t\t\targs: args{\n\t\t\t\tline: []string{\n\t\t\t\t\t\"Feb 11 16:49:24 letterman postfix/qmgr[8204]: AAB4D259B1: removed\",\n\t\t\t\t\t\"Feb 11 16:49:24 letterman postfix/qmgr[8204]: C2032259E6: removed\",\n\t\t\t\t\t\"Feb 11 16:49:24 letterman postfix/qmgr[8204]: B83C4257DC: removed\",\n\t\t\t\t\t\"Feb 11 16:49:24 letterman postfix/qmgr[8204]: 721BE256EA: removed\",\n\t\t\t\t\t\"Feb 11 16:49:25 letterman postfix/qmgr[8204]: CA94A259EB: removed\",\n\t\t\t\t\t\"Feb 11 16:49:25 letterman postfix/qmgr[8204]: AC1E3259E1: removed\",\n\t\t\t\t\t\"Feb 11 16:49:25 letterman postfix/qmgr[8204]: D114D221E3: removed\",\n\t\t\t\t\t\"Feb 11 16:49:25 letterman postfix/qmgr[8204]: A55F82104D: removed\",\n\t\t\t\t\t\"Feb 11 16:49:25 letterman postfix/qmgr[8204]: D6DAA259BC: removed\",\n\t\t\t\t\t\"Feb 11 16:49:25 letterman postfix/qmgr[8204]: E3908259F0: removed\",\n\t\t\t\t\t\"Feb 11 16:49:25 letterman postfix/qmgr[8204]: 0CBB8259BF: removed\",\n\t\t\t\t\t\"Feb 11 16:49:25 letterman postfix/qmgr[8204]: EA3AD259F2: removed\",\n\t\t\t\t\t\"Feb 11 16:49:25 letterman postfix/qmgr[8204]: DDEF824B48: removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: 289AF21DB9: 
removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: 6192B260E8: removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: F2831259F4: removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: 09D60259F8: removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: 13A19259FA: removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: 2D42722065: removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: 746E325A0E: removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: 4D2F125A02: removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: E30BC259EF: removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: DC88924DA1: removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: 2164B259FD: removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: 8C30525A14: removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: 8DCCE25A15: removed\",\n\t\t\t\t\t\"Feb 11 16:49:26 letterman postfix/qmgr[8204]: C5217255D5: removed\",\n\t\t\t\t\t\"Feb 11 16:49:27 letterman postfix/qmgr[8204]: D8EE625A28: removed\",\n\t\t\t\t\t\"Feb 11 16:49:27 letterman postfix/qmgr[8204]: 9AD7C25A19: removed\",\n\t\t\t\t\t\"Feb 11 16:49:27 letterman postfix/qmgr[8204]: D0EEE2596C: removed\",\n\t\t\t\t\t\"Feb 11 16:49:27 letterman postfix/qmgr[8204]: DFE732172E: removed\",\n\t\t\t\t},\n\t\t\t\tremovedCount:    31,\n\t\t\t\tsaslFailedCount: 0,\n\t\t\t},\n\t\t\tfields: fields{\n\t\t\t\tqmgrRemoves:           prometheus.NewCounter(prometheus.CounterOpts{}),\n\t\t\t\tunsupportedLogEntries: prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{\"service\", \"level\"}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"qmgr expired\",\n\t\t\targs: args{\n\t\t\t\tline: []string{\n\t\t\t\t\t\"Apr 10 14:50:16 mail postfix/qmgr[3663]: BACE842E72: from=<noreply@domain.com>, status=expired, returned to sender\",\n\t\t\t\t\t\"Apr 10 14:50:16 mail postfix/qmgr[3663]: 
BACE842E73: from=<noreply@domain.com>, status=force-expired, returned to sender\",\n\t\t\t\t},\n\t\t\t\texpiredCount:    2,\n\t\t\t},\n\t\t\tfields: fields{\n\t\t\t\tqmgrExpires:           prometheus.NewCounter(prometheus.CounterOpts{}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"SASL Failed\",\n\t\t\targs: args{\n\t\t\t\tline: []string{\n\t\t\t\t\t\"Apr 26 10:55:19 tcc1 postfix/smtpd[21126]: warning: SASL authentication failure: cannot connect to saslauthd server: Permission denied\",\n\t\t\t\t\t\"Apr 26 10:55:19 tcc1 postfix/smtpd[21126]: warning: SASL authentication failure: Password verification failed\",\n\t\t\t\t\t\"Apr 26 10:55:19 tcc1 postfix/smtpd[21126]: warning: laptop.local[192.168.1.2]: SASL PLAIN authentication failed: generic failure\",\n\t\t\t\t},\n\t\t\t\tsaslFailedCount: 1,\n\t\t\t\tremovedCount:    0,\n\t\t\t},\n\t\t\tfields: fields{\n\t\t\t\tsmtpdSASLAuthenticationFailures: prometheus.NewCounter(prometheus.CounterOpts{}),\n\t\t\t\tunsupportedLogEntries: prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{\"service\", \"level\"}),\n\t\t\t\tsmtpProcesses:                   prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{\"status\"}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"SASL login\",\n\t\t\targs: args{\n\t\t\t\tline: []string{\n\t\t\t\t\t\"Oct 30 13:19:26 mailgw-out1 postfix/smtpd[27530]: EB4B2C19E2: client=xxx[1.2.3.4], sasl_method=PLAIN, sasl_username=user@domain\",\n\t\t\t\t\t\"Feb 24 16:42:00 letterman postfix/smtpd[24906]: 1CF582025C: client=xxx[2.3.4.5]\",\n\t\t\t\t},\n\t\t\t\tremovedCount:           0,\n\t\t\t\tsaslFailedCount:        0,\n\t\t\t\toutgoingTLS:            0,\n\t\t\t\tsmtpdMessagesProcessed: 2,\n\t\t\t},\n\t\t\tfields: fields{\n\t\t\t\tunsupportedLogEntries: prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{\"service\", \"level\"}),\n\t\t\t\tsmtpdProcesses:        prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{\"sasl_method\"}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Issue 
#35\",\n\t\t\targs: args{\n\t\t\t\tline: []string{\n\t\t\t\t\t\"Jul 24 04:38:17 mail postfix/smtp[30582]: Verified TLS connection established to gmail-smtp-in.l.google.com[108.177.14.26]:25: TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits) server-digest SHA256\",\n\t\t\t\t\t\"Jul 24 03:28:15 mail postfix/smtp[24052]: Verified TLS connection established to mx2.comcast.net[2001:558:fe21:2a::6]:25: TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)\",\n\t\t\t\t},\n\t\t\t\tremovedCount:    0,\n\t\t\t\tsaslFailedCount: 0,\n\t\t\t\toutgoingTLS:     2,\n\t\t\t\tsmtpdMessagesProcessed: 0,\n\t\t\t},\n\t\t\tfields: fields{\n\t\t\t\tunsupportedLogEntries: prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{\"service\", \"level\"}),\n\t\t\t\tsmtpTLSConnects:       prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{\"Verified\", \"TLSv1.2\", \"ECDHE-RSA-AES256-GCM-SHA384\", \"256\", \"256\"}),\n\t\t\t\tsmtpProcesses:         prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{\"status\"}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Testing delays\",\n\t\t\targs: args{\n\t\t\t\tline: []string{\n\t\t\t\t\t\"Feb 24 16:18:40 letterman postfix/smtp[59649]: 5270320179: to=<hebj@telia.com>, relay=mail.telia.com[81.236.60.210]:25, delay=2017, delays=0.1/2017/0.03/0.05, dsn=2.0.0, status=sent (250 2.0.0 6FVIjIMwUJwU66FVIjAEB0 mail accepted for delivery)\",\n\t\t\t\t},\n\t\t\t\tremovedCount:           0,\n\t\t\t\tsaslFailedCount:        0,\n\t\t\t\toutgoingTLS:            0,\n\t\t\t\tsmtpdMessagesProcessed: 0,\n\t\t\t\tsmtpMessagesProcessed:  1,\n\t\t\t},\n\t\t\tfields: fields{\n\t\t\t\tsmtpDelays: prometheus.NewHistogramVec(prometheus.HistogramOpts{}, []string{\"stage\"}),\n\t\t\t\tsmtpProcesses: prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{\"status\"}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Testing different smtp statuses\",\n\t\t\targs: args{\n\t\t\t\tline: 
[]string{\n\t\t\t\t\t\"Dec 29 02:54:09 mail postfix/smtp[7648]: 732BB407C3: host mail.domain.com[1.1.1.1] said: 451 DT:SPM 163 mx13,P8CowECpNVM_oEVaenoEAQ--.23796S3 1514512449, please try again 15min later (in reply to end of DATA command)\",\n\t\t\t\t\t\"Dec 29 02:54:12 mail postfix/smtp[7648]: 732BB407C3: to=<redacted@domain.com>, relay=mail.domain.com[1.1.1.1]:25, delay=6.2, delays=0.1/0/5.2/0.87, dsn=4.0.0, status=deferred (host mail.domain.com[1.1.1.1] said: 451 DT:SPM 163 mx40,WsCowAAnEhlCoEVa5GjcAA--.20089S3 1514512452, please try again 15min later (in reply to end of DATA command))\",\n\t\t\t\t\t\"Dec 29 03:03:48 mail postfix/smtp[8492]: 732BB407C3: to=<redacted@domain.com>, relay=mail.domain.com[1.1.1.1]:25, delay=582, delays=563/16/1.7/0.81, dsn=5.0.0, status=bounced (host mail.domain.com[1.1.1.1] said: 554 DT:SPM 163 mx9,O8CowEDJVFKCokVaRhz+AA--.26016S3 1514513028,please see http://mail.domain.com/help/help_spam.htm?ip= (in reply to end of DATA command))\",\n\t\t\t\t\t\"Dec 29 03:03:48 mail postfix/bounce[9321]: 732BB407C3: sender non-delivery notification: 5DE184083C\",\n\t\t\t\t},\n\t\t\t\tsmtpMessagesProcessed:  2,\n\t\t\t\tbounceNonDelivery: 1,\n\t\t\t},\n\t\t\tfields: fields{\n\t\t\t\tunsupportedLogEntries: prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{\"service\", \"level\"}),\n\t\t\t\tsmtpDelays: prometheus.NewHistogramVec(prometheus.HistogramOpts{}, []string{\"stage\"}),\n\t\t\t\tsmtpStatusDeferred: prometheus.NewCounter(prometheus.CounterOpts{}),\n\t\t\t\tsmtpProcesses: prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{\"status\"}),\n\t\t\t\tbounceNonDelivery: prometheus.NewCounter(prometheus.CounterOpts{}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Testing virtual delivered\",\n\t\t\targs: args{\n\t\t\t\tline: []string{\n\t\t\t\t\t\"Apr  7 15:35:20 123-mail postfix/virtual[20235]: 199041033BE: to=<me@domain.fr>, relay=virtual, delay=0.08, delays=0.08/0/0/0, dsn=2.0.0, status=sent (delivered to 
maildir)\",\n\t\t\t\t},\n\t\t\t\tvirtualDelivered: 1,\n\t\t\t},\n\t\t\tfields: fields{\n\t\t\t\tvirtualDelivered: prometheus.NewCounter(prometheus.CounterOpts{}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Testing levels of unsupported entries\",\n\t\t\targs: args{\n\t\t\t\tline: []string{\n\t\t\t\t\t\"Feb 14 19:05:25 123-mail postfix/smtpd[1517]: table hash:/etc/postfix/virtual_mailbox_maps(0,lock|fold_fix) has changed -- restarting\",\n\t\t\t\t    \"Mar 16 12:28:02 123-mail postfix/smtpd[16268]: fatal: file /etc/postfix/main.cf: parameter default_privs: unknown user name value: nobody\",\n\t\t\t\t\t\"Mar 16 23:30:44 123-mail postfix/qmgr[29980]: warning: please avoid flushing the whole queue when you have\",\n\t\t\t\t\t\"Mar 16 23:30:44 123-mail postfix/qmgr[29980]: warning: lots of deferred mail, that is bad for performance\",\n\t\t\t\t},\n\t\t\t\tunsupportedLogEntries: []string{\n\t\t\t\t\t`label:<name:\"level\" value:\"\" > label:<name:\"service\" value:\"smtpd\" > counter:<value:1 > `,\n\t\t\t\t\t`label:<name:\"level\" value:\"fatal\" > label:<name:\"service\" value:\"smtpd\" > counter:<value:1 > `,\n\t\t\t\t\t`label:<name:\"level\" value:\"warning\" > label:<name:\"service\" value:\"qmgr\" > counter:<value:2 > `,\n\t\t\t\t},\n\t\t\t},\n\t\t\tfields: fields{\n\t\t\t\tunsupportedLogEntries: prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{\"service\", \"level\"}),\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\te := &PostfixExporter{\n\t\t\t\tshowqPath:                       tt.fields.showqPath,\n\t\t\t\tlogSrc:                          tt.fields.logSrc,\n\t\t\t\tcleanupProcesses:                tt.fields.cleanupProcesses,\n\t\t\t\tcleanupRejects:                  tt.fields.cleanupRejects,\n\t\t\t\tcleanupNotAccepted:              tt.fields.cleanupNotAccepted,\n\t\t\t\tlmtpDelays:                      tt.fields.lmtpDelays,\n\t\t\t\tpipeDelays:                      
tt.fields.pipeDelays,\n\t\t\t\tqmgrInsertsNrcpt:                tt.fields.qmgrInsertsNrcpt,\n\t\t\t\tqmgrInsertsSize:                 tt.fields.qmgrInsertsSize,\n\t\t\t\tqmgrRemoves:                     tt.fields.qmgrRemoves,\n\t\t\t\tqmgrExpires:                     tt.fields.qmgrExpires,\n\t\t\t\tsmtpDelays:                      tt.fields.smtpDelays,\n\t\t\t\tsmtpTLSConnects:                 tt.fields.smtpTLSConnects,\n\t\t\t\tsmtpDeferreds:                   tt.fields.smtpDeferreds,\n\t\t\t\tsmtpStatusDeferred:              tt.fields.smtpStatusDeferred,\n\t\t\t\tsmtpProcesses:                   tt.fields.smtpProcesses,\n\t\t\t\tsmtpdConnects:                   tt.fields.smtpdConnects,\n\t\t\t\tsmtpdDisconnects:                tt.fields.smtpdDisconnects,\n\t\t\t\tsmtpdFCrDNSErrors:               tt.fields.smtpdFCrDNSErrors,\n\t\t\t\tsmtpdLostConnections:            tt.fields.smtpdLostConnections,\n\t\t\t\tsmtpdProcesses:                  tt.fields.smtpdProcesses,\n\t\t\t\tsmtpdRejects:                    tt.fields.smtpdRejects,\n\t\t\t\tsmtpdSASLAuthenticationFailures: tt.fields.smtpdSASLAuthenticationFailures,\n\t\t\t\tsmtpdTLSConnects:                tt.fields.smtpdTLSConnects,\n\t\t\t\tbounceNonDelivery:               tt.fields.bounceNonDelivery,\n\t\t\t\tvirtualDelivered:                tt.fields.virtualDelivered,\n\t\t\t\tunsupportedLogEntries:           tt.fields.unsupportedLogEntries,\n\t\t\t\tlogUnsupportedLines:             true,\n\t\t\t}\n\t\t\tfor _, line := range tt.args.line {\n\t\t\t\te.CollectFromLogLine(line)\n\t\t\t}\n\t\t\tassertCounterEquals(t, e.qmgrRemoves, tt.args.removedCount, \"Wrong number of lines counted\")\n\t\t\tassertCounterEquals(t, e.qmgrExpires, tt.args.expiredCount, \"Wrong number of qmgr expired lines counted\")\n\t\t\tassertCounterEquals(t, e.smtpdSASLAuthenticationFailures, tt.args.saslFailedCount, \"Wrong number of Sasl counter counted\")\n\t\t\tassertCounterEquals(t, e.smtpTLSConnects, tt.args.outgoingTLS, \"Wrong number of 
TLS connections counted\")\n\t\t\tassertCounterEquals(t, e.smtpdProcesses, tt.args.smtpdMessagesProcessed, \"Wrong number of smtpd messages processed\")\n\t\t\tassertCounterEquals(t, e.smtpProcesses, tt.args.smtpMessagesProcessed, \"Wrong number of smtp messages processed\")\n\t\t\tassertCounterEquals(t, e.bounceNonDelivery, tt.args.bounceNonDelivery, \"Wrong number of non delivery notifications\")\n\t\t\tassertCounterEquals(t, e.virtualDelivered, tt.args.virtualDelivered, \"Wrong number of delivered mails\")\n\t\t\tassertVecMetricsEquals(t, e.unsupportedLogEntries, tt.args.unsupportedLogEntries, \"Wrong number of unsupportedLogEntries\")\n\t\t})\n\t}\n}\nfunc assertCounterEquals(t *testing.T, counter prometheus.Collector, expected int, message string) {\n\n\tif counter != nil && expected > 0 {\n\t\tswitch counter.(type) {\n\t\tcase *prometheus.CounterVec:\n\t\t\tcounter := counter.(*prometheus.CounterVec)\n\t\t\tmetricsChan := make(chan prometheus.Metric)\n\t\t\tgo func() {\n\t\t\t\tcounter.Collect(metricsChan)\n\t\t\t\tclose(metricsChan)\n\t\t\t}()\n\t\t\tvar count int = 0\n\t\t\tfor metric := range metricsChan {\n\t\t\t\tmetricDto := io_prometheus_client.Metric{}\n\t\t\t\tmetric.Write(&metricDto)\n\t\t\t\tcount += int(*metricDto.Counter.Value)\n\t\t\t}\n\t\t\tassert.Equal(t, expected, count, message)\n\t\tcase prometheus.Counter:\n\t\t\tmetricsChan := make(chan prometheus.Metric)\n\t\t\tgo func() {\n\t\t\t\tcounter.Collect(metricsChan)\n\t\t\t\tclose(metricsChan)\n\t\t\t}()\n\t\t\tvar count int = 0\n\t\t\tfor metric := range metricsChan {\n\t\t\t\tmetricDto := io_prometheus_client.Metric{}\n\t\t\t\tmetric.Write(&metricDto)\n\t\t\t\tcount += int(*metricDto.Counter.Value)\n\t\t\t}\n\t\t\tassert.Equal(t, expected, count, message)\n\t\tdefault:\n\t\t\tt.Fatal(\"Type not implemented\")\n\t\t}\n\t}\n}\nfunc assertVecMetricsEquals(t *testing.T, counter *prometheus.CounterVec, expected []string, message string) {\n\tif expected != nil {\n\t\tmetricsChan := make(chan 
prometheus.Metric)\n\t\tgo func() {\n\t\t\tcounter.Collect(metricsChan)\n\t\t\tclose(metricsChan)\n\t\t}()\n\t\tvar res []string\n\t\tfor metric := range metricsChan {\n\t\t\tmetricDto := io_prometheus_client.Metric{}\n\t\t\tmetric.Write(&metricDto)\n\t\t\tres = append(res, metricDto.String())\n\t\t}\n\t\tassert.Equal(t, expected, res, message)\n\t}\n}\n"
  },
  {
    "path": "showq_test.go",
    "content": "package main\n\nimport (\n\t\"github.com/kumina/postfix_exporter/mock\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc TestCollectShowqFromReader(t *testing.T) {\n\ttype args struct {\n\t\tfile string\n\t}\n\ttests := []struct {\n\t\tname               string\n\t\targs               args\n\t\twantErr            bool\n\t\texpectedTotalCount float64\n\t}{\n\t\t{\n\t\t\tname: \"basic test\",\n\t\t\targs: args{\n\t\t\t\tfile: \"testdata/showq.txt\",\n\t\t\t},\n\t\t\twantErr:            false,\n\t\t\texpectedTotalCount: 118702,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tfile, err := os.Open(tt.args.file)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\n\t\t\tsizeHistogram := mock.NewHistogramVecMock()\n\t\t\tageHistogram := mock.NewHistogramVecMock()\n\t\t\tif err := CollectTextualShowqFromScanner(sizeHistogram, ageHistogram, file); (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"CollectShowqFromReader() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t}\n\t\t\tassert.Equal(t, tt.expectedTotalCount, sizeHistogram.GetSum(), \"Expected a lot more data.\")\n\t\t\tassert.Less(t, 0.0, ageHistogram.GetSum(), \"Age not greater than 0\")\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "testdata/showq.txt",
    "content": "-Queue ID- --Size-- ----Arrival Time---- -Sender/Recipient-------\nC420820802*    4387 Mon Feb 24 13:35:18  sender@example.com\n                                         recipient@lerum.se\n\n8D5D4205B9*    4033 Mon Feb 24 13:22:16  sender@example.com\n                                         recipient@lerum.se\n\n7465520414*    4043 Mon Feb 24 13:22:16  sender@example.com\n                                         recipient@lerum.se\n\n3E2F72070A*    5301 Mon Feb 24 13:35:39  sender@example.com\n                                         recipient@hotmail.se\n\n542032060A*    5828 Mon Feb 24 13:34:46  sender@example.com\n                                         recipient@skatteverket.se\n\n4B96A2037C*    9868 Mon Feb 24 13:32:03  sender@example.com\n                                         recipient@lerum.se\n\nE88EA20796*    5956 Mon Feb 24 13:34:55  sender@example.com\n                                         recipient@edu.halmstad.se\n\n8C9912052C*    4047 Mon Feb 24 13:22:16  sender@example.com\n                                         recipient@lerum.se\n\n70BDA2079B*    4404 Mon Feb 24 13:35:18  sender@example.com\n                                         recipient@lerum.se\n\n76E6A20536*    3875 Mon Feb 24 13:21:20  sender@example.com\n                                         recipient@lerum.se\n\n92C662062A*    3864 Mon Feb 24 13:21:20  sender@example.com\n                                         recipient@lerum.se\n\nBA9BC2071E*    4387 Mon Feb 24 13:35:18  sender@example.com\n                                         recipient@lerum.se\n\n9A67020670*    4393 Mon Feb 24 13:34:06  sender@example.com\n                                         recipient@lerum.se\n\n651AC20138*    3872 Mon Feb 24 13:23:17  sender@example.com\n                                         recipient@lerum.se\n\n4F16D20516*    4052 Mon Feb 24 13:24:38  sender@example.com\n                                         recipient@lerum.se\n\nC9C4A20501*    5099 Mon Feb 24 13:14:10  
sender@example.com\n                                         recipient@haninge.se\n\n0572820D64     4098 Sat Feb 22 00:44:54  sender@example.com\n(host mail.wekudata.com[37.208.0.7] said: 452 4.2.2 Quota exceeded (rehanna@stahlstierna.se) (in reply to RCPT TO command))\n                                         recipient@stahlstierna.se\n\n0B2C320952     4173 Sat Feb 22 00:42:07  sender@example.com\n(host alt1.gmail-smtp-in.l.google.com[108.177.97.26] said: 452-4.2.2 The email account that you tried to reach is over quota. Please direct 452-4.2.2 the recipient to 452 4.2.2  https://support.google.com/mail/?p=OverQuotaTemp q24si6538316pgt.498 - gsmtp (in reply to RCPT TO command))\n                                         recipient@gmail.com\n\n0CC2B22124    10926 Fri Feb 21 13:31:58  sender@example.com\n(host alt1.gmail-smtp-in.l.google.com[108.177.97.26] said: 452-4.2.2 The email account that you tried to reach is over quota. Please direct 452-4.2.2 the recipient to 452 4.2.2  https://support.google.com/mail/?p=OverQuotaTemp f10si11999094pgj.597 - gsmtp (in reply to RCPT TO command))\n                                         recipient@gmail.com\n\n0C84020606     4898 Mon Feb 24 08:30:34  sender@example.com\n(host alt1.gmail-smtp-in.l.google.com[108.177.97.26] said: 452-4.2.2 The email account that you tried to reach is over quota. Please direct 452-4.2.2 the recipient to 452 4.2.2  https://support.google.com/mail/?p=OverQuotaTemp 2si12536346pld.231 - gsmtp (in reply to RCPT TO command))\n                                         recipient@gmail.com\n\n04EAA203C0     4133 Mon Feb 24 12:21:58  sender@example.com\n(host alt1.gmail-smtp-in.l.google.com[108.177.97.26] said: 452-4.2.2 The email account that you tried to reach is over quota. 
Please direct 452-4.2.2 the recipient to 452 4.2.2  https://support.google.com/mail/?p=OverQuotaTemp i16si12220651pfq.60 - gsmtp (in reply to RCPT TO command))\n                                         recipient@gmail.com\n\n00C33202B6     4823 Mon Feb 24 11:32:37  sender@example.com\n                   (connect to gafe.se[151.252.30.111]:25: Connection refused)\n                                         recipient@gafe.se\n\n046E0218CA     4154 Mon Feb 24 00:13:12  sender@example.com\n(host alt1.gmail-smtp-in.l.google.com[108.177.97.26] said: 452-4.2.2 The email account that you tried to reach is over quota. Please direct 452-4.2.2 the recipient to 452 4.2.2  https://support.google.com/mail/?p=OverQuotaTemp y1si11835269pgi.474 - gsmtp (in reply to RCPT TO command))\n                                         recipient@gmail.com\n\n06373212DC     4088 Sat Feb 22 00:34:11  sender@example.com\n           (connect to smtp.falun.se[192.121.234.25]:25: Connection timed out)\n                                         recipient@utb.falun.se\n"
  }
]