[
  {
    "path": ".dockerignore",
    "content": "tests/\ndocs/"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n  -   package-ecosystem: \"github-actions\"\n      directory: \"/\"\n      schedule:\n          interval: \"weekly\"\n  -   package-ecosystem: \"gomod\"\n      directory: \"/\"\n      schedule:\n          interval: \"weekly\""
  },
  {
    "path": ".github/workflows/close_state_issues.yml",
    "content": "# © 2024 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: Close inactive issues\non:\n  schedule:\n    - cron: \"30 1 * * *\"\n\njobs:\n  close-issues:\n    runs-on: ubuntu-latest\n    permissions:\n      issues: write\n      pull-requests: write\n    steps:\n      - uses: actions/stale@v10\n        with:\n          days-before-issue-stale: 365\n          days-before-issue-close: 30\n          stale-issue-label: \"stale\"\n          stale-issue-message: \"This issue is stale because it has been open for 12 months with no activity.\"\n          close-issue-message: \"This issue was closed because it has been inactive for 30 days since being marked as stale.\"\n          days-before-pr-stale: -1\n          days-before-pr-close: -1\n          repo-token: ${{ secrets.GITHUB_TOKEN }}\n"
  },
  {
    "path": ".github/workflows/docs.yml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: docs\non:\n  workflow_dispatch:\n  push:\n    branches:\n      - \"docs-*\"\n    tags:\n      - \"v*\"\n\nenv:\n  MKDOCS_MATERIAL_VER: 8.3.4\n\njobs:\n  publish:\n    runs-on: ubuntu-latest\n    permissions:\n      contents: write\n\n    steps:\n      - uses: actions/checkout@v4\n      - run: docker run -v $(pwd):/docs --entrypoint ash squidfunk/mkdocs-material:${MKDOCS_MATERIAL_VER} -c 'git config --global --add safe.directory /docs; mkdocs gh-deploy --force --strict'\n"
  },
  {
    "path": ".github/workflows/lint.yml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n---\nname: Linter\non:\n  workflow_dispatch:\n  # pull_request:\n  # push:\n  #   branches:\n  #     - \"main\"\n  #     - \"!releases/**\"\n  \nenv:\n  GOVER: 1.24.12\n\njobs:\n  lint:\n    runs-on: ubuntu-22.04\n    steps:\n      - uses: actions/checkout@v6\n      - uses: actions/setup-go@v6\n        with:\n          go-version: ${{ env.GOVER }}\n      - name: golangci-lint\n        uses: golangci/golangci-lint-action@v9\n        with:\n          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.\n          version: v1.46\n          # Optional: working directory, useful for monorepos\n          # working-directory: somedir\n\n          # Optional: golangci-lint command line arguments.\n          args: --verbose --max-same-issues=0 --max-issues-per-linter=0  --out-format=github-actions\n\n          # Optional: show only new issues if it's a pull request. The default value is `false`.\n          # only-new-issues: true"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n---\nname: release\non:\n  push:\n    tags:\n      - v*\nenv:\n  GOVER: 1.24.12\n  GORELEASER_VER: v2.13.3\n\njobs:\n  test:\n    runs-on: ubuntu-22.04\n    steps:\n      - uses: actions/checkout@v6\n      - uses: actions/setup-go@v6\n        with:\n          go-version: ${{ env.GOVER }}\n      - run: ./tests/run_tests.sh\n        env:\n          CGO_ENABLED: 0\n\n  release:\n    runs-on: ubuntu-22.04\n    permissions:\n      contents: write\n      packages: write\n\n    needs:\n      - test\n    steps:\n      - uses: actions/checkout@v6\n        with:\n          fetch-depth: 0\n\n      - uses: actions/setup-go@v6\n        with:\n          go-version: ${{ env.GOVER }}\n\n      - name: Login to github container registry\n        uses: docker/login-action@v3\n        with:\n          registry: ghcr.io\n          username: ${{ github.actor }}\n          password: ${{ secrets.GITHUB_TOKEN }}\n\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v3\n\n      - name: Release with goreleaser\n        uses: goreleaser/goreleaser-action@v6\n        with:\n          version: ${{ env.GORELEASER_VER }}\n          args: release --clean --verbose\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n"
  },
  {
    "path": ".github/workflows/test.yml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n---\nname: Test\non:\n  workflow_dispatch:\n  pull_request:\n  push:\n    branches:\n      - \"main\"\n      - \"!releases/**\"\nenv:\n  GOVER: 1.24.12\n\njobs:\n  test:\n    runs-on: ubuntu-22.04\n    steps:\n      - uses: actions/checkout@v6\n      - uses: actions/setup-go@v6\n        with:\n          go-version: ${{ env.GOVER }}\n      - run: ./tests/run_tests.sh\n        env:\n          CGO_ENABLED: 0\n      # run staticcheck\n      - uses: reviewdog/action-staticcheck@73cfd0daa6fdbba9a858dcb0f62844012fa8317d\n        with:\n          github_token: ${{ secrets.GITHUB_TOKEN }}\n          # Change reviewdog reporter if you need [github-pr-check,github-check,github-pr-review].\n          reporter: github-pr-review\n          # Report all results.\n          filter_mode: nofilter\n          # Exit with 1 when it find at least one finding.\n          fail_on_error: true\n"
  },
  {
    "path": ".gitignore",
    "content": "_test/\ntests/clab-*\ntests/srl-*\ntests/.*clab.yaml\ntests/collector/suite/*/clab-*\nbuilds/\ndist\n*.log\ngnmic\n*.tmp\n*.work*\n.idea\ntests/collector"
  },
  {
    "path": ".golangci.yml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlinters-settings:\n  govet:\n    # check-shadowing: true\n    enable:\n      - fieldalignment\n  gocyclo:\n    min-complexity: 20\n  dupl:\n    threshold: 100\n  goconst:\n    min-len: 2\n    min-occurrences: 4\n  lll:\n    line-length: 140\n  nolintlint:\n    # allow-leading-space: true # don't require machine-readable nolint directives (i.e. with no leading space)\n    allow-unused: false # report any unused nolint directives\n    require-explanation: false # don't require an explanation for nolint directives\n    require-specific: false # don't require nolint directives to be specific about which linter is being skipped\n\nlinters:\n  disable-all: true\n  enable:\n    - asciicheck\n    - bodyclose\n    # - deadcode\n    # - depguard\n    - dogsled\n    # - dupl\n    # - errcheck\n    # - exhaustive\n    # - exportloopref\n    # - funlen\n    # - gci\n    # - gochecknoglobals\n    # - gochecknoinits\n    # - gocognit\n    - goconst\n    # - gocritic\n    # - gocyclo\n    # - godox\n    - gofmt\n    # - gofumpt\n    - goheader\n    # - goimports\n    # - gomnd\n    # - gomodguard\n    # - goprintffuncname\n    # - gosec\n    # - gosimple\n    # - govet\n    # - ineffassign\n    # - lll\n    - misspell\n    # - nakedret\n    # - nestif\n    # - nlreturn\n    # - noctx\n    - nolintlint\n    - prealloc\n    # - revive\n    # - rowserrcheck\n    # - sqlclosecheck\n    # - staticcheck\n    # - structcheck\n    # - stylecheck\n    # - typecheck\n    # - unconvert\n    # - unparam\n    - unused\n    # - varcheck\n   
 # - whitespace\n    # - wsl\n\nrun:\n  concurrency: 4\n  timeout: 5m"
  },
  {
    "path": ".goreleaser.yml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: 2\nproject_name: gnmic\nbuilds:\n  - env:\n      - CGO_ENABLED=0\n    ldflags:\n      - -s -w -X github.com/openconfig/gnmic/pkg/version.Version={{.Version}} -X github.com/openconfig/gnmic/pkg/version.Commit={{.ShortCommit}} -X github.com/openconfig/gnmic/pkg/version.Date={{.Date}} -X github.com/openconfig/gnmic/pkg/version.GitURL={{.GitURL}}\n    goos:\n      - linux\n      - darwin\n    goarch:\n      - amd64\n      - \"386\"\n      - arm\n      - arm64\ndockers:\n  - goos: linux\n    goarch: amd64\n    ids:\n      - gnmic\n    image_templates:\n      - &amd64_latest_image \"ghcr.io/openconfig/gnmic:latest-amd64\"\n      - &amd64_versioned_image 'ghcr.io/openconfig/gnmic:{{ replace .Version \"v\" \"\"}}-amd64'\n    dockerfile: goreleaser-alpine.dockerfile\n    skip_push: false\n    use: buildx\n    build_flag_templates:\n      - \"--platform=linux/amd64\"\n      - \"--provenance=false\"\n      - \"--sbom=false\"\n  - goos: linux\n    goarch: arm64\n    ids:\n      - gnmic\n    image_templates:\n      - &arm64_latest_image \"ghcr.io/openconfig/gnmic:latest-arm64\"\n      - &arm64_versioned_image 'ghcr.io/openconfig/gnmic:{{ replace .Version \"v\" \"\"}}-arm64'\n    dockerfile: goreleaser-alpine.dockerfile\n    skip_push: false\n    use: buildx\n    build_flag_templates:\n      - \"--platform=linux/arm64\"\n      - \"--provenance=false\"\n      - \"--sbom=false\"\n  - goos: linux\n    goarch: amd64\n    ids:\n      - gnmic\n    image_templates:\n      - \"ghcr.io/openconfig/gnmic:latest-scratch\"\n     
 - 'ghcr.io/openconfig/gnmic:{{ replace .Version \"v\" \"\"}}-scratch'\n    dockerfile: goreleaser-scratch.dockerfile\n    skip_push: false\n    use: buildx\n    build_flag_templates:\n      - \"--platform=linux/amd64\"\n      - \"--provenance=false\"\n      - \"--sbom=false\"\ndocker_manifests:\n  - name_template: 'ghcr.io/openconfig/gnmic:{{ replace .Version \"v\" \"\" }}'\n    image_templates:\n      - *amd64_versioned_image\n      - *arm64_versioned_image\n  - name_template: \"{{- if not .IsSnapshot}}ghcr.io/openconfig/gnmic:latest{{- end}}\"\n    image_templates:\n      - *amd64_latest_image\n      - *arm64_latest_image\narchives:\n  - name_template: >-\n      {{ .ProjectName }}_\n      {{- .Version }}_\n      {{- title .Os }}_\n      {{- if eq .Arch \"amd64\" }}x86_64\n      {{- else if eq .Arch \"386\" }}i386\n      {{- else if eq .Arch \"arm\" }}armv7\n      {{- else if eq .Arch \"arm64\" }}aarch64\n      {{- else }}{{ .Arch }}{{ end }}\nchecksum:\n  name_template: \"checksums.txt\"\nsnapshot:\n  name_template: \"{{ .Tag }}\"\nchangelog:\n  use: github-native\n\nnfpms:\n  - id: gnmic\n    file_name_template: >-\n      {{ .ProjectName }}_\n      {{- .Version }}_\n      {{- title .Os }}_\n      {{- if eq .Arch \"amd64\" }}x86_64\n      {{- else if eq .Arch \"386\" }}i386\n      {{- else if eq .Arch \"arm\" }}armv7\n      {{- else if eq .Arch \"arm64\" }}aarch64\n      {{- else }}{{ .Arch }}{{ end }}\n    vendor: gnmic\n    homepage: https://gnmic.openconfig.net\n    maintainer: Karim Radhouani <medkarimrdi@gmail.com>, Roman Dodin <dodin.roman@gmail.com>\n    description: gNMI CLI client and collector\n    license: Apache 2.0\n    formats:\n      - deb\n      - rpm\n    bindir: /usr/local/bin\n"
  },
  {
    "path": "Dockerfile",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nFROM golang:1.24.12 AS builder\n\nWORKDIR /build\n\nCOPY go.mod go.sum /build/\nCOPY pkg/api/go.mod pkg/api/go.sum /build/pkg/api/\nCOPY pkg/cache/go.mod pkg/cache/go.sum /build/pkg/cache/\nRUN go mod download\n\nADD . /build\n\n#RUN CGO_ENABLED=0 go build -ldflags=\"-s -w\" -o gnmic .\nRUN CGO_ENABLED=0 go build -ldflags=\"-s -w\" -o gnmic .\n\nFROM alpine\nLABEL org.opencontainers.image.source=https://github.com/openconfig/gnmic\nCOPY --from=builder /build/gnmic /app/\nWORKDIR /app\nENTRYPOINT [ \"/app/gnmic\" ]\nCMD [ \"help\" ]\n"
  },
  {
    "path": "LICENSE",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "<p align=center><img src=docs/images/gnmic-headline.svg?sanitize=true/></p>\n\n[![github release](https://img.shields.io/github/release/openconfig/gnmic.svg?style=flat-square&color=00c9ff&labelColor=bec8d2)](https://github.com/openconfig/gnmic/releases/)\n[![Github all releases](https://img.shields.io/github/downloads/openconfig/gnmic/total.svg?style=flat-square&color=00c9ff&labelColor=bec8d2)](https://github.com/openconfig/gnmic/releases/)\n[![Go Report](https://img.shields.io/badge/go%20report-A%2B-blue?style=flat-square&color=00c9ff&labelColor=bec8d2)](https://goreportcard.com/report/github.com/openconfig/gnmic)\n[![Doc](https://img.shields.io/badge/Docs-gnmic.openconfig.net-blue?style=flat-square&color=00c9ff&labelColor=bec8d2)](https://gnmic.openconfig.net)\n[![build](https://img.shields.io/github/actions/workflow/status/openconfig/gnmic/test.yml?branch=main&style=flat-square&labelColor=bec8d2)](https://github.com/openconfig/gnmic/releases/)\n\n---\n\n`gnmic` (_pronoun.: gee·en·em·eye·see_) is a gNMI CLI client that provides full support for Capabilities, Get, Set and Subscribe RPCs with collector capabilities.\n\nDocumentation available at [https://gnmic.openconfig.net](https://gnmic.openconfig.net)\n\n## Features\n\n* **Full support for gNMI RPCs**  \n  Every gNMI RPC has a [corresponding command](https://gnmic.openconfig.net/basic_usage/) with all of the RPC options configurable by means of the local and global flags.\n* **Flexible collector deployment**  \n  `gnmic` can be deployed as a gNMI collector that supports multiple output types ([NATS](https://gnmic.openconfig.net/user_guide/outputs/nats_output/), [Kafka](https://gnmic.openconfig.net/user_guide/outputs/kafka_output/), [Prometheus](https://gnmic.openconfig.net/user_guide/outputs/prometheus_output/), [InfluxDB](https://gnmic.openconfig.net/user_guide/outputs/influxdb_output/),...).  
\n  The collector can be deployed either as a [single instance](https://gnmic.openconfig.net/deployments/deployments_intro/#single-instance), as part of a [cluster](https://gnmic.openconfig.net/user_guide/HA/), or used to form [data pipelines](https://gnmic.openconfig.net/deployments/deployments_intro/#pipelines).\n* **Support gRPC tunnel based dialout telemetry**  \n  `gnmic` can be deployed as a gNMI collector with an [embedded tunnel server](https://gnmic.openconfig.net/user_guide/tunnel_server/).\n* **gNMI data manipulation**  \n  `gnmic` collector has [data transformation](https://gnmic.openconfig.net/user_guide/event_processors/intro/) capabilities that can be used to adapt the collected data to your specific use case.\n* **Dynamic targets loading**  \n  `gnmic` support [target loading at runtime](https://gnmic.openconfig.net/user_guide/targets/target_discovery/discovery_intro/) based on input from external systems.\n* **YANG-based path suggestions**  \n  Your CLI magically becomes a YANG browser when `gnmic` is executed in [prompt](https://gnmic.openconfig.net/user_guide/prompt_suggestions/) mode. In this mode the flags that take XPATH values will get auto-suggestions based on the provided YANG modules. In other words - voodoo magic :exploding_head:\n* **Multi-target operations**  \n  Commands can operate on [multiple gNMI targets](https://gnmic.openconfig.net/user_guide/targets/) for bulk configuration/retrieval/subscription.\n* **Multiple configuration sources**  \n  gnmic supports [flags](https://gnmic.openconfig.net/user_guide/configuration_flags), [environment variables](https://gnmic.openconfig.net/user_guide/configuration_env/) as well as [file based]((https://gnmic.openconfig.net/user_guide/configuration_file/)) configurations.\n* **Inspect raw gNMI messages**  \n  With the `prototext` output format you can see the actual gNMI messages being sent/received. 
Its like having a gNMI looking glass!\n* **(In)secure gRPC connection**  \n  gNMI client supports both TLS and [non-TLS](https://gnmic.openconfig.net/global_flags/#insecure) transports so you can start using it in a lab environment without having to care about the PKI.\n* **Dial-out telemetry**  \n  The [dial-out telemetry server](https://gnmic.openconfig.net/cmd/listen/) is provided for Nokia SR OS.\n* **Pre-built multi-platform binaries**  \n  Statically linked [binaries](https://github.com/openconfig/gnmic/releases) made in our release pipeline are available for major operating systems and architectures. Making [installation](https://gnmic.openconfig.net/install/) a breeze!\n* **Extensive and friendly documentation**  \n  You won't be in need to dive into the source code to understand how `gnmic` works, our [documentation site](https://gnmic.openconfig.net) has you covered.\n\n## Quick start guide\n\n### Installation\n\n```\nbash -c \"$(curl -sL https://get-gnmic.openconfig.net)\"\n```\n\n### Capabilities request\n\n```\ngnmic -a 10.1.0.11:57400 -u admin -p admin --insecure capabilities\n```\n\n### Get request\n\n```\ngnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \\\n      get --path /state/system/platform\n```\n\n### Set request\n\n```\ngnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \\\n      set --update-path /configure/system/name \\\n          --update-value gnmic_demo\n```\n\n### Subscribe request\n\n```\ngnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \\\n      sub --path \"/state/port[port-id=1/1/c1/1]/statistics/in-packets\"\n```\n\n### Prompt mode\n\nThe [prompt mode](https://gnmic.openconfig.net/user_guide/prompt_suggestions/) is an interactive mode of the gnmic CLI client for user convenience.\n\n```bash\n# clone repository with YANG models (Openconfig example)\ngit clone https://github.com/openconfig/public\ncd public\n\n# Start gnmic in prompt mode and read in all the modules:\n\ngnmic --file release/models \\\n      --dir 
third_party \\\n      --exclude ietf-interfaces \\\n      prompt\n```\n"
  },
  {
    "path": "cmd/demo/getresponse.textproto",
    "content": "notification: {\n  timestamp: 1676419100456944135\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"ssh-server\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"protocol-version\"\n      }\n    }\n    val: {\n      string_val: \"V2\"\n    }\n  }\n}\nnotification: {\n  timestamp: 1676420328291197426\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"config\"\n      }\n      elem: {\n        name: \"hostname\"\n      }\n    }\n    val: {\n      string_val: \"rosesarered\"\n    }\n  }\n}\nnotification: {\n  timestamp: 1676419100456944135\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"ntp\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"enabled\"\n      }\n    }\n    val: {\n      bool_val: false\n    }\n  }\n}\nnotification: {\n  timestamp: 1676419100456944135\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"ntp\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"enable-ntp-auth\"\n      }\n    }\n    val: {\n      bool_val: false\n    }\n  }\n}\nnotification: {\n  timestamp: 1676419100456944135\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"ssh-server\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"enable\"\n      }\n    }\n    val: {\n      
bool_val: true\n    }\n  }\n}\nnotification: {\n  timestamp: 1676420328448197153\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"hostname\"\n      }\n    }\n    val: {\n      string_val: \"rosesarered\"\n    }\n  }\n}\nnotification: {\n  timestamp: 1676419100459254468\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"boot-time\"\n      }\n    }\n    val: {\n      uint_val: 1676419100459308639\n    }\n  }\n}\nnotification: {\n  timestamp: 1676419100456944135\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"telnet-server\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"enable\"\n      }\n    }\n    val: {\n      bool_val: false\n    }\n  }\n}\nnotification: {\n  timestamp: 1676422427135895887\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"config\"\n      }\n      elem: {\n        name: \"hostname\"\n      }\n    }\n    val: {\n      string_val: \"rosesareredd\"\n    }\n  }\n}\nnotification: {\n  timestamp: 1676422427269965151\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"hostname\"\n      }\n    }\n    val: {\n      string_val: \"rosesareredd\"\n    }\n  }\n}\nnotification: {\n  timestamp: 1676422434342310772\n  prefix: {\n    origin: 
\"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"config\"\n      }\n      elem: {\n        name: \"hostname\"\n      }\n    }\n    val: {\n      string_val: \"rosesarered\"\n    }\n  }\n}\nnotification: {\n  timestamp: 1676422434479082363\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"hostname\"\n      }\n    }\n    val: {\n      string_val: \"rosesarered\"\n    }\n  }\n}\n"
  },
  {
    "path": "cmd/demo/setrequest.textproto",
    "content": "replace {\n  path {\n    elem {\n      name: \"system\"\n    }\n    elem {\n      name: \"config\"\n    }\n    elem {\n      name: \"hostname\"\n    }\n  }\n  val {\n    string_val: \"violetsareblue\"\n  }\n}\nreplace {\n  path {\n    elem {\n      name: \"lacp\"\n    }\n    elem {\n      name: \"interfaces\"\n    }\n    elem {\n      name: \"interface\"\n      key {\n        key: \"name\"\n        value: \"Port-Channel9\"\n      }\n    }\n  }\n  val {\n    json_ietf_val: \"{\\n  \\\"openconfig-lacp:config\\\": {\\n    \\\"interval\\\": \\\"FAST\\\",\\n    \\\"name\\\": \\\"Port-Channel9\\\"\\n  },\\n  \\\"openconfig-lacp:name\\\": \\\"Port-Channel9\\\"\\n}\"\n  }\n}\nupdate {\n  path {\n    elem {\n      name: \"network-instances\"\n    }\n    elem {\n      name: \"network-instance\"\n      key {\n        key: \"name\"\n        value: \"VrfBlue\"\n      }\n    }\n  }\n  val {\n    json_ietf_val: \"{\\n  \\\"openconfig-network-instance:config\\\": {\\n    \\\"name\\\": \\\"VrfBlue\\\",\\n    \\\"type\\\": \\\"openconfig-network-instance-types:L3VRF\\\"\\n  },\\n  \\\"openconfig-network-instance:name\\\": \\\"VrfBlue\\\"\\n}\"\n  }\n}\n"
  },
  {
    "path": "cmd/demo/setrequest2.textproto",
    "content": "replace {\n  path {\n    elem {\n      name: \"system\"\n    }\n    elem {\n      name: \"config\"\n    }\n    elem {\n      name: \"hostname\"\n    }\n  }\n  val {\n    string_val: \"rosesarered\"\n  }\n}\nreplace {\n  path {\n    elem {\n      name: \"lacp\"\n    }\n    elem {\n      name: \"interfaces\"\n    }\n    elem {\n      name: \"interface\"\n      key {\n        key: \"name\"\n        value: \"Port-Channel9\"\n      }\n    }\n  }\n  val {\n    json_ietf_val: \"{\\n  \\\"openconfig-lacp:config\\\": {\\n    \\\"interval\\\": \\\"FAST\\\",\\n    \\\"name\\\": \\\"Port-Channel9\\\"\\n  },\\n  \\\"openconfig-lacp:name\\\": \\\"Port-Channel9\\\"\\n}\"\n  }\n}\nreplace {\n  path {\n    elem {\n      name: \"network-instances\"\n    }\n    elem {\n      name: \"network-instance\"\n      key {\n        key: \"name\"\n        value: \"VrfBlue\"\n      }\n    }\n  }\n  val {\n    json_ietf_val: \"{\\n  \\\"openconfig-network-instance:config\\\": {\\n    \\\"name\\\": \\\"VrfBlue\\\",\\n    \\\"type\\\": \\\"openconfig-network-instance-types:L3VRF\\\"\\n  },\\n  \\\"openconfig-network-instance:name\\\": \\\"VrfBlue\\\"\\n}\"\n  }\n}\n"
  },
  {
    "path": "cmd/demo/subscriberesponses.textproto",
    "content": "sync_response: true\n\nupdate: {\n  timestamp: 1676419100456944135\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"ssh-server\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"protocol-version\"\n      }\n    }\n    val: {\n      string_val: \"V2\"\n    }\n  }\n}\n\nupdate: {\n  timestamp: 1676420328291197426\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"config\"\n      }\n      elem: {\n        name: \"hostname\"\n      }\n    }\n    val: {\n      string_val: \"rosesarered\"\n    }\n  }\n}\n\nupdate: {\n  timestamp: 1676419100456944135\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"ntp\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"enabled\"\n      }\n    }\n    val: {\n      bool_val: false\n    }\n  }\n}\n\nupdate: {\n  timestamp: 1676419100456944135\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"ntp\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"enable-ntp-auth\"\n      }\n    }\n    val: {\n      bool_val: false\n    }\n  }\n}\n\nupdate: {\n  timestamp: 1676419100456944135\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"ssh-server\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"enable\"\n      }\n    }\n    val: {\n      
bool_val: true\n    }\n  }\n}\n\nupdate: {\n  timestamp: 1676420328448197153\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"hostname\"\n      }\n    }\n    val: {\n      string_val: \"rosesarered\"\n    }\n  }\n}\n\nupdate: {\n  timestamp: 1676419100459254468\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"boot-time\"\n      }\n    }\n    val: {\n      uint_val: 1676419100459308639\n    }\n  }\n}\n\nupdate: {\n  timestamp: 1676419100456944135\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"telnet-server\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"enable\"\n      }\n    }\n    val: {\n      bool_val: false\n    }\n  }\n}\n\nupdate: {\n  timestamp: 1676422427135895887\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"config\"\n      }\n      elem: {\n        name: \"hostname\"\n      }\n    }\n    val: {\n      string_val: \"rosesareredd\"\n    }\n  }\n}\n\nupdate: {\n  timestamp: 1676422427269965151\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"hostname\"\n      }\n    }\n    val: {\n      string_val: \"rosesareredd\"\n    }\n  }\n}\n\nupdate: {\n  timestamp: 1676422434342310772\n  prefix: {\n    origin: \"openconfig\"\n    target: 
\"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"config\"\n      }\n      elem: {\n        name: \"hostname\"\n      }\n    }\n    val: {\n      string_val: \"rosesarered\"\n    }\n  }\n}\n\nupdate: {\n  timestamp: 1676422434479082363\n  prefix: {\n    origin: \"openconfig\"\n    target: \"fakedut\"\n  }\n  update: {\n    path: {\n      elem: {\n        name: \"system\"\n      }\n      elem: {\n        name: \"state\"\n      }\n      elem: {\n        name: \"hostname\"\n      }\n    }\n    val: {\n      string_val: \"rosesarered\"\n    }\n  }\n}\n"
  },
  {
    "path": "config.json",
    "content": "{\n    \"username\": \"admin\",\n    \"password\": \"sros\",\n    \"port\": 57400,\n    \"timeout\": \"5s\",\n    \"skip-verify\": true,\n    \"tls-key\": \"/path/to/client.key\",\n    \"tls-cert\": \"/path/to/client.crt\",\n    \"tls-ca\": \"/path/to/ca.crt\",\n    \"targets\": {\n        \"172.17.0.100\": {\n            \"timeout\": \"2s\",\n            \"subscriptions\": [\n                \"sub1\"\n            ],\n            \"outputs\": [\n                \"output1\",\n                \"output3\"\n            ]\n        },\n        \"172.17.0.101\": {\n            \"username\": \"sros\",\n            \"password\": \"sros\",\n            \"insecure\": true,\n            \"subscriptions\": [\n                \"sub2\"\n            ],\n            \"outputs\": [\n                \"output2\",\n                \"output3\"\n            ]\n        },\n        \"172.17.0.102:57000\": {\n            \"password\": \"sros123\",\n            \"tls-key\": \"/path/file1\",\n            \"tls-cert\": \"/path/file2\"\n        },\n        \"172.17.0.103\": null\n    },\n    \"subscriptions\": {\n        \"sub1\": {\n            \"paths\": [\n                \"/configure/port[port-id=*]\",\n                \"/state/port[port-id=*]\"\n            ],\n            \"stream-mode\": \"on_change\"\n        },\n        \"sub2\": {\n            \"paths\": [\n                \"/configure/port[port-id=*]/statistics\"\n            ],\n            \"stream-mode\": \"sample\",\n            \"sample-interval\": \"10s\"\n        }\n    },\n    \"outputs\": {\n        \"output1\": {\n            \"type\": \"file\",\n            \"file-type\": \"stdout\"\n        },\n        \"output2\": {\n            \"type\": \"file\",\n            \"filename\": \"local.log\"\n        },\n        \"output3\": {\n            \"type\": \"nats\",\n            \"address\": \"localhost:4222\",\n            \"subject-prefix\": \"telemetry\",\n            \"username\": null,\n            
\"password\": null\n        },\n        \"output4\": {\n            \"type\": \"stan\",\n            \"address\": \"localhost:4223\",\n            \"subject\": \"telemetry\",\n            \"username\": null,\n            \"password\": null,\n            \"name\": null,\n            \"cluster-name\": \"test-cluster\",\n            \"timeout\": null,\n            \"ping-interval\": null,\n            \"ping-retry\": null\n        },\n        \"output5\": {\n            \"type\": \"kafka\",\n            \"address\": \"localhost:9092\",\n            \"topic\": \"telemetry\",\n            \"max-retry\": null,\n            \"timeout\": null\n        },\n        \"output6\": {\n            \"type\": \"nats\",\n            \"address\": \"localhost:4222\",\n            \"subject-prefix\": \"telemetry\",\n            \"username\": null,\n            \"password\": null\n        }\n    }\n}"
  },
  {
    "path": "config.toml",
    "content": "username = \"admin\"\npassword = \"sros\"\nport = 57400\ntimeout = \"5s\"\nskip-verify = true\ntls-key = \"/path/to/client.key\"\ntls-cert = \"/path/to/client.crt\"\ntls-ca = \"/path/to/ca.crt\"\n\n[targets]\n  [targets.\"172.17.0.100\"]\n  timeout = \"2s\"\n  subscriptions = [ \"sub1\" ]\n  outputs = [ \n    \"output1\", \n    \"output3\"\n    ]\n\n  [targets.\"172.17.0.101\"]\n  username = \"sros\"\n  password = \"sros\"\n  insecure = true\n  subscriptions = [ \"sub2\" ]\n  outputs = [ \n    \"output2\", \n    \"output3\" \n    ]\n\n  [targets.\"172.17.0.102:57000\"]\n  password = \"sros123\"\n  tls-key = \"/path/file1\"\n  tls-cert = \"/path/file2\"\n\n[subscriptions.sub1]\npaths = [ \n  \"/configure/port[port-id=*]\", \n  \"/state/port[port-id=*]\" \n  ]\nstream-mode = \"on_change\"\n\n[subscriptions.sub2]\npaths = [ \"/configure/port[port-id=*]/statistics\" ]\nstream-mode = \"sample\"\nsample-interval = \"10s\"\n\n[outputs.output1]\ntype = \"file\"\nfile-type = \"stdout\"\n\n[outputs.output2]\ntype = \"file\"\nfilename = \"local.log\"\n\n[outputs.output3]\ntype = \"nats\"\naddress = \"localhost:4222\"\nsubject-prefix = \"telemetry\"\n\n[outputs.output4]\ntype = \"stan\"\naddress = \"localhost:4223\"\nsubject = \"telemetry\"\ncluster-name = \"test-cluster\"\n\n[outputs.output5]\ntype = \"kafka\"\naddress = \"localhost:9092\"\ntopic = \"telemetry\"\n\n[outputs.output6]\ntype = \"nats\"\naddress = \"localhost:4222\"\nsubject-prefix = \"telemetry\"\n"
  },
  {
    "path": "config.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: sros\nport: 57400\ntimeout: 5s\nskip-verify: true\ntls-key: /path/to/client.key\ntls-cert: /path/to/client.crt\ntls-ca: /path/to/ca.crt\n\ntargets:\n  172.17.0.100:\n    timeout: 2s\n    subscriptions:\n      - sub1\n    outputs:\n      - output1\n      - output3\n  172.17.0.101:\n    username: sros\n    password: sros\n    insecure: true\n    subscriptions:\n      - sub2\n    outputs:\n      - output2\n      - output3\n  172.17.0.102:57000:\n    password: sros123\n    tls-key: /path/file1\n    tls-cert: /path/file2\n  172.17.0.103:\n    \nsubscriptions:\n  sub1:\n    paths:\n      - /configure/port[port-id=*]\n      - /state/port[port-id=*]\n    stream-mode: on_change # target-defined # sample\n  sub2:\n    paths:\n       - /configure/port[port-id=*]/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\noutputs:\n  output1:\n    type: file\n    file-type: stdout\n  output2:\n    type: file\n    filename: local.log\n  output3:\n    type: nats\n    address: localhost:4222\n    subject-prefix: telemetry\n    username:\n    password:\n  output4:\n    type: stan\n    address: localhost:4223\n    subject: telemetry\n    username:\n    password:\n    name: \n    cluster-name: test-cluster\n    timeout:\n    ping-interval:\n    ping-retry:\n  output5:\n    type: kafka\n    address: localhost:9092\n    topic: telemetry\n    max-retry: \n    timeout:\n  output6:\n    type: nats\n    address: localhost:4222\n    subject-prefix: telemetry\n    username:\n    password:\n"
  },
  {
    "path": "docs/CNAME",
    "content": "gnmic.openconfig.net"
  },
  {
    "path": "docs/basic_usage.md",
    "content": "The following examples demonstrate the basic usage of `gnmic` in a scenario where the remote target runs an unsecured (without TLS enabled) gNMI server. The `admin:admin` credentials are used to connect to the gNMI server running at `10.1.0.11:57400` address.\n\n!!!info\n    For the complete command usage examples, refer to the [\"Command reference\"](cmd/capabilities.md) menu.\n\n### Capabilities RPC\n\nGetting the device's [capabilities](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#32-capability-discovery) is done with [`capabilities`](cmd/capabilities.md) command:\n\n```bash\ngnmic -a 10.1.0.11:57400 -u admin -p admin --insecure capabilities\ngNMI_Version: 0.7.0\nsupported models:\n  - nokia-conf, Nokia, 19.10.R2\n  - nokia-state, Nokia, 19.10.R2\n  - nokia-li-state, Nokia, 19.10.R2\n  - nokia-li-conf, Nokia, 19.10.R2\n<< SNIPPED >>\nsupported encodings:\n  - JSON\n  - BYTES\n```\n\n### Get RPC\n\n[Retrieving](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#33-retrieving-snapshots-of-state-information) the data snapshot from the target device is done with [`get`](cmd/get.md) command:\n\n```bash\ngnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \\\n      get --path /state/system/platform\n\n{\n  \"source\": \"10.1.0.11:57400\",\n  \"timestamp\": 1592829586901061761,\n  \"time\": \"2020-06-22T14:39:46.901061761+02:00\",\n  \"updates\": [\n    {\n      \"Path\": \"state/system/platform\",\n      \"values\": {\n        \"state/system/platform\": \"7750 SR-1s\"\n      }\n    }\n  ]\n}\n```\n\n### Set RPC\n\n[Modifying](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#34-modifying-state) state of the target device is done with [`set`](cmd/set.md) command:\n\n```bash\ngnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \\\n      set --update-path /configure/system/name \\\n          --update-value gnmic_demo\n\n{\n  \"source\": 
\"0.tcp.eu.ngrok.io:12267\",\n  \"timestamp\": 1592831593821038738,\n  \"time\": \"2020-06-22T15:13:13.821038738+02:00\",\n  \"results\": [\n    {\n      \"operation\": \"UPDATE\",\n      \"path\": \"configure/system/name\"\n    }\n  ]\n}\n```\n\n### Subscribe RPC\n\n[Subscription](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35-subscribing-to-telemetry-updates) to the gNMI telemetry data can be done with [`subscribe`](cmd/subscribe.md) command:\n\n```bash\ngnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \\\n      sub --path \"/state/port[port-id=1/1/c1/1]/statistics/in-packets\"\n\n{\n  \"source\": \"0.tcp.eu.ngrok.io:12267\",\n  \"timestamp\": 1592832965197288856,\n  \"time\": \"2020-06-22T15:36:05.197288856+02:00\",\n  \"prefix\": \"state/port[port-id=1/1/c1/1]/statistics\",\n  \"updates\": [\n    {\n      \"Path\": \"in-packets\",\n      \"values\": {\n        \"in-packets\": \"12142\"\n      }\n    }\n  ]\n}\n```\n\n### YANG path browser\n\n`gnmic` can produce a list of XPATH/gNMI paths for a given YANG model with its [`path`](cmd/path.md) command. The paths in that list can be used as the `--path` values for the Get/Set/Subscribe commands.\n\n```bash\n# nokia model\ngnmic path -m nokia-state --file nokia-state-combined.yang | head -10\n/state/aaa/radius/statistics/coa/dropped/bad-authentication\n/state/aaa/radius/statistics/coa/dropped/missing-auth-policy\n/state/aaa/radius/statistics/coa/dropped/invalid\n/state/aaa/radius/statistics/coa/dropped/missing-resource\n/state/aaa/radius/statistics/coa/received\n/state/aaa/radius/statistics/coa/accepted\n/state/aaa/radius/statistics/coa/rejected\n/state/aaa/radius/statistics/disconnect-messages/dropped/bad-authentication\n/state/aaa/radius/statistics/disconnect-messages/dropped/missing-auth-policy\n/state/aaa/radius/statistics/disconnect-messages/dropped/invalid\n```\n"
  },
  {
    "path": "docs/blog/index.md",
    "content": "Coming soon"
  },
  {
    "path": "docs/changelog.md",
    "content": "## Changelog\n\n### v0.45.0 - March 2nd 2026\n\n- Prometheus and Prometheus RemoteWrite outputs:\n\n    - When converting values to labels, duplicate label names are now resolved by prepending parent path elements until uniqueness is achieved, preventing metrics from being dropped.\n\n- Get/Set commands:\n\n    - Custom gNMI extensions can be included in requests via `--registered-extensions` and a JSON payload; use `--proto-dir` and `--proto-file` to specify the extension Protobuf definitions.\n\n- Formatters:\n\n    - JSON output no longer escapes HTML characters (`<`, `>`, `&`), producing more readable output for values such as path prefixes containing `->`.\n\n- Outputs:\n\n    - OTLP: Implemented dynamic config via `Update()` and `UpdateProcessor()`, configurable Resource vs DataPoint level attributes, support for multiple metrics per gNMI message, and include gNMIc version in OTLP resource scope.\n\n- Target:\n\n    - Target last error is now reflected in a consistent way across collector state and API responses.\n\n- gNMI API:\n\n    - Improved error formatting and added tests for the `pkg/api` package.\n\n- Performance:\n\n    - Pool bytes buffers and strings builders where it makes sense to reduce allocations.\n\n- Dependencies:\n\n    - Bumped `github.com/cloudflare/circl` from 1.6.1 to 1.6.3.\n    - Bumped `github.com/go-git/go-git/v5` from 5.13.0 to 5.16.5.\n    - Bumped `go.opentelemetry.io/otel/sdk` from 1.38.0 to 1.40.0.\n\n### v0.44.0 - February 17th 2026\n\n- gNMI Extensions:\n\n    - gNMI extensions in get, set, and subscribe responses are now parsed and displayed as JSON when using `--proto-dir`, `--proto-file`, and `--registered-extensions` with the corresponding Protobuf files.\n\n- Collector mode:\n\n    - Collector mode now stores Targets state (gNMI connection and subscription(s) state) in a separate store.\n\n    - Collector mode supports an SSE endpoint streaming config and state for any object (Target, subscription, 
outputs, etc.)\n\n- Target:\n\n    - Multiple gRPC level config knobs can now be set per target: gRPC read/write buffer, gRPC window size, and other dial options. Configuration is documented in the target configuration reference.\n\n### v0.43.0 - February 1st 2026\n\n- Inputs:\n\n    - Jetstream:\n      - Added support for configuring `max-ack-pending` to limit the maximum number of unacknowledged messages on a NATS JetStream input.\n      - DeliverPolicy and AckPolicy are now fully configurable for greater flexibility and control.\n      - Added NATS JetStream workqueue retention pattern support for exactly-once message processing in task distribution scenarios.\n\n- Outputs:\n\n    - Jetstream:\n      - Added `retention-policy` configuration option with support for `limits` (default) and `workqueue` retention policies.\n      - Stream existence verification with detailed logging; omit `create-stream` to use existing streams.\n\n    - Introduced support for OpenTelemetry Protocol (OTLP) as an output destination, enabling direct export of telemetry data to OTLP-compatible backends with full metric conversion (gauges, counters, histograms) and custom resource attributes.\n\n- Commands:\n\n    - Added the new `collector` command: Runs gNMIc in collector mode, enabling dynamic, live updates to all configuration objects including targets, subscriptions, outputs, inputs, and processors. Unlike the `subscribe` command, the `collector` command supports on-the-fly configuration changes via the REST API, without requiring a restart. 
gNMIc automatically reconciles changes to maintain the desired state.\n\n    - The `collector` command also includes a suite of subcommands, allowing you to configure the gNMIc collector directly from the CLI.\n\n- Formatters:\n\n    - Flat format: Fixed leading slash handling when origin is not included in prefix or prefix is non-existent, ensuring consistent path formatting across all notification types.\n\n- Processors:\n\n    - `event_group_by` processor now correctly handles delete events.\n\n- API:\n\n    - Fixed the API path to patch subscriptions for a target ID.\n\n- Target:\n\n    - When a target is removed, it is now also removed from the configuration.\n\n- Dependencies:\n\n    - Fixed `gnmic/pkg/api` module version mismatch in go.mod for consumers building gNMIc as a dependency.\n    - Bumped `golang.org/x/crypto` to v0.45.0.\n\n### v0.42.0 - September 19th 2025\n\n- Inputs:\n\n    - Add support for NATS Jetstream input type.\n\n    - Kafka: Fixed event parsing when `eventMsg` was not initialized, preventing nil pointer dereference.\n\n- Loader:\n\n    - Loaded targets subscribe requests are now subject to the `subscribe-backoff` timer when new targets are added via loaders (HTTP, file, etc.) 
or config change events.\n\n    - Loaded target configuration now supports environment variable expansion when `expand-env` is set to true, enabling per-target credentials via env vars.\n\n    - Consul loader: Fixed tag matching logic to allow services with extra metadata tags (subset matching); services with required tags plus additional tags are no longer incorrectly rejected.\n\n    - Consul loader: Improved Go template parsing for target name and event-tags.\n\n    - HTTP loader: Various fixes and added tests (fixes #712).\n\n- Outputs:\n\n    - Kafka: Fixed missing label in error metric that could cause panics when error reason was unavailable.\n\n- gNMI server:\n\n    - The unary RPCs timeout is now configurable via `gnmi-server.timeout` in the config (default remains 2 minutes).\n\n- Get command:\n\n    - Added optional organization and version for model selection: prepend `/` for organization, append `:` for version when specifying models.\n\n- Subscribe:\n\n    - Fixed `sync_response` output being suppressed for ONCE mode subscriptions; behavior now matches STREAM mode.\n\n- Targets:\n\n    - A new internal Prometheus metric `gnmic_target_connection_state` reflects the gRPC client connection state with values: 0 (UNKNOWN), 1 (IDLE), 2 (CONNECTING), 3 (READY), 4 (TRANSIENT_FAILURE), or 5 (SHUTDOWN). 
The `target_up` metric now correctly reflects connection failures (e.g., auth issues).\n\n- Bug fixes:\n\n    - Fixed memory leak when subscription fails: `cancel()` reference was kept alive indefinitely.\n\n    - Fixed OS environment variable values being incorrectly lowercased (fixes #663).\n\n- API:\n\n    - Documentation updated for `POST /api/v1/config/targets` to reflect that the `name` field is required for proper target identification.\n\n- Dependencies:\n\n    - Bumped `golang.org/x/crypto` to v0.41.0.\n    - Bumped `golang.org/x/oauth2` to v0.31.0.\n\n### v0.41.0 - April 6th 2025\n\n- Processors:\n\n    - Added `event-time-epoch` processor, enabling converting string-based time values into epoch timestamps.\n\n    - Fixed `ieeefloat32` processor for correct handling of binary IEEE float32 values.\n\n- Target Discovery:\n\n    - Consul loader: Adds the ability to use Go Templates on Consul targets to set target name as well as event-tags (e.g., `target-name`, `target-tags` with `{{.Meta.*}}`).\n\n- Loader:\n\n    - When a target configuration changes, loaders now generate delete and add actions so the subscription is restarted to apply the new parameters (fixes #563).\n\n- Outputs:\n\n    - Messages are now exported to outputs in sequence to avoid sync responses being sent before initial notifications (fixes #612).\n\n    - Output internal metrics are now registered only once, preventing duplicate registration errors (fixes #586).\n\n- Path generation:\n\n    - Fixed xpath generation with prefix: module prefix is now replaced with module name when generating xpaths (fixes #633).\n\n- Targets:\n\n    - `target_up` metric now resets before creating metrics so deleted targets (e.g., via Consul) no longer show as still up (fixes #604).\n\n- Dependencies:\n\n    - Bumped `github.com/golang/glog` to v1.2.4.\n\n### v0.40.0 - January 27th 2025\n\n- Processors:\n\n    - Introducing `event-value-tag-v2` processor, enabling the addition of values as tags to other 
messages\n      without requiring caching to be enabled in the associated output.\n  \n- Logging related to calls to the `/api/v1/healthz` API endpoint is now optional.\n\n- Clustering:\n  \n    - New REST API endpoints added:\n      - Switch the cluster leader: `DELETE /api/v1/cluster/leader`\n      - Drain an instance: `POST /api/v1/members/{id}/drain` where id is the instance name to be drained\n      - Rebalance the load between instances: `POST /api/v1/cluster/rebalance`\n\n### v0.39.0 - November 7th 2024\n\n- Get Command\n\n    - Added `--dry-run` flag.\n\n- Set Command\n\n    - Added `--no-trim` flag to disable trimming white spaces from values payload.\n\n- REST API\n\n    - Added `/api/v1/admin/shutdown` endpoint to shutdown gNMIc.\n\n- Outputs:\n\n    - File: file output now supports file rotation.\n\n    - NATS and Jetstream: The publishers buffer size is now configurable.\n\n- Build:\n\n    - Added `ARM64` binary and container image.\n\n- gNMIc Metrics:\n\n    - Added a metric to keep track of failed subscribe requests.\n\n    - Added a metric to keep track of targets connectivity state.\n\n- Clustering:\n\n    - The REST API client used for building gNMIc cluster can now be configured with client certificates to support mTLS.\n\n- Processors:\n\n    - Added a processor to handle converting binary IEEE float32 values to float32.\n\n\n### v0.38.0 - July 8th 2024\n\n- Kafka Output\n\n    - Add configurable Kafka version\n\n- gNMI extensions\n\n    - Implement [Commit confirmed gNMI extension](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-commit-confirmed.md)\n    - Implement [Depth gNMI extension](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-depth.md)\n\n### v0.37.0 - May 13th 2024\n\n- gNMI connection TCP Keepalive\n\n    - It is now possible to configure the TCP keepalive probes time interval.\n\n- gRPC Keepalive\n\n    - The gRPC connection keepalive parameters are now configurable.\n    It follows the gRPC 
spec: https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md\n\n- Proxy Command:\n\n    - gNMIc now supports a `proxy` command.\n    When issued, gNMIc runs as a gNMI proxy. See details [here](cmd/proxy.md)\n\n- Processor Command:\n\n    - gNMIc now supports a `processor` command.\n    It can be used to run a set of processor offline against an input of event messages and print the result.\n    See details [here](cmd/processor.md)\n\n- Kafka Output:\n\n    - The Kafka output now supports a configurable flush-interval parameters.\n\n- InfluxDB Output:\n\n    - The InfluxDB output now supports writing gNMI deletes to InfluxDB using a custom tag name.\n\n- Prometheus Output:\n\n    - The Prometheus output will now automatically convert boolean values (true: 1 and false: 0).\n\n\n### v0.36.0 - February 13th 2024\n\n- Event Message\n\n    - gNMI updates with deleted paths are now converted into separate event messages where the keys are extracted from the path and set as event tags.\n\n- gNMI TLS cipher suites\n\n    - It is now possible to select the list of a cipher suites that gNMIc advertises to a gNMI server during a TLS handshake. The full list of supported ciphers can be found [here](user_guide/targets/targets.md#controlling-the-advertised-cipher-suites)\n\n- Set Request\n\n    - The Set command now features a new flag, `--proto-file`, which allows the specification of one or more files. These files should contain gNMI Set requests in `prototext` format, which will be sent to the specified targets.\n\n### v0.35.0 - January 20th 2024\n\n- Processors\n\n    - Added a plugin process type that allows users to write their own custom processors: [examples](https://github.com/openconfig/gnmic/tree/main/examples/plugins)\n\n- gRPC metadata\n\n    - A new flag `--metadata | -H` is introduced. 
It allows users to add custom gRPC metadata headers to any request.\n\n\n- Outputs:\n\n    - Kafka output:\n        - Added support for custom topics per target/subscription.\n\n        - Added support for both Async and Sync Kafka producers.\n\n- Commands:\n\n    - Listen command:\n        When using the `listen` command outputs internal metrics are properly initialized and exposed to prometheus for scraping.\n\n### v0.34.0 - November 11th 2023\n\n- Prometheus Write Output\n\n    - The number of `prometheus_write` writers can now be configured.\n\n- Subscription Encoding\n\n    - A subscription encoding can now be set per target. Before, it was either a global attribute or set per subscription.\n        With this change, it can be set globally, per target or per subscription.\n\n- Processors:\n\n    - New `event-combine` processor: A convenience processor that allows combining other processors into a single one.\n\n    - New `event-rate-limit` processor: A processor that rate-limits each event with matching tags to the configured amount per-seconds.\n\n- Outputs:\n\n    - New `asciigraph` output: https://asciinema.org/a/617477\n\n- Clustering:\n\n    - New `redis` locker: For leader election, service discovery and target distribution gNMIc supports both `Consul` and `Kubernetes`. 
It is now possible to use `redis` for the same purpose.\n\n### v0.33.0 - October 8th 2023\n\n- Rest API\n\n    - Added a kubernetes friendly `api/v1/healthz` endpoint.\n\n- Set Command\n\n    - Added support for gNMI set [`union_replace`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-union_replace.md) operation.\n\n- Outputs\n\n    - Allow the number of workers used by the `prometheus` and `prometheus_write` outputs to be configurable to improve performance.\n\n- Go version\n\n    - Upgrade to Golang v1.21.1.\n\n### v0.32.0 - August 31st 2023\n\n- TLS\n\n    - It is now possible to override the serverName used by gNMIc when verifying the server name present in the\n      certificate sent by the gNMI server. [PR](https://github.com/openconfig/gnmic/pull/173)\n\n- Subscription\n \n    - Added support for mixing on-change and sample stream subscription in the same gRPC stream. [PR](https://github.com/openconfig/gnmic/pull/197)\n    - Added support for attaching specific outputs to a subscription. [PR](https://github.com/openconfig/gnmic/pull/209)\n\n- REST API\n\n    - Added a health chek endpoint to be used by kubernetes. [PR](https://github.com/openconfig/gnmic/pull/202)\n\n- Kafka Output\n\n    - Added support for Kafka compression. [PR](https://github.com/openconfig/gnmic/pull/203)\n\n- Generate Path\n \n    - Added `enum-values` to the `JSON` output of `generate path` command. 
[PR](https://github.com/openconfig/gnmic/pull/215)\n\n### v0.31.0 - May 17th 2023\n\n- Prometheus output\n\n    - When using the Consul auto discovery feature of the Proemtheus output,\n      it is now possible to configure different service and listen addresses.\n      This is useful when gNMIc is running as a container of behind a NAT device\n\n- Set Request file\n\n    - The CLI origin is now allowed in the `path` field of `updates`, `replaces` and `deletes` in a set request file.\n      If the `path` field has the `cli:/` origin, the `value` field is expected to be a string and will be set in an `ascii` TypedValue.\n\n### v0.30.0 - April 18th 2023\n\n- Set Command\n\n    - The [set command](cmd/set.md) now supports the flags `--replace-cli`, `--replace-cli-file`, `--update-cli` and `--update-cli-file`, these flags can be used to send gNMI set requests with the CLI origin.\n\n- Logging:\n  \n    - Reduce log verbosity of File and HTTP target discovery mechanisms.\n\n- Processors:\n\n    - The [Drop](user_guide/event_processors/event_drop.md) event processor completely removes the message to be dropped instead of replacing it with an empty message.\n\n- Inputs:\n\n    - [Kafka input](user_guide/inputs/kafka_input.md) now supports TLS connections.\n\n- Outputs:\n\n    - [Kafka output](user_guide/outputs/kafka_output.md) now has a configuration attribute called `insert-key`, if true, the messages written will include a key built from the gNMI message source and subscription name.\n\n    - [TCP output](user_guide/outputs/tcp_output.md) now has a configuration attribute called `delimiter`, it allows to set user defined string to be sent between each message. This allows the receiving end to properly split JSON objects. 
It is particularly useful with Logstash when writing gNMI events to an ELK stack.
The envisioned use case\n            is to check whether a stored snapshot of device state matches that of the\n            intended state as specified by a `SetRequest`.\n\n- Outputs:\n\n    - When using the `event` format with certain outputs (`file`, `nats`, `jetstream`, `kafka`, `tcp` or `udp`) it's possible to send event message individually as opposed to sending them in an array.\n        This is done using the attribute `split-events: true` under each of the outputs configuration sections.\n\n    - [Prometheus output](user_guide/outputs/prometheus_output.md) now supports a custom service address field under `service-registration`, it specifies the address to be registered in Consul for discovery.\n        It can be a hostname, an IP address or a IP/Host:Port socket address. It it does not contain a port number, the port number from the `listen` field is used.\n\n- Set Request file\n\n    - The Set request file can be used with Origin `cli`, gNMIc will properly format the commands as string, not as JSON value.\n\n### v0.29.0 - February 20th 2023\n\n- Generate Path\n\n    - The `generate path` command with the flag `--json` shows the features the path depends on.\n      The list of features is built recursively from the YANG attribute `if-feature`.\n\n- Processors:\n\n    - New processor [`event-starlark`](user_guide/event_processors/event_starlark.md) allows to run a [starlak](https://github.com/google/starlark-go/blob/master/doc/spec.md) script on the received messages.\n\n- Loaders\n\n    - The [HTTP loader](user_guide/targets/target_discovery/http_discovery.md) now supports different authentication schemas as well as setting a template from a local file.\n\n### v0.28.0 - December 7th 2022\n\n- Targets\n\n    - Targets static tags are now properly propagated to outputs when a cache is used.\n\n- Listen Command:\n\n    - The `system-name` HTTP2 header is now used as a tag in exported metrics.\n\n- Outputs:\n\n    - The timestamp precision under `gNMIc`'s 
InfluxDB output is now configurable.\n\n    - Added a new `snmp` output type, it allows to dynamically convert gNMI updates into SNMP traps.\n\n### v0.27.0 - October 8th 2022\n\n- Targets\n\n    - Add supports for socks5 proxies per target.\n\n- Logging\n\n    - Support for log rotation via the flags `--log-max-size`, `log-max-backups` and `--log-compress`\n\n### v0.26.0 - June 28th 2022\n\n- Outputs\n\n    - Add [Prometheus Remote Write output](user_guide/outputs/prometheus_write_output.md), this output type can be used to push metrics to various systems like [Mimir](https://grafana.com/oss/mimir/), [CortexMetrics](https://cortexmetrics.io/), [VictoriaMetrics](https://victoriametrics.com/), [Thanos](https://thanos.io/)...\n    - Add [NATS Jetstream output](user_guide/outputs/jetstream_output.md), it allows to write metrics to NATS jetstream which supports persistency and filtering.\n\n- [gNMI historical subscriptions](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md#1-purpose)\n\n    `gNMIc` now support historical subscription using the [gNMI history extension](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md#2-definition)\n\n### v0.25.1 - June 13th 2022\n\n- Upgrade Go version to go1.18.1.\n\n- Fix running `gnmic subscribe` with only Inputs and Outputs configured (no subscriptions or targets).\n\n### v0.25.0 - June 11th 2022\n\n- Processors\n\n    - [Strings replace processor](user_guide/event_processors/event_strings.md) supports replaces using regular expressions.\n\n    - Processors  are now supported when collecting telemetry using [listen command](cmd/listen.md) (Nokia SROS specific)\n\n- New Processors\n\n    - [Data convert](user_guide/event_processors/event_data_convert.md)\n\n    - [Duration convert](user_guide/event_processors/event_duration_convert.md)\n    \n    - [Value tag](user_guide/event_processors/event_value_tag.md)\n\n- [Clustering](user_guide/HA.md)\n\n    - `gNMIc` supports kubernetes 
based clustering,\n    i.e you can build `gNMIc` clusters on kubernetes without the need for Consul cluster.\n\n- [Yang path generation](cmd/generate.md)\n\n    - The command `gnmic generate path` supports generating paths for YANG containers.\n    In earlier versions, the paths generation was done for YANG leaves only.\n\n- Internal gNMIc Prometheus metrics\n\n    `gNMIc` exposes additional internal metrics available to be scraped using Prometheus.\n\n- Static tags from target configuration\n\n    - It is now possible to set static tags on events by configuring them under each target.\n\n- Influxdb cache\n\n    The [InfluxDB output](user_guide/outputs/influxdb_output.md) now supports gNMI based caching, allowing to apply processors on multiple event messages at once and batching the written points to InfluxDB.\n\n### v0.24.0 - March 13th 2022\n\n- [gRPC Tunnel Support](user_guide/tunnel_server.md)\n\n    Add support for gNMI RPC using a gRPC tunnel, gNMIc runs as a collector with an embedded tunnel server.\n\n### v0.23.0 - February 24th 2022\n\n- Docker image:\n\n    - The published `gnmic` docker image is now based on `alpine` instead of an empty container.\n    - A `from scratch` image is published and can be obtained using the command:\n     ```bash\n     docker pull ghcr.io/karimra/gnmic:latest-scratch\n     docker pull ghcr.io/karimra/gnmic:v0.23.0-scratch\n     ```\n\n- [gNMIc Golang API](user_guide/golang_package/intro.md):\n\n    - Add gNMI responses constructors\n    - Add gRPC tunnel proto messages constructors\n\n- [Target Discovery](user_guide/targets/target_discovery/discovery_intro.md):\n\n    - Add the option to transform the loaded targets format using a Go text template for file and HTTP loaders\n    - Poll based target loaders (file, HTTP and docker) now support a startup delay timer\n\n### v0.22.1 - February 2nd 2022\n\n- Fix a Prometheus output issue when using gNMI cache that causes events to be missing from the metrics.\n\n### v0.22.0 - 
February 1st 2022\n\n- [gNMIc Golang API](user_guide/golang_package/intro.md):\n\n    Added the `github.com/karimra/gnmic/api` golang package.\n    It can be imported by other Golang programs to ease the creation of gNMI targets and gNMI Requests.\n\n### v0.21.0 - January 23rd 2022\n\n- [Generate Cmd](cmd/generate/generate_path.md):\n\n    Add YANG module namespace to generated paths.\n\n- Outputs:\n\n    Outputs [File](user_guide/outputs/file_output.md), [NATS](user_guide/outputs/nats_output.md) and [Kafka](user_guide/outputs/kafka_output.md) now support a `msg-template` field to customize the written messages using Go templates.\n\n- API:\n\n    Add [Cluster API](user_guide/api/cluster.md) endpoints.\n\n- Actions:\n\n    Add [Template](user_guide/actions/actions.md#template-action) action.\n\n    Add Subscribe ONCE RPC to [gNMI](user_guide/actions/actions.md#gnmi-action) action.\n\n    Allow [gNMI](user_guide/actions/actions.md#gnmi-action) action on multiple targets.\n\n    Add [Script](user_guide/actions/actions.md#script-action) action.\n\n- [Get Cmd](cmd/get.md):\n\n    Implement Format `event` for GetResponse messages.\n\n    Add the ability to execute processors with Get command flag [`--processor`](cmd/get.md#processor) on GetResponse messages.\n\n- [Target Discovery](user_guide/targets/target_discovery/discovery_intro.md):\n\n    Add the ability to run [actions](user_guide/actions/actions.md) on target discovery or deletion.\n\n- [Set Cmd](cmd/set.md):\n\n    Add [`--dry-run`](cmd/set.md#dry-run) flag which runs the set request templates and prints their output without sending the SetRequest to the targets.\n\n- TLS:\n\n    Add pre-master key logging for TLS connections using the flag [`--log-tls-secret`](global_flags.md#log-tls-secret). 
The key can be used to decrypt encrypted gNMI messages using wireshark.\n\n- Target:\n\n    Add `target.Stop()` method to gracefully close the target underlying gRPC connection.\n\n### v0.20.0 - October 19th 2021\n\n- Add [gomplate](https://docs.gomplate.ca) template functions to all templates rendered by `gnmic`.\n\n- [Path generation](cmd/generate/generate_path.md):\n\n    `gnmic generate path` supports generating paths with type and description in JSON format.\n\n- [Set RPC template](cmd/set.md#templated-set-request-file):\n\n    Set RPC supports multiple template files in a single command.\n\n- [Clustering](user_guide/HA.md):\n\n    `gnmic` clusters can be formed using secure (HTTPS) API endpoints.\n\n- [Configuration payload generation](cmd/generate.md):\n\n    Configuration keys can now be formatted as `camelCase` or `snake_case` strings\n\n### v0.19.1 - October 7th 2021\n\n- Path search\n  \n  Do not enter search mode if not paths are found.\n\n- [Prometheus Output](user_guide/outputs/prometheus_output.md)\n\n  Change the default service name when registering with a Consul server\n\n### v0.19.0 - September 16th 2021\n\n- Event Processors\n\n    [Event Convert](user_guide/event_processors/event_convert.md) now converts binary float notation to float\n\n- Target Loaders:\n\n    - [HTTP Loader](user_guide/targets/target_discovery/http_discovery.md)\n\n      gNMIc can now dynamically discover targets from a remote HTTP server.\n\n      HTTP Loader is now properly instrumented using Prometheus metrics.\n\n    - [File Loader](user_guide/targets/target_discovery/file_discovery.md)\n\n      Supports remote files (ftp, sftp, http(s)) in addition to local file system files.\n\n      File loader is now properly instrumented using Prometheus metrics.\n\n    - [Consul Loader](user_guide/targets/target_discovery/consul_discovery.md)\n\n      Consul Loader is now properly instrumented using Prometheus metrics.\n\n    - [Docker 
Loader](user_guide/targets/target_discovery/docker_discovery.md)\n\n      Docker Loader is now properly instrumented using Prometheus metrics.\n\n- gRPC\n\n     gNMIc now adds its version as part of the user-agent HTTP header.\n\n### v0.18.0 - August 17th 2021\n\n- [gNMI Server](user_guide/gnmi_server.md):\n\n    Add support for a global gNMI server.\n    It supports all types of subscriptions, ran against a local cache build out the configured subscriptions.\n    It support Get and Set RPCs as well, those are run against the configured targets.\n\n    The gNMI server supports Consul based service registration.\n\n- Outputs:\n\n    Add support for [gNMI server](user_guide/outputs/gnmi_output.md) output type\n\n- [Target configuration](user_guide/targets/targets.md):\n\n    Support multiple IP addresses per target, all addresses are tried simultaneously.\n    The first successful gRPC connection is used.\n\n- [Prometheus Output](user_guide/outputs/prometheus_output.md):\n\n    Add the option of generating Prometheus metrics on-scrape, instead of on-reception.\n    The gNMI notifications are stored in a local cache and used to generate metrics when a Prometheus server sends a scrape request.\n\n- Event Processors:\n\n    Add [`group-by`](user_guide/event_processors/event_group_by.md) processor, it groups events together based on a given criteria.\n    The events can belong to different gNMI notifications or even to different subscriptions.\n\n- Event Processor Convert:\n\n    Add support for boolean conversion\n\n- [Deployment Examples](deployments/deployments_intro.md):\n\n    Add [containerlab](https://containerlab.srlinux.dev) based deployment examples.\n    These deployment come with a router fabric built using Nokia's [SRL](https://learn.srlinux.dev)\n\n- [API server](user_guide/api/api_intro.md):\n\n    Add Secure API server configuration options\n\n- Target Loaders:\n\n    [Consul loader](user_guide/targets/target_discovery/consul_discovery.md#services-watch) 
update: Add support for gNMI target discovery from Consul services.\n\n- Get Request:\n\n    Add printing of Target as part of Path Prefix\n\n- Set Request:\n\n    Add printing of Target as part of Path Prefix\n\n### v0.17.0 - July 14th 2021\n\n- Event Trigger:\n\n    Enhance `event-trigger` to run multiple actions sequentially when an event occurs.\n\n    The output of an action can be used in the following ones.\n\n- Kafka output:\n\n    Add `SASL_SSL` and `SSL` security protocols to kafka output.\n\n- gRPC authentication:\n\n    Add support for token based gRPC authentication.\n\n### v0.16.2 - July 13th 2021\n\n- Fix nil pointer dereference in case a subscription has `suppress-redundant` but no `heartbeat-interval`.\n\n### v0.16.1 - July 12th 2021\n\n- Bump github.com/openconfig/goyang version to v0.2.7\n  \n### v0.16.0 - June 14th 2021\n\n- Target Discovery:\n\n    Add Docker Engine target loader, `gnmic` can dynamically discover gNMI targets running as docker containers.\n\n- Event Trigger: gNMI action\n\n    Enhance `gNMI action` to take external variables as input, in addition to the received gNMI update.\n\n### v0.15.0 - June 7th 2021\n\n- Subscription:\n\n   Add field `set-target` under subscription config, a boolean that enables setting the target name as a gNMI prefix target.\n\n- Outputs:\n\n   Add `add-target` and `target-template` fields under all outputs,\n   Enables adding the target value as a tag/label based on the subscription and target metadata\n\n### v0.14.3 - June 6th 2021\n\n- Set command:\n\n    Fix `ascii` values encoding if used with `--request-file` flag.\n\n### v0.14.2 - June 3rd 2021\n\n- Fix `event-convert` processor when the conversion is between integer types.\n- Add an implicit conversion of uint to int if the influxdb output version is 1.8.x.\n  This is a workaround for the limited support of influx APIv2 by influxDB1.8\n\n### v0.14.1 - May 31st 2021\n\n- Fix OverrideTS processor\n- Add `override-timestamps` option under outputs, 
to override the message timestamps regardless of the message output format\n\n### v0.14.0 - May 28th 2021\n\n- New Output format `flat`\n    - This format prints the Get and Subscribe RPCs as a list of `xpath: value`, where the `xpath` points to a leaf value.\n\n- New `gnmic diff` command:\n    - This command prints the difference in responses between a reference target `--ref` and one or more targets to be compared to the reference `--compare`.\n    - The output is printed as `flat` format results.\n  \n### v0.13.0 - May 10th 2021\n\n- New `gnmic generate` Command:\n    - Given a set of yang models and an xpath, `gnmic generate` generates a JSON/YAML representation of the YANG object the given path points to.\n    - Given a set of yang models and an set of xpaths (with `--update` or `--replace`), `gnmic generate set-request` generates a set request file that can be filled with the desired values and used with `gnmic set --request-file`\n    - The sub-command `gnmic generate path` is an alias to `gnmic path`\n\n- Path Command:\n    - add flag `--desc` which, if present, prints the YANG leaf description together with the generated paths.\n    - add flag `--config-only` which, if present, only generates paths pointing to YANG leaves representing config data.\n    - add flag `--state-only` which, if present, only generates paths pointing to a YANG leaf representing state data.\n\n### v0.12.2 - April 24th 2021\n\n- Fix a bug that cause gNMIc to crash if certain processors are used.\n\n### v0.12.1 - April 21st 2021\n\n- Fix parsing of stringArray flags containing a space.\n\n### v0.12.0 - April 20th 2021\n\n- Outputs:\n    - InfluxDB and Prometheus outputs: Convert gNMI Decimal64 values to Float64.\n- Set Command:\n    - Add the ability to run a Set command using a single file, including `replaces`, `updates` and `deletes`.\n    - The request file `--request-file` is either a static file or a Golang Text Template rendered separately for each target.\n    - The template 
input is read from a file referenced by the flag `--request-vars`.\n  \n### v0.11.0 - April 15th 2021\n\n- Processors:\n    - Add `event-allow` processor, basically an allow ACL based on `jq` condition or regular expressions.\n    - Add `event-extract-tags` processor, it adds tags based on regex named groups from tag names, tag values, value names, or values.\n    - Add `gnmi-action` to `event-trigger` processor, the action runs a gNMI Set or Get if the trigger condition is met.\n- Set Command:\n    - Improve usability by supporting reading values (--update-file and --replace-file) from standard input.\n\n### v0.10.0 - April 8th 2021\n\n- New command:\n    - `getset` command: This command conditionally executes both a `Get` and a `Set` RPC, the `GetResponse` is used to evaluate a condition which if met triggers the execution of the `Set` RPC.\n- Processors:\n    - Some processors' apply condition can be expressed using `jq` instead of regular expressions. \n\n### v0.9.1 - March 23rd 2021\n\n- Processors:\n    - Add `event-trigger` processor: This processor is used to trigger a predefined action if a condition is met.\n    - New processor `event-jq` which applies a transformation on the messages expressed as a jq expression.\n    \n- Shell autocompletion:\n    - Shell (bash, zsh and fish) autocompletion scripts can be generated using `gnmic completion [bash|zsh|fish]`.\n- gRPC gzip compression:\n    - `gnmic` supports gzip compression on gRPC connections.\n  \n### v0.9.0 - March 11th 2021\n\n- Clustered Prometheus output:\n    - When deployed as a cluster, it is possible to register only one of the prometheus outputs in Consul. 
This is handy in the case of a cluster with data replication.\n- Proto file loading at runtime (Nokia SROS):\n    - `gnmic` supports loading SROS proto files at runtime to decode gNMI updates with `proto` encoding\n- Kafka Output:\n    - Kafka SASL support: PLAIN, SCRAM SHA256/SHA512 OAuth mechanisms are supported.\n- Configuration:\n    - `gnmic` supports configuration using environment variables.\n- Processors:\n    - add `event-merge` processor.\n- Target Loaders:\n    - `gnmic` supports target loaders at runtime, new targets can be added to the configuration from a file that `gnmic` watches or from `Consul`\n  \n### v0.8.0 - March 2nd 2021\n\n- Inputs:\n    - Processors can now be applied by the input plugins.\n- Prometheus output:\n    - The Prometheus output can now register as a service in Consul, a Prometheus client can discover the output using consul service discovery.\n- Clustering:\n    - `gnmic` can now run as a cluster, this requires a running Consul instance that will be used by the `gnmic` instance for leader election and target load sharing.\n- Configuration file:\n    - The default configuration file placement now follows [XDG](https://wiki.archlinux.org/index.php/XDG_Base_Directory) recommendations\n- CLI exit status:\n    - Failure of most commands is properly reflected in the cli exit status.\n- Configuration:\n    - Configuration fields that are OS paths are expanded by `gnmic`\n- Deployment examples:\n    - A set of deployment examples is added to the repo and the docs.\n  \n### v0.7.0 - January 28th 2021\n\n- Prometheus output metrics customization:\n    - `metric-prefix` and `append-subscription-name` can be used to change the default metric prefix and append the subscription name to the metric name.\n    - `export-timestamps`: enables/disables the export of timestamps together with the metric.\n    - `strings-as-labels`: enables/disables automatically adding paths with a value of type string as a metric label.\n\n- NATS output:\n    - 
allow multiple NATS workers under NATS output via field `num-workers`.\n    - add NATS prometheus internal metrics.\n\n- STAN output:\n    - allow multiple STAN workers under STAN output via field `num-workers`.\n    - add NATS prometheus internal metrics.\n\n- File output:\n    - add File prometheus metrics.\n  \n- Inputs:\n    - support ingesting gNMI data from NATS, STAN or a Kafka message bus.\n\n\n### v0.6.0 - December 14th 2020\n\n- Processors:\n    - Added processors to `gnmic`, a set of basic processors can be used to manipulate gNMI data flowing through `gnmic`. These processors are applied by the output plugins\n\n- Upgrade command: `gnmic` can be upgraded using `gnmic version upgrade` command.\n\n### v0.5.2 - December 1st 2020\n- Outputs:\n    - Improve outputs logging\n    - Add Prometheus metrics to Kafka output\n\n### v0.5.1 - November 28th 2020\n- Prompt Mode:\n    - Fix subscribe RPC behavior\n- QoS:\n    - Do not populate QoS field if not set via config file or flag.\nOutputs:\n    - add configurable number of workers to some outputs.\n\n### v0.5.0 - November 25th 2020\n- Prompt Mode:\n    - Add prompt sub commands.\n- XPATH parsing:\n    - Add custom xpath parsingto gnmi.Path to allow for paths including column `:`.\n- TLS:\n    - Allow configurable TLS versions per target, the minimum, the maximum and the preferred TLS versions ca be configured.\n  \n### v0.4.3 - November 10th 2020\n- Missing path:\n    - Initialize the path field if not present in SubscribeResponse\n  \n### v0.4.2 - November 5th 2020\n- YANG:\n    - Prompt command flags `--file` and `--dir` support globs.\n- Subscribe:\n    - added flags `--output` that allows to choose a single output for `subscribe` updates\n- Prompt:\n    - Max suggestions is automatically adjusted based on the terminal height.\n    - Add suggestions for address and subscriptions.\n\n### v0.4.1 - October 22nd 2020\n- Prompt:\n    - Add suggestions of xpath with origin, `--suggest-with-origin`.\n### v0.4.0 - 
October 21st 2020\n- New Command:\n    - Add new command `prompt`\n- Prompt:\n    - Add ctrl+z key bind to delete a single path element.\n    - Add YANG info to xpath suggestions.\n    - Add GoLeft, GoRight key binds.\n    - Sort xpaths and prefixes suggestions.\n    - xpaths suggestions are properly generated if a prefix is present.\n    - flag `--suggest-all-flags` allows adding global flags suggestion in prompt mode.\n  \n- Prometheus output:\n    - Add support for Prometheus output plugin.\n    \n### v0.3.0 - October 1st 2020\n- InfluxDB output:\n    - Add support for influxDB output plugin.\n\n### v0.2.3 - September 18th 2020\n- Retry\n    - Add basic RPC retry mechanism.\n- ONCE mode subscription:\n    - Handle targets that send an EOF error instead of a SyncResponse to signify the end of ONCE subscriptions.\n- Docker image:\n    - Docker images added to ghcr.io as well as docker hub.\n  \n### v0.2.2 - September 3rd 2020\n- CLI:\n    - Properly handle paths that include quotes.\n- Unix Socket:\n    - Allow send/rcv of gNMI data to/from a unix socket.\n- Outputs:\n    - Add TCP output plugin.\n\n### v0.2.1 - August 11th 2020\n- Releases:\n    - Add .deb. and .rpm packages to releases.\n - Outputs:\n    - Add UDP output plugin. \n\n### v0.2.0 - August 7th 2020\n- Releases:\n    - Add ARM releases.\n    - Push docker image to docker hub.\n  \n### v0.1.1 - July 23rd 2020\n- Set Cmd:\n    - Support `json_ietf` encoding when the value is specified from a file.\n  \n### v0.1.0 - July 16th 2020\n- Outputs:\n    - Allow NATS/STAN output subject customization.\n\n### v0.0.7 - July 16th 2020\n- gNMI Target:\n    - Add support for gNMI Target field.\n- gNMI Origin:\n    - Add support for gNMI Origin field.  
\n- Prometheus internal metrics:\n    - Add support for `gnmic` internal metrics via a Prometheus server.\n- Outputs:\n    - Add support for multiple output plugins (file, NATS, STAN, Kafka)\n- Targets:\n    - Support target specific configuration.\n- Poll Subscription:\n    - Allow selecting polled targets and subscription using a CLI select menu.\n- gNMI Models:\n    - Support multiple Models in Get and Subscribe RPCs.\n\n### v0.0.6 - June 2nd 2020\n- Nokia Dialout:\n    - Add Support for Nokia Dialout telemetry.\n- Printing:\n    - Convert timestamps to Time.\n\n### v0.0.5 - May 18th 2020\n- Formatting:\n    - Add `textproto` format.\n\n### v0.0.4 - May 11th 2020\n- Logging:\n    - Support logging to file instead of Stderr.\n- Set Command:\n    - support Set values from YAML file.\n\n### v0.0.3 - April 23rd 2020\n- Proxy:\n    - Allow usage of ENV proxy values for gRPC connections.\n- Installation:\n    - Add installation script.\n\n### v0.0.2 - April 13th 2020\n- Terminal printing clean up.\n- Path Command: Add search option.\n\n### v0.0.1 - March 24th 2020\n- Capabilities RPC Command.\n- Get RPC Command.\n- Subscribe RPC Command.\n- Set RPC Command.\n- TLS support.\n- Version Command.\n- Path Commnd.\n\n### initial Commit - February 20th 2020\n"
  },
  {
    "path": "docs/cmd/capabilities.md",
    "content": "## Description\n\nThe `[cap | capabilities]` command represents the [gNMI Capabilities RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L51).\n\nIt is used to send a [Capability Request](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L431) to the specified target(s) and expects one [Capability Response](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L440) per target.\n\n[Capabilities](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#32-capability-discovery) allows the client to retrieve the set of capabilities that is supported by the target:\n\n* gNMI version\n* available data models\n* supported encodings\n* gNMI extensions\n\nThis allows the client to, for example, validate the service version that is implemented and retrieve the set of models that the target supports. The models can then be specified in subsequent Get/Subscribe RPCs to precisely tell the target which models to use.\n\n### Usage\n\n`gnmic [global-flags] capabilities [local-flags]`\n\n### Examples\n\n#### single host\n\n```text\ngnmic -a <ip:port> --username <user> --password <password> \\\n      --insecure capabilities\n\ngNMI_Version: 0.7.0\nsupported models:\n  - nokia-conf, Nokia, 19.10.R2\n  - nokia-state, Nokia, 19.10.R2\n  - nokia-li-state, Nokia, 19.10.R2\n  - nokia-li-conf, Nokia, 19.10.R2\n<< SNIPPED >>\nsupported encodings:\n  - JSON\n  - BYTES\n```\n\n#### multiple hosts\n\n```bash\ngnmic -a <ip:port>,<ip:port> -u <user> -p <password> \\\n      --insecure cap\n```\n\n\n\n<script id=\"asciicast-319561\" src=\"https://asciinema.org/a/319561.js\" async></script>"
  },
  {
    "path": "docs/cmd/collector.md",
    "content": "### Description\n\nThe `[collect | collector | coll | c]` command starts gNMIc as a long-running telemetry collector service. Unlike the `subscribe` command which is designed for interactive use, the collector command is optimized for production deployments with dynamic configuration capabilities via REST API.\n\nThe collector provides:\n\n- **Dynamic configuration** - Add, modify, or remove targets, subscriptions, outputs, inputs, and processors at runtime via REST API\n- **Clustering support** - Multiple collector instances can form a cluster with automatic target distribution and failover\n- **Embedded gNMI server** - Expose collected telemetry to downstream gNMI clients\n- **Tunnel target support** - Accept connections from gNMI tunnel targets\n\n### Usage\n\n`gnmic [global-flags] collect [local-flags]`\n\n### Local Flags\n\n#### pyroscope-server-address\n\nThe `[--pyroscope-server-address]` flag sets the Pyroscope server address for continuous profiling. When set, the collector will send profiling data to the specified Pyroscope server.\n\n#### pyroscope-application-name\n\nThe `[--pyroscope-application-name]` flag sets the application name used in Pyroscope. 
Defaults to `gnmic-collector`.\n\n### Subcommands\n\nThe collector command provides subcommands to interact with a running collector instance via its REST API:\n\n| Subcommand | Aliases | Description |\n|------------|---------|-------------|\n| `targets` | `target`, `tg` | Manage targets |\n| `subscriptions` | `subscription`, `sub` | Manage subscriptions |\n| `outputs` | `output`, `out` | Manage outputs |\n| `inputs` | `input`, `in` | Manage inputs |\n| `processors` | `processor`, `proc` | Manage processors |\n\nEach subcommand supports the following operations:\n\n| Operation | Aliases | Description |\n|-----------|---------|-------------|\n| `list` | `ls` | List all resources |\n| `get` | `g`, `show`, `sh` | Get a specific resource |\n| `set` | `create`, `cr` | Create or update a resource |\n| `delete` | `d`, `del`, `rm` | Delete a resource |\n\n### Configuration\n\nThe collector is configured using the standard gNMIc configuration file. The key sections are:\n\n```yaml\n# API server configuration (required for collector)\napi-server:\n  address: :7890\n  timeout: 10s\n  tls:\n    ca-file:\n    cert-file:\n    key-file:\n  enable-metrics: false\n  debug: false\n\n# Targets to collect from\ntargets:\n  router1:\n    address: 10.0.0.1:57400\n    username: admin\n    password: admin\n    skip-verify: true\n\n# Subscriptions define what data to collect\nsubscriptions:\n  interfaces:\n    paths:\n      - /interfaces/interface/state/counters\n    mode: stream\n    stream-mode: sample\n    sample-interval: 10s\n\n# Outputs define where to send collected data\noutputs:\n  prometheus:\n    type: prometheus\n    listen: :9804\n    path: /metrics\n\n# Inputs for receiving data from message queues\ninputs:\n  nats-input:\n    type: nats\n    address: nats://localhost:4222\n    subject: telemetry.>\n\n# Event processors for data transformation\nprocessors:\n  add-hostname:\n    event-add-tag:\n      tags:\n        - tag-name: hostname\n          value: ${HOST}\n\n# Clustering 
configuration (optional)\nclustering:\n  cluster-name: gnmic-cluster\n  instance-name: gnmic-1\n  locker:\n    type: consul\n    address: consul:8500\n\n# gNMI server configuration (optional)\ngnmi-server:\n  address: :57401\n  skip-verify: true\n```\n\n### Examples\n\n#### 1. Start a basic collector\n\n```bash\ngnmic --config collector.yaml collect\n```\n\n#### 2. Start with Pyroscope profiling\n\n```bash\ngnmic --config collector.yaml collect \\\n      --pyroscope-server-address http://pyroscope:4040 \\\n      --pyroscope-application-name my-collector\n```\n\n#### 3. List targets from a running collector\n\n```bash\ngnmic --config collector.yaml collect targets list\n```\n\nOutput:\n```\nNAME      ADDRESS         USERNAME  STATE    SUBSCRIPTIONS  OUTPUTS  INSECURE  SKIP VERIFY\nrouter1   10.0.0.1:57400  admin     running  2              1        false     true\nrouter2   10.0.0.2:57400  admin     running  2              1        false     true\n```\n\n#### 4. Get details of a specific target\n\n```bash\ngnmic --config collector.yaml collect targets get --name router1\n```\n\n#### 5. Create a new target\n\n```bash\ngnmic --config collector.yaml collect targets set --input target.yaml\n```\n\nWhere `target.yaml` contains:\n```yaml\nname: router3\naddress: 10.0.0.3:57400\nusername: admin\npassword: admin\nskip-verify: true\nsubscriptions:\n  - interfaces\noutputs:\n  - prometheus\n```\n\n#### 6. Delete a target\n\n```bash\ngnmic --config collector.yaml collect targets delete --name router3\n```\n\n#### 7. List subscriptions\n\n```bash\ngnmic --config collector.yaml collect subscriptions list\n```\n\nOutput:\n```\nNAME        PREFIX  PATHS                                    ENCODING  MODE           SAMPLE INTERVAL  TARGETS  OUTPUTS\ninterfaces  -       /interfaces/interface/state/counters     json      stream/sample  10s              2/2      1\n```\n\n#### 8. 
List outputs\n\n```bash\ngnmic --config collector.yaml collect outputs list\n```\n\nOutput:\n```\nNAME        TYPE        FORMAT  EVENT PROCESSORS\nprometheus  prometheus  -       1\n```\n\n#### 9. List processors with details\n\n```bash\ngnmic --config collector.yaml collect processors list --details\n```\n\n### See Also\n\n- [Collector Introduction](../user_guide/collector/collector_intro.md) - Overview and architecture\n- [Collector Configuration](../user_guide/collector/collector_configuration.md) - Detailed configuration reference\n- [Collector REST API](../user_guide/collector/collector_api.md) - API endpoints reference\n"
  },
  {
    "path": "docs/cmd/diff/diff.md",
    "content": "\n### Description\n\nThe `diff` command is similar to a `get` or `subscribe` (mode ONCE) commands ran against at least 2 targets, a reference and one or more compared targets.\nThe command will compare the returned responses from the compared targets to the ones returned from the reference target and only print the difference between them.\n\nThe output is printed as a list \"flattened\" gNMI updates, each line containing an XPath pointing to a leaf followed by its value.\n\nEach line is preceded with either signs `+` or `-`:\n\n- `+` means the leaf and its value are present in the compared target but not in the reference target.\n- `-` means the leaf and its value are present in the reference target but not in the compared target.\n\ne.g:\n\n```text\n+\tnetwork-instance[name=default]/interface[name=ethernet-1/36.0]: {}\n-\tnetwork-instance[name=default]/protocols/bgp/autonomous-system: 101\n```\n\nThe output above indicates:\n\n- The compared target has interface `ethernet-1/36.0` added to network instance `default` while the reference doesn't.\n- The compared target is missing the autonomous-system `101` configuration under network-instance `default` protocols/bgp compared to the reference.\n\nThe data to be compared is specified with the flag `--path`, which can be set multiple times to compare multiple data sets.\nBy default, the data it is retrieved using a `Get RPC`, if the flag `--sub` is present, a `Subscribe RPC` with mode ONCE is used instead.\n\nEach of the `get` and `subscribe` methods has pros and cons, with the `get` method you can choose to compare `CONFIG` or `STATE` only, via the flag `--type`.\nThe `subscribe` method allows to stream the response(s) in case a larger data set needs to be compared. 
In addition to that, some routers support more encoding options when using the `subscribe RPC`\n\nMultiple targets can be compared to the reference at once, the printed output of each difference will start with the line `\"$reference\" vs \"$compared\"`\n\nAliases: `compare`\n\n### Usage\n\n`gnmic [global-flags] diff [local-flags]`\n\n### Flags\n\n#### ref\n\nThe `--ref` flag is a mandatory flag that specifies the target to be used as a reference to compare other targets to.\n\n#### compare\n\nThe `--compare` flag is a mandatory flag that specifies the targets to compare to the reference target.\n\n#### prefix\n\nAs per [path prefixes](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#241-path-prefixes), the prefix `[--prefix]` flag represents a common prefix that is applied to all paths specified using the local `--path` flag. Defaults to `\"\"`.\n\n#### path\n\nThe mandatory path flag `[--path]` is used to specify the [path(s)](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) the client wants to receive a snapshot of.\n\nMultiple paths can be specified by using multiple `--path` flags:\n\n```bash\ngnmic --insecure \\\n      --ref router1 \\\n      --compare router2,router3 \\\n      diff --path \"/state/ports[port-id=*]\" \\\n           --path \"/state/router[router-name=*]/interface[interface-name=*]\"\n```\n\nIf a user needs to provide [origin](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) information to the Path message, the following pattern should be used for the path string: `\"origin:path\"`:\n\n#### model\n\nThe optional model flag `[--model]` is used to specify the schema definition modules that the target should use when returning a GetResponse. The model name should match the names returned in Capabilities RPC. 
Currently only single model name is supported.\n\n#### target\n\nWith the optional `[--target]` flag it is possible to supply the [path target](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target) information in the prefix field of the GetRequest message.\n\n#### type\n\nThe type flag `[--type]` is used to specify the [data type](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L399) requested from the server.\n\nOne of:  ALL, CONFIG, STATE, OPERATIONAL (defaults to \"ALL\")\n\n#### sub\n\nWhen the flag `--sub` is present, `gnmic` will use a `Subscribe RPC` with mode ONCE, instead of a `Get RPC` to retrieve the data to be compared.\n\n### Examples\n\n```bash\ngnmic diff -t config --skip-verify -e ascii \\\n           --ref clab-te-leaf1 \\\n           --compare clab-te-leaf2 \\\n           --path /network-instance\n```\n\n```bash\n\"clab-te-leaf1:57400\" vs \"clab-te-leaf2:57400\"\n+\tnetwork-instance[name=default]/interface[name=ethernet-1/36.0]                                    : {}\n-\tnetwork-instance[name=default]/protocols/bgp/autonomous-system                                    : 101\n+\tnetwork-instance[name=default]/protocols/bgp/autonomous-system                                    : 102\n-\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:11:1]            : {}\n-\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:11:1]/admin-state: enable\n-\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:11:1]/peer-as    : 201\n-\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:11:1]/peer-group : eBGPv6\n-\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:12:1]            : {}\n-\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:12:1]/admin-state: 
enable\n-\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:12:1]/peer-as    : 202\n-\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:12:1]/peer-group : eBGPv6\n+\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:21:1]            : {}\n+\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:21:1]/admin-state: enable\n+\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:21:1]/peer-as    : 201\n+\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:21:1]/peer-group : eBGPv6\n+\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:22:1]            : {}\n+\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:22:1]/admin-state: enable\n+\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:22:1]/peer-as    : 202\n+\tnetwork-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:22:1]/peer-group : eBGPv6\n+\tnetwork-instance[name=default]/protocols/bgp/router-id                                            : 10.0.1.2\n-\tnetwork-instance[name=default]/protocols/bgp/router-id                                            : 10.0.1.1\n-\tnetwork-instance[name=myins]                                                                      : {}\n-\tnetwork-instance[name=myins]/admin-state                                                          : enable\n-\tnetwork-instance[name=myins]/description                                                          : desc1\n-\tnetwork-instance[name=myins]/interface[name=ethernet-1/36.0]                                      : {}\n-\tnetwork-instance[name=myins]/type                                                                 : ip-vrf\n```\n"
  },
  {
    "path": "docs/cmd/diff/diff_set_to_notifs.md",
    "content": "### Description\n\nThe `diff set-to-notifs` command is used to verify whether a set of\nnotifications from a `GetResponse` or a stream of `SubscribeResponse` messages\ncomply with a `SetRequest` messages in textproto format. The envisioned use case\nis to check whether a stored snapshot of device state matches that of the\nintended state as specified by a `SetRequest`.\n\nThe output is printed as a list of \"flattened\" gNMI updates, each line\ncontaining an XPath pointing to a leaf followed by its value.\n\nEach line is preceded with either signs `+` or `-`:\n\n-   `+` means the leaf and its value are present in the new SetRequest but not\n    in the reference SetRequest.\n-   `-` means the leaf and its value are present in the reference SetRequest but\n    not in the new SetRequest.\n\ne.g:\n\n```text\nSetToNotifsDiff(-want/SetRequest, +got/Notifications):\n- /lacp/interfaces/interface[name=Port-Channel9]/config/interval: \"FAST\"\n- /lacp/interfaces/interface[name=Port-Channel9]/config/name: \"Port-Channel9\"\n- /lacp/interfaces/interface[name=Port-Channel9]/name: \"Port-Channel9\"\n- /network-instances/network-instance[name=VrfBlue]/config/name: \"VrfBlue\"\n- /network-instances/network-instance[name=VrfBlue]/config/type: \"openconfig-network-instance-types:L3VRF\"\n- /network-instances/network-instance[name=VrfBlue]/name: \"VrfBlue\"\nm /system/config/hostname:\n  - \"violetsareblue\"\n  + \"rosesarered\"\n```\n\nThe output above indicates:\n\n-   The set of paths starting with\n    `/lacp/interfaces/interface[name=Port-Channel9]/config/interval: \"FAST\"` are\n    present in the SetRequest but missing in the response from the device.\n-   The value at path `/system/config/hostname` does not match that of the\n    SetRequest.\n\nWhen `--full` is specified, values common between the SetRequest and the\nresponse messages are also shown.\n\n### How to obtain a GetResponse or SubscribeResponse\n\nTo obtain GetRespnse/SubscribeResponse in textproto 
format, simply run `gnmic`'s\nsubscribe or get functions and pass in the flag `--format prototext`.\n\nResponses retrieved from either GetRequest or SubscribeRequest are supported by\nthis command's `--response` flag.\n\n### Usage\n\n`gnmic [global-flags] diff set-to-notifs [local-flags]`\n\n### Flags\n\n#### setrequest\n\nThe `--setrequest` flag is a mandatory flag that specifies the reference gNMI\nSetRequest textproto file for comparing against the new SetRequest.\n\n#### response\n\nThe `--response` flag is a mandatory flag that specifies the gNMI Notifications\ntextproto file (can contain a GetResponse or SubscribeResponse stream) for\ncomparing against the reference SetRequest.\n\n### Examples\n\n```bash\n$ gnmic diff set-to-notifs --setrequest cmd/demo/setrequest.textproto --response cmd/demo/subscriberesponses.textproto\n```\n"
  },
  {
    "path": "docs/cmd/diff/diff_setrequest.md",
    "content": "### Description\n\nThe `diff setrequest` command is used to compare the intent between two\n`SetRequest` messages encoded in textproto format.\n\nThe output is printed as a list of \"flattened\" gNMI updates, each line\ncontaining an XPath pointing to a leaf followed by its value.\n\nEach line is preceded with either signs `+` or `-`:\n\n-   `+` means the leaf and its value are present in the new SetRequest but not\n    in the reference SetRequest.\n-   `-` means the leaf and its value are present in the reference SetRequest but\n    not in the new SetRequest.\n\ne.g:\n\n```text\nSetRequestIntentDiff(-A, +B):\n-------- deletes/replaces --------\n+ /network-instances/network-instance[name=VrfBlue]: deleted or replaced only in B\n-------- updates --------\nm /system/config/hostname:\n  - \"violetsareblue\"\n  + \"rosesarered\"\n```\n\nThe output above indicates:\n\n-   The new target deletes or replaces the path\n    `/network-instances/network-instance[name=VrfBlue]` while the reference\n    doesn't.\n-   The new target changes the value of `/system/config/hostname` compared to\n    the reference from `\"violetsareblue\"` to `\"rosesarered\"`.\n\nWhen `--full` is specified, values common between the two SetRequest are also\nshown.\n\n### SetRequest Intent\n\nIt is possible for two SetRequests to be different but which are semantically\nequivalent -- i.e. they both modify the same leafs in the same ways. In other\nwords, their overall effects are the same.\n\nFor example, a replace on the leaf `/system/config/hostname` with the value\n`\"foo\"` is the same as an update on the same leaf with the same value. 
A replace\non the container `/system/` with the value `{ config: { hostname: \"foo\" } }` is\nthe same as a delete on that container followed by a replace to the leaf.\nOverwrites are also possible, although this is currently unsupported.\n\nIn order to compare equivalent SetRequests correctly, this tool breaks down a\nSetRequest into its \"minimal intent\" (deletes followed by updates) prior to the\ndiff computation. This is why the output groups deletes/replaces into the same\nsection.\n\n### Usage\n\n`gnmic [global-flags] diff setrequest [local-flags]`\n\n### Flags\n\n#### ref\n\nThe `--ref` flag is a mandatory flag that specifies the reference gNMI\nSetRequest textproto file for comparing against the new SetRequest.\n\n#### new\n\nThe `--new` flag is a mandatory flag that specifies the new gNMI SetRequest\ntextproto file for comparing against the reference SetRequest.\n\n### Examples\n\n```bash\n$ gnmic diff setrequest --ref cmd/demo/setrequest.textproto --new cmd/demo/setrequest2.textproto\n```\n"
  },
  {
    "path": "docs/cmd/generate/generate_path.md",
    "content": "\n\n### Description \n\nThe path sub command is an alias for the [`gnmic path`](../../cmd/path.md) command.\n"
  },
  {
    "path": "docs/cmd/generate/generate_set_request.md",
    "content": "\n### Description\n\nThe set-request sub command generates a Set request file given a list of update and/or replace paths.\n\nIf no paths are supplied, a root (`/`) replace path is used as a default.\n\nThe generated file can be manually edited and used with `gnmic` set command:\n\n`gnmic set --request-file <path_to_generated_file>`\n\nAliases: `sreq`, `srq`, `sr`\n\n### Usage\n\n`gnmic [global-flags] generate [generate-flags] set-request [sub-command-flags]`\n\n### Flags\n\n#### update\n\nThe `--update` flag specifies a valid xpath, used to generate an __updates__ section of the [set request file](../set.md#template-based-set-request).\n\nMultiple `--update` flags can be supplied.\n\n#### replace\n\nThe `--replace` flag specifies a valid xpath, used to generate a __replaces__ section of the [set request file](../set.md#template-based-set-request).\n\nMultiple `--replace` flags can be supplied.\n\n### Examples\n\n#### Openconfig\n\nYANG repo: [openconfig/public](https://github.com/openconfig/public)\n\nClone the OpenConfig repository:\n\n```bash\ngit clone https://github.com/openconfig/public\ncd public\n```\n\n```bash\ngnmic --encoding json_ietf \\\n          generate  \\\n          --file release/models \\\n          --dir third_party \\\n          --exclude ietf-interfaces \\\n          set-request \\\n          --replace /interfaces/interface/subinterfaces/subinterface/ipv4/addresses/address\n```\n\nThe above command generates the below YAML output (JSON if `--json` flag is supplied)\n\n```yaml\nreplaces:\n- path: /interfaces/interface/subinterfaces/subinterface/ipv4/addresses/address\n  value:\n  - config:\n      ip: \"\"\n      prefix-length: \"\"\n    ip: \"\"\n    vrrp:\n      vrrp-group:\n      - config:\n          accept-mode: \"false\"\n          advertisement-interval: \"100\"\n          preempt: \"true\"\n          preempt-delay: \"0\"\n          priority: \"100\"\n          virtual-address: \"\"\n          virtual-router-id: \"\"\n     
   interface-tracking:\n          config:\n            priority-decrement: \"0\"\n            track-interface: \"\"\n        virtual-router-id: \"\"\n  encoding: JSON_IETF\n```\n\nThe __value__ section can be filled with the desired configuration variables.\n\n\n#### Nokia SR OS\n\n```bash\ngit clone https://github.com/nokia/7x50_YangModels\ncd 7x50_YangModels\ngit checkout sros_21.2.r2\n```\n\n```bash\ngnmic generate \\\n        --file YANG/nokia-combined \\\n        --dir YANG \\\n        set-request \\\n        --replace /configure/service/vprn/bgp/family\n```\n\nThe above command generates the below YAML output (JSON if `--json` flag is supplied)\n\n```yaml\nreplaces:\n- path: /configure/service/vprn/bgp/family\n  value:\n    flow-ipv4: \"false\"\n    flow-ipv6: \"false\"\n    ipv4: \"true\"\n    ipv6: \"false\"\n    label-ipv4: \"false\"\n    mcast-ipv4: \"false\"\n    mcast-ipv6: \"false\"\n```\n\n#### Cisco\n\nYANG repo: [YangModels/yang](https://github.com/YangModels/yang)\n\nClone the `YangModels/yang` repo and change into the main directory of the repo:\n\n```bash\ngit clone https://github.com/YangModels/yang\ncd yang/vendor\n```\n\n```bash\ngnmic --encoding json_ietf \\\n          generate  \\\n          --file vendor/cisco/xr/721/Cisco-IOS-XR-um-router-bgp-cfg.yang \\\n          --file vendor/cisco/xr/721/Cisco-IOS-XR-ipv4-bgp-oper.yang \\\n          --dir standard/ietf \\\n          set-request \\\n          --path /active-nodes\n```\n\nThe above command generates the below YAML output (JSON if `--json` flag is supplied)\n\n```yaml\nreplaces:\n- path: /active-nodes\n  value:\n    active-node:\n    - node-name: \"\"\n      selective-vrf-download:\n        role:\n          address-family:\n            ipv4:\n              unicast: \"\"\n            ipv6:\n              unicast: \"\"\n        vrf-groups:\n          vrf-group:\n          - vrf-group-name: \"\"\n  encoding: JSON_IETF\n```\n\n#### Juniper\n\nYANG repo: 
[Juniper/yang](https://github.com/Juniper/yang)\n\nClone the Juniper YANG repository and change into the release directory:\n\n```bash\ngit clone https://github.com/Juniper/yang\ncd yang/20.3/20.3R1\n```\n\n```bash\ngnmic --encoding json_ietf \\\n          generate \\\n          --file junos/conf \\\n          --dir common \\\n          set-request \\\n          --replace /configuration/interfaces/interface/unit/family/inet/address\n```\n\nThe above command generates the below YAML output (JSON if `--json` flag is supplied)\n\n```yaml\nreplaces:\n- path: /configuration/interfaces/interface/unit/family/inet/address\n  value:\n  - apply-groups: \"\"\n    apply-groups-except: \"\"\n    apply-macro:\n    - data:\n      - name: \"\"\n        value: \"\"\n      name: \"\"\n    arp:\n    - case_1: \"\"\n      case_2: \"\"\n      l2-interface: \"\"\n      name: \"\"\n      publish: \"\"\n    broadcast: \"\"\n    destination: \"\"\n    destination-profile: \"\"\n    master-only: \"\"\n    multipoint-destination:\n    - apply-groups: \"\"\n      apply-groups-except: \"\"\n      apply-macro:\n      - data:\n        - name: \"\"\n          value: \"\"\n        name: \"\"\n      case_1: \"\"\n      case_2: \"\"\n      epd-threshold:\n        apply-groups: \"\"\n        apply-groups-except: \"\"\n        apply-macro:\n        - data:\n          - name: \"\"\n            value: \"\"\n          name: \"\"\n        epd-threshold-plp0: \"\"\n        plp1: \"\"\n      inverse-arp: \"\"\n      name: \"\"\n      oam-liveness:\n        apply-groups: \"\"\n        apply-groups-except: \"\"\n        apply-macro:\n        - data:\n          - name: \"\"\n            value: \"\"\n          name: \"\"\n        down-count: \"\"\n        up-count: \"\"\n      oam-period:\n        disable: {}\n        oam_period: \"\"\n      shaping:\n        apply-groups: \"\"\n        apply-groups-except: \"\"\n        apply-macro:\n        - data:\n          - name: \"\"\n            value: \"\"\n          name: 
\"\"\n        cbr:\n          cbr-value: \"\"\n          cdvt: \"\"\n        queue-length: \"\"\n        rtvbr:\n          burst: \"\"\n          cdvt: \"\"\n          peak: \"\"\n          sustained: \"\"\n        vbr:\n          burst: \"\"\n          cdvt: \"\"\n          peak: \"\"\n          sustained: \"\"\n      transmit-weight: \"\"\n    name: \"\"\n    preferred: \"\"\n    primary: \"\"\n    virtual-gateway-address: \"\"\n    vrrp-group:\n    - advertisements-threshold: \"\"\n      apply-groups: \"\"\n      apply-groups-except: \"\"\n      apply-macro:\n      - data:\n        - name: \"\"\n          value: \"\"\n        name: \"\"\n      authentication-key: \"\"\n      authentication-type: \"\"\n      case_1: \"\"\n      case_2: \"\"\n      case_3: \"\"\n      name: \"\"\n      preferred: \"\"\n      priority: \"\"\n      track:\n        apply-groups: \"\"\n        apply-groups-except: \"\"\n        apply-macro:\n        - data:\n          - name: \"\"\n            value: \"\"\n          name: \"\"\n        interface:\n        - apply-groups: \"\"\n          apply-groups-except: \"\"\n          apply-macro:\n          - data:\n            - name: \"\"\n              value: \"\"\n            name: \"\"\n          bandwidth-threshold:\n          - name: \"\"\n            priority-cost: \"\"\n          name: \"\"\n          priority-cost: \"\"\n        priority-hold-time: \"\"\n        route:\n        - priority-cost: \"\"\n          route_address: \"\"\n          routing-instance: \"\"\n      virtual-link-local-address: \"\"\n      vrrp-inherit-from:\n        active-group: \"\"\n        active-interface: \"\"\n        apply-groups: \"\"\n        apply-groups-except: \"\"\n        apply-macro:\n        - data:\n          - name: \"\"\n            value: \"\"\n          name: \"\"\n    web-authentication:\n      apply-groups: \"\"\n      apply-groups-except: \"\"\n      apply-macro:\n      - data:\n        - name: \"\"\n          value: \"\"\n        name: 
\"\"\n      http: \"\"\n      https: \"\"\n      redirect-to-https: \"\"\n  encoding: JSON_IETF\n```\n\n#### Arista\n\nYANG repo: [aristanetworks/yang](https://github.com/aristanetworks/yang)\n\nArista uses a subset of OpenConfig modules and does not provide IETF modules inside their repo. So make sure you have IETF models available so you can reference it, a `openconfig/public` is a good candidate.\n\nClone the Arista YANG repo:\n\n```bash\ngit clone https://github.com/aristanetworks/yang\ncd yang\n```\n\nThe above command generates the below YAML output (JSON if `--json` flag is supplied)\n\n```bash\ngnmic --encoding json_ietf \\\n          generate\n          --file EOS-4.23.2F/openconfig/public/release/models \\\n          --dir ../openconfig/public/third_party/ietf \\\n          --exclude ietf-interfaces \\\n          set-request \\\n          --replace bgp/neighbors/neighbor/config\n```\n\n```yaml\nreplaces:\n- path: bgp/neighbors/neighbor/config\n  value:\n    auth-password: \"\"\n    description: \"\"\n    enabled: \"true\"\n    local-as: \"\"\n    neighbor-address: \"\"\n    peer-as: \"\"\n    peer-group: \"\"\n    peer-type: \"\"\n    remove-private-as: \"\"\n    route-flap-damping: \"false\"\n    send-community: NONE\n```\n"
  },
  {
    "path": "docs/cmd/generate.md",
    "content": "\n\n### Description\n\nMost `gNMI` targets use YANG as a modeling language for their datastores.\nIn order to access and manipulate the stored data (`Get`, `Set`, `Subscribe`), a tool should be aware of the underlying YANG model, be able to generate paths pointing to the desired `gNMI` objects as well as building configuration payloads matching data instances on the targets.\n\nThe `generate` command takes the target's YANG models as input and generates:\n\n- Paths in `xpath` or `gNMI` formats.\n- Configuration payloads that can be used as [update](../cmd/set.md#3-update-with-a-value-from-json-or-yaml-file) or [replace](../cmd/set.md#3-replace-with-a-value-from-json-or-yaml-file) input files for the Set command.\n- A Set request file that can be used as a [template](../cmd/set.md#template-based-set-request) with the Set command.\n\nAliases: `gen`\n\n### Usage\n\n`gnmic [global-flags] generate [local-flags]`\n\nor\n\n`gnmic [global-flags] generate [local-flags] sub-command [sub-command-flags]`\n\n### Persistent Flags\n\n#### output\n\nThe `--output` flag specifies the file to which the generated output will be written, defaults to `stdout`\n\n#### json\n\nWhen used with `generate` command, the `--json` flag, if present changes the output format from YAML to JSON.\n\nWhen used with `generate path` command, it outputs the path, the leaf **type**, its **description**, its **default value** and if it is a **state leaf** or not in an array of JSON objects.\n\n### Local Flags\n\n#### path\n\nThe `--path` flag specifies the path whose payload (JSON/YAML) will be generated.\n\nDefaults to `/`\n\n#### config-only\n\nThe `--config-only` flag, if present instructs `gnmic` to generate JSON/YAML payloads from YANG nodes not marked as `config false`.\n\n#### camel-case\n\nThe `--camel-case` flag, if present allows to convert all the keys in the generated JSON/YAML payload to `CamelCase`\n\n#### snake-case\n\nThe `--snake-case` flag, if present allows to convert all 
the keys in the generated JSON/YAML payload to `snake_case`\n\n### Sub Commands\n\n#### Path\n\nThe path sub command is an alias for the [`gnmic path`](../cmd/path.md) command.\n\n#### Set-request\n\nThe [set-request](../cmd/generate/generate_set_request.md) sub command generates a Set request file given a list of update and/or replace paths.\n\n### Examples\n\n#### Openconfig\n\nYANG repo: [openconfig/public](https://github.com/openconfig/public)\n\nClone the OpenConfig repository:\n\n```bash\ngit clone https://github.com/openconfig/public\ncd public\n```\n\n```bash\ngnmic --encoding json_ietf \\\n          generate  \\\n          --file release/models \\\n          --dir third_party \\\n          --exclude ietf-interfaces \\\n          --path /interfaces/interface/subinterfaces/subinterface/ipv4/addresses/address\n```\n\n```yaml\n- config:\n    ip: \"\"\n    prefix-length: \"\"\n  ip: \"\"\n  vrrp:\n    vrrp-group:\n    - config:\n        accept-mode: \"false\"\n        advertisement-interval: \"100\"\n        preempt: \"true\"\n        preempt-delay: \"0\"\n        priority: \"100\"\n        virtual-address: \"\"\n        virtual-router-id: \"\"\n      interface-tracking:\n        config:\n          priority-decrement: \"0\"\n          track-interface: \"\"\n      virtual-router-id: \"\"\n```\n"
  },
  {
    "path": "docs/cmd/get.md",
    "content": "### Description\n\nThe `get` command represents the gNMI [Get RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L57).\n\nIt is used to send a [GetRequest](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L395) to the specified target(s) (using the global flag [`--address`](../global_flags.md#address)) and expects one [GetResponse](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L420) per target, per path.\n\nThe [Get RPC](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#33-retrieving-snapshots-of-state-information) is used to retrieve a snapshot of data from the target. It requests that the target snapshots a subset of the data tree as specified by the paths included in the message and serializes this to be returned to the client using the specified encoding.\n\n### Usage\n\n`gnmic [global-flags] get [local-flags]`\n\n### Flags\n\n#### prefix\n\nAs per [path prefixes](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#241-path-prefixes), the prefix `[--prefix]` flag represents a common prefix that is applied to all paths specified using the local `--path` flag. Defaults to `\"\"`.\n\n#### path\n\nThe mandatory path flag `[--path]` is used to specify the [path(s)](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) the client wants to receive a snapshot of.\n\nMultiple paths can be specified by using multiple `--path` flags:\n\n```bash\ngnmic -a <ip:port> --insecure \\\n      get --path \"/state/ports[port-id=*]\" \\\n          --path \"/state/router[router-name=*]/interface[interface-name=*]\"\n```\n\nIf a user needs to provide [origin](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) information to the Path message, the following pattern should be used for the path string: `\"origin:path\"`:\n\n!!! 
note\n    The path after the origin value has to start with a `/`\n\n```\ngnmic -a <ip:port> --insecure \\\n      get --path \"openconfig-interfaces:/interfaces/interface\"\n```\n\n#### model\n\nThe optional model flag `[--model]` is used to specify the schema definition modules that the target should use when returning a GetResponse. The model name should match the names returned in Capabilities RPC. Currently only single model name is supported.\n\n#### target\n\nWith the optional `[--target]` flag it is possible to supply the [path target](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target) information in the prefix field of the GetRequest message.\n\n#### values-only\n\nThe flag `[--values-only]` allows to print only the values returned in a GetResponse. This is useful when only the value of a leaf is of interest, like check if a value was set correctly.\n\n#### type\n\nThe type flag `[--type]` is used to specify the [data type](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L399) requested from the server.\n\nOne of:  ALL, CONFIG, STATE, OPERATIONAL (defaults to \"ALL\")\n\n#### processor\n\nThe `[--processor]` flag allows to list [event processor](../user_guide/event_processors/intro.md) names to be run as a result of receiving the GetResponse messages.\n\nThe processors are run in the order they are specified (`--processor proc1,proc2` or `--processor proc1 --processor proc2`).\n\n#### depth\n\nThe `[--depth]` flag sets the gNMI extension depth value as defined [here](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-depth.md)\n\n### Examples\n\n```bash\n# simple Get RPC\ngnmic -a <ip:port> get --path \"/state/port[port-id=*]\"\n\n# Get RPC with multiple paths\ngnmic -a <ip:port> get --path \"/state/port[port-id=*]\" \\\n      --path \"/state/router[router-name=*]/interface[interface-name=*]\"\n\n# Get RPC with path prefix\ngnmic -a <ip:port> get --prefix \"/state\" \\\n      
--path \"port[port-id=*]\" \\\n      --path \"router[router-name=*]/interface[interface-name=*]\"\n```\n\n<script\nid=\"asciicast-319562\" src=\"https://asciinema.org/a/319562.js\" async>\n</script>\n"
  },
  {
    "path": "docs/cmd/getset.md",
    "content": "### Description\n\nThe `getset` command is a combination of the gNMI [Get RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L57) and the gNMI [Set RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L62).\n\nIt allows to conditionally execute a [Set RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L62) based on a condition evaluated against a [GetResponse](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L420).\n\nThe `condition` written as a [`jq expression`](https://stedolan.github.io/jq/), is specified using the flag `--condition`.\n\nThe [SetRPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L62) is executed only if the condition evaluates to `true`\n\n### Usage\n\n`gnmic [global-flags] getset [local-flags]`\n\n`gnmic [global-flags] gas [local-flags]`\n\n`gnmic [global-flags] gs [local-flags]`\n\n\n### Flags\n\n#### prefix\n\nAs per [path prefixes](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#241-path-prefixes), the prefix `[--prefix]` flag represents a common prefix that is applied to all paths specified using the local `--get`, `--update`, `--replace` and `--delete` flags. \n\nDefaults to `\"\"`.\n\n#### get\nThe mandatory get flag `[--get]` is used to specify the single [path](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) used in the Get RPC.\n\n#### model\n\nThe optional model flag `[--model]` is used to specify the schema definition modules that the target should use when returning a GetResponse. The model name should match the names returned in Capabilities RPC. 
Currently only single model name is supported.\n\n#### target\nWith the optional `[--target]` flag it is possible to supply the [path target](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target) information in the prefix field of the GetRequest message.\n\n#### type\n\nThe type flag `[--type]` is used to specify the [data type](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L399) requested from the server.\n\nOne of:  ALL, CONFIG, STATE, OPERATIONAL (defaults to \"ALL\")\n\n#### condition\nThe `[--condition]` is a [`jq expression`](https://stedolan.github.io/jq/) that can be used to determine if the Set Request is executed based on the Get Response values.\n\n#### update\nThe `[--update]` specifies a [`jq expression`](https://stedolan.github.io/jq/) used to build the Set Request update path.\n\n#### replace\nThe `[--replace]` specifies a [`jq expression`](https://stedolan.github.io/jq/) used to build the Set Request replace path.\n\n#### delete\nThe `[--delete]` specifies a [`jq expression`](https://stedolan.github.io/jq/) used to build the Set Request delete path.\n\n#### value\nThe `[--value]` specifies a [`jq expression`](https://stedolan.github.io/jq/) used to build the Set Request value.\n\n### Examples\n\nThe command in the below example does the following:\n\n- gets the list of interface indexes to interface name mapping, \n\n- checks if the interface index (ifindex) 70 exists,\n\n- if it does, the set request changes the interface state to `enable` using the interface name.\n\n```bash\ngnmic getset -a <ip:port> \\\n    --get /interface/ifindex \\\n    --condition '.[] | .updates[].values[\"\"][\"srl_nokia-interfaces:interface\"][] | select(.ifindex==70) | (.name != \"\" or .name !=null)' \\\n    --update '.[] | .updates[].values[\"\"][\"srl_nokia-interfaces:interface\"][] | select(.ifindex==70) | \"interface[name=\" + .name + \"]/admin-state\"' \\\n    --value enable\n```"
  },
  {
    "path": "docs/cmd/listen.md",
    "content": "### Description\n`gnmic` can be used in a \"dial-out telemetry\" mode by means of the `listen` command. In the dial-out mode:\n\n* a network element is configured with the telemetry paths\n* a network element initiates a connection towards the server/collector (`gnmic` acts as a server in that case)\n\n!!! info\n    Currently `gnmic` only implements the dial-out support for Nokia[^1] SR OS 20.5.r1+ routers.\n\n### Usage\n\n```bash\ngnmic listen [global flags] [local flags]\n```\n\n### Flags\n\n#### address\n\nThe address flag `[-a | --address]` tells `gnmic` which address to bind an internal server to in an `address:port` format, e.g.: `0.0.0.0:57400`.\n\n#### tls-cert\n\nPath to the TLS certificate can be supplied with `--tls-cert` flag.\n\n#### tls-key\n\nPath to the private key can be supplied with `--tls-key` flag.\n\n#### max-concurrent-streams\n\nTo limit the maximum number of concurrent HTTP2 streams use the `--max-concurrent-streams` flag, the default value is 256.\n\n#### prometheus-address\n\nThe prometheus-address flag `[--prometheus-address]` allows starting a prometheus server that can be scraped by a prometheus client. It exposes metrics like memory, CPU and file descriptor usage.\n\n### Examples\n\n#### TLS disabled server\n\nTo start `gnmic` as a server listening on all interfaces without TLS support is as simple as:\n\n```bash\ngnmic listen -a 0.0.0.0:57400\n```\n\n??? 
info \"SR OS configuration for non TLS dialout connections\"\n    ```\n    /configure system telemetry destination-group \"dialout\" allow-unsecure-connection\n    /configure system telemetry destination-group \"dialout\" destination 10.2.0.99 port 57400 router-instance \"management\"\n    /configure system telemetry persistent-subscriptions { }\n    /configure system telemetry persistent-subscriptions subscription \"dialout\" admin-state enable\n    /configure system telemetry persistent-subscriptions subscription \"dialout\" sensor-group \"port_stats\"\n    /configure system telemetry persistent-subscriptions subscription \"dialout\" mode sample\n    /configure system telemetry persistent-subscriptions subscription \"dialout\" sample-interval 1000\n    /configure system telemetry persistent-subscriptions subscription \"dialout\" destination-group \"dialout\"\n    /configure system telemetry persistent-subscriptions subscription \"dialout\" encoding bytes\n    /configure system telemetry sensor-groups { }\n    /configure system telemetry sensor-groups { sensor-group \"port_stats\" }\n    /configure system telemetry sensor-groups { sensor-group \"port_stats\" path \"/state/port[port-id=1/1/c1/1]/statistics/in-octets\" }\n    ```\n\n#### TLS enabled server\n\nBy using [tls-cert](#tls-cert) and [tls-key](#tls-key) flags it is possible to run `gnmic` with TLS.\n\n```bash\ngnmic listen -a 0.0.0.0:57400 --tls-cert gnmic.pem --tls-key gnmic-key.pem\n```\n\n??? info \"SR OS configuration for a TLS enabled dialout connections\"\n    The configuration below does not utilise router-side certificates and uses the certificate provided by the server (gnmic). 
The router will also not verify the certificate.\n    ```\n    /configure system telemetry destination-group \"dialout\" tls-client-profile \"client-tls\"\n    /configure system telemetry destination-group \"dialout\" destination 10.2.0.99 port 57400 router-instance \"management\"\n    /configure system telemetry persistent-subscriptions { }\n    /configure system telemetry persistent-subscriptions subscription \"dialout\" admin-state enable\n    /configure system telemetry persistent-subscriptions subscription \"dialout\" sensor-group \"port_stats\"\n    /configure system telemetry persistent-subscriptions subscription \"dialout\" mode sample\n    /configure system telemetry persistent-subscriptions subscription \"dialout\" sample-interval 1000\n    /configure system telemetry persistent-subscriptions subscription \"dialout\" destination-group \"dialout\"\n    /configure system telemetry persistent-subscriptions subscription \"dialout\" encoding bytes\n    /configure system telemetry sensor-groups { }\n    /configure system telemetry sensor-groups { sensor-group \"port_stats\" }\n    /configure system telemetry sensor-groups { sensor-group \"port_stats\" path \"/state/port[port-id=1/1/c1/1]/statistics/in-octets\" }\n\n    /configure system security tls client-cipher-list \"client-ciphers\" { }\n    /configure system security tls client-cipher-list \"client-ciphers\" cipher 1 name tls-rsa-with-aes128-cbc-sha\n    /configure system security tls client-cipher-list \"client-ciphers\" cipher 2 name tls-rsa-with-aes128-cbc-sha256\n    /configure system security tls client-cipher-list \"client-ciphers\" cipher 3 name tls-rsa-with-aes256-cbc-sha\n    /configure system security tls client-cipher-list \"client-ciphers\" cipher 4 name tls-rsa-with-aes256-cbc-sha256\n    \n    /configure system security tls client-tls-profile \"client-tls\" admin-state enable\n    /configure system security tls client-tls-profile \"client-tls\" cipher-list \"client-ciphers\"\n    ```\n\n[^1]: 
Nokia dial-out proto definition can be found in [karimra/sros-dialout](https://github.com/karimra/sros-dialout/blob/master/NOKIA-dial-out-telemetry.proto)\n"
  },
  {
    "path": "docs/cmd/path.md",
    "content": "### Description\n\nWith `path` command it is possible to generate and search through the XPATH style paths extracted from a YANG file.\n\nBy extracting the XPATH styled paths from a YANG model it is made possible to utilize CLI search tools like `awk`, `sed` and alike to find the paths satisfying specific matching rules.\n\nThe embedded search capability allows to perform a quick and simple search through the model's paths using simple inclusion/exclusion operators.\n\n### Flags\n\n#### types\n\nWhen `--types` flag is present the extracted paths will also have a corresponding type printed out.\n\n#### path-type\n\nThe `--path-type` flag governs which style is used to display the path information. The default value is `xpath` which will produce the XPATH compatible paths.\n\nThe other option is `gnmi` which will result in the paths to be formatted using the gNMI Path Conventions.\n\n=== \"XPATH\"\n    ```bash\n    /state/sfm[sfm-slot=*]/hardware-data/firmware-revision-status\n    ```\n\n=== \"gNMI\"\n    ```bash\n    elem:{name:\"state\"}  elem:{name:\"sfm\"  key:{key:\"sfm-slot\"  value:\"*\"}}  elem:{name:\"hardware-data\"}  elem:{name:\"firmware-revision-status\"}\n    ```\n\n#### search\n\nWith the `--search` flag present an interactive CLI search dialog is displayed that allows to navigate through the paths list and perform a search.\n\n```bash\n❯ gnmic path --file _test/nokia-state-combined.yang --search\nUse the arrow keys to navigate: ↓ ↑ → ←  and : toggles search\n? 
select path: \n    /state/aaa/radius/statistics/coa/dropped/bad-authentication\n    /state/aaa/radius/statistics/coa/dropped/missing-auth-policy\n  ▸ /state/aaa/radius/statistics/coa/dropped/invalid\n    /state/aaa/radius/statistics/coa/dropped/missing-resource\n    /state/aaa/radius/statistics/coa/received\n    /state/aaa/radius/statistics/coa/accepted\n    /state/aaa/radius/statistics/coa/rejected\n    /state/aaa/radius/statistics/disconnect-messages/dropped/bad-authentication\n    /state/aaa/radius/statistics/disconnect-messages/dropped/missing-auth-policy\n↓   /state/aaa/radius/statistics/disconnect-messages/dropped/invalid\n```\n\n#### descr\n\nWhen the `--descr` flag is present, the leaf description is printed after the path, indented with a `\\t`.\n\n#### config-only\n\nWhen the `--config-only` flag is present, paths are generated only for YANG leaves representing config data.\n\n#### state-only\n\nWhen the `--state-only` flag is present, paths are generated only for YANG leaves representing state data.\n\n#### with-non-leaves\n\nWhen the `--with-non-leaves` flag is present, paths are generated not only for YANG leaves.\n\n### Examples\n\n```bash\n# output to stdout the XPATH styled paths\n# from the nokia-state module of nokia-state-combined.yang file\ngnmic path --file nokia-state-combined.yang\n\n# from the nokia-conf module\ngnmic path -m nokia-conf --file nokia-conf-combined.yang\n\n# with the gNMI styled paths\ngnmic path --file nokia-state-combined.yang --path-type gnmi\n\n# with path types\ngnmic path --file nokia-state-combined.yang --types\n\n# entering the interactive navigation prompt\ngnmic path --file nokia-state-combined.yang --search\n```\n\n<script id=\"asciicast-319579\" src=\"https://asciinema.org/a/319579.js\" async></script>\n\n[^1]: Nokia combined models can be found in [nokia/7x50_YangModels](https://github.com/nokia/7x50_YangModels/tree/master/latest_sros_20.5/nokia-combined) repo.\n"
  },
  {
    "path": "docs/cmd/processor.md",
    "content": "### Description\n\nThe `[processor | proc]` command allows running a set of event processors offline given an input of event messages.\n\nIt expects a file input (`--input`) containing a list of event messages and one or more processor(s) name(s) (`--name`) defined in the main config file.\nThis command will read the input file, validate the configured processors, apply them on the input event messages and print out the result.\n\n### Usage\n\n`gnmic [global-flags] processor [local-flags]`\n\n### Local Flags\n\nThe processor command supports the following local flags:\n\n#### name\n\nThe `[--name]` flag sets the list of processors names to apply to the input.\n\n#### input\n\nThe `[--input]` flag is used to specify the path to a file containing a list of event messages (`stdin` can be specified by giving the `-` value).\n\n#### delimiter\n\nThe `[--delimiter]` flag is used to set the delimiter string between event messages in the input file, defaults to `\\n`.\n\n#### output\n\nThe `[--output]` flag references an output name configured in the main config file. The command will format the resulting messages according to the output config. 
This is mainly for outputs with `type: prometheus`\n\n### Example\n\nConfig File\n\n```yaml\noutputs:\n  out1:\n    type: prometheus\n    metric-prefix: \"gnmic\"\n    strings-as-labels: true\n    \nprocessors:\n  proc0:\n    event-strings:\n      value-names:\n        - \"^_\"\n      transforms:\n\n  # processor name\n  proc1:\n    # processor type\n    event-strings:\n      value-names:\n        - \".*\"\n      transforms:\n        # strings function name\n        - path-base:\n            apply-on: \"name\"\n  proc2:\n    event-strings:\n      tag-names:\n        - \"interface_name\"\n        - \"subscription-name\"\n        - \"source\"\n      transforms:\n        # strings function name\n        - to-upper:\n            apply-on: \"value\"\n        - to-upper:\n            apply-on: \"name\"\n  proc3:\n    # processor type\n    event-drop:\n      condition: \".values | length == 0\"\n```\n\ninput File:\n\n```json\n[\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/in-packets\": 351770\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/in-octets\": 35284165\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/in-unicast-packets\": 338985\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": 
\"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/in-broadcast-packets\": 1218\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/in-multicast-packets\": 5062\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/in-discarded-packets\": 6377\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/in-error-packets\": 128\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/in-fcs-error-packets\": 0\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/out-packets\": 568218\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/out-octets\": 219527024\n    }\n  },\n  {\n    \"name\": 
\"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/out-mirror-octets\": 0\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/out-unicast-packets\": 567532\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/out-broadcast-packets\": 6\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/out-multicast-packets\": 680\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/out-discarded-packets\": 0\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/out-error-packets\": 0\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": 
\"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/out-mirror-packets\": 0\n    }\n  },\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1710890476202665500,\n    \"tags\": {\n      \"interface_name\": \"mgmt0\",\n      \"source\": \"clab-traps-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/interface/statistics/carrier-transitions\": 1\n    }\n  }\n]\n```\n\nCommand:\n\n```shell\ngnmic processor --input /path/to/event_msg.txt --delimiter \"\\n###\" --name proc1,proc2,proc3 --output out1\n```\n\nOutput:\n\n```text\n# HELP gnmic_in_packets gNMIc generated metric\n# TYPE gnmic_in_packets untyped\ngnmic_in_packets{interface_name=\"mgmt0\",source=\"clab-traps-srl1\",subscription_name=\"sub1\"} 351770\n# HELP gnmic_in_octets gNMIc generated metric\n# TYPE gnmic_in_octets untyped\ngnmic_in_octets{subscription_name=\"sub1\",interface_name=\"mgmt0\",source=\"clab-traps-srl1\"} 3.5284165e+07\n# HELP gnmic_in_unicast_packets gNMIc generated metric\n# TYPE gnmic_in_unicast_packets untyped\ngnmic_in_unicast_packets{subscription_name=\"sub1\",interface_name=\"mgmt0\",source=\"clab-traps-srl1\"} 338985\n# HELP gnmic_in_broadcast_packets gNMIc generated metric\n# TYPE gnmic_in_broadcast_packets untyped\ngnmic_in_broadcast_packets{interface_name=\"mgmt0\",source=\"clab-traps-srl1\",subscription_name=\"sub1\"} 1218\n# HELP gnmic_in_multicast_packets gNMIc generated metric\n# TYPE gnmic_in_multicast_packets untyped\ngnmic_in_multicast_packets{interface_name=\"mgmt0\",source=\"clab-traps-srl1\",subscription_name=\"sub1\"} 5062\n# HELP gnmic_in_discarded_packets gNMIc generated metric\n# TYPE gnmic_in_discarded_packets untyped\ngnmic_in_discarded_packets{interface_name=\"mgmt0\",source=\"clab-traps-srl1\",subscription_name=\"sub1\"} 6377\n# HELP gnmic_in_error_packets gNMIc generated metric\n# TYPE gnmic_in_error_packets untyped\ngnmic_in_error_packets{interface_name=\"mgmt0\",source=\"clab-traps-srl1\",subscription_name=\"sub1\"} 
128\n# HELP gnmic_in_fcs_error_packets gNMIc generated metric\n# TYPE gnmic_in_fcs_error_packets untyped\ngnmic_in_fcs_error_packets{interface_name=\"mgmt0\",source=\"clab-traps-srl1\",subscription_name=\"sub1\"} 0\n# HELP gnmic_out_packets gNMIc generated metric\n# TYPE gnmic_out_packets untyped\ngnmic_out_packets{interface_name=\"mgmt0\",source=\"clab-traps-srl1\",subscription_name=\"sub1\"} 568218\n# HELP gnmic_out_octets gNMIc generated metric\n# TYPE gnmic_out_octets untyped\ngnmic_out_octets{interface_name=\"mgmt0\",source=\"clab-traps-srl1\",subscription_name=\"sub1\"} 2.19527024e+08\n# HELP gnmic_out_mirror_octets gNMIc generated metric\n# TYPE gnmic_out_mirror_octets untyped\ngnmic_out_mirror_octets{interface_name=\"mgmt0\",source=\"clab-traps-srl1\",subscription_name=\"sub1\"} 0\n# HELP gnmic_out_unicast_packets gNMIc generated metric\n# TYPE gnmic_out_unicast_packets untyped\ngnmic_out_unicast_packets{subscription_name=\"sub1\",interface_name=\"mgmt0\",source=\"clab-traps-srl1\"} 567532\n# HELP gnmic_out_broadcast_packets gNMIc generated metric\n# TYPE gnmic_out_broadcast_packets untyped\ngnmic_out_broadcast_packets{interface_name=\"mgmt0\",source=\"clab-traps-srl1\",subscription_name=\"sub1\"} 6\n# HELP gnmic_out_multicast_packets gNMIc generated metric\n# TYPE gnmic_out_multicast_packets untyped\ngnmic_out_multicast_packets{source=\"clab-traps-srl1\",subscription_name=\"sub1\",interface_name=\"mgmt0\"} 680\n# HELP gnmic_out_discarded_packets gNMIc generated metric\n# TYPE gnmic_out_discarded_packets untyped\ngnmic_out_discarded_packets{interface_name=\"mgmt0\",source=\"clab-traps-srl1\",subscription_name=\"sub1\"} 0\n# HELP gnmic_out_error_packets gNMIc generated metric\n# TYPE gnmic_out_error_packets untyped\ngnmic_out_error_packets{interface_name=\"mgmt0\",source=\"clab-traps-srl1\",subscription_name=\"sub1\"} 0\n# HELP gnmic_out_mirror_packets gNMIc generated metric\n# TYPE gnmic_out_mirror_packets 
untyped\ngnmic_out_mirror_packets{interface_name=\"mgmt0\",source=\"clab-traps-srl1\",subscription_name=\"sub1\"} 0\n# HELP gnmic_carrier_transitions gNMIc generated metric\n# TYPE gnmic_carrier_transitions untyped\ngnmic_carrier_transitions{subscription_name=\"sub1\",interface_name=\"mgmt0\",source=\"clab-traps-srl1\"} 1\n```\n"
  },
  {
    "path": "docs/cmd/prompt.md",
    "content": "## Description\nThe `prompt` command starts `gnmic` in an interactive prompt mode with the following auto-completion features:\n\n* All `gnmic` [commands names and their flags are suggested](../user_guide/prompt_suggestions.md#commands-and-flags-suggestions).\n* Values for the flags that rely on YANG-defined data (like `--path`, `--prefix`, `--model`,...) will be dynamically suggested, we call this feature [YANG-completions](../user_guide/prompt_suggestions.md#yang-completions).  \nThe auto-completions are generated from the YANG modules loaded with the `--file` and `--dir` flags.\n* Flags with the fixed set of values (`--format`, `--encoding`, ...) will get their [values suggested](../user_guide/prompt_suggestions.md#enumeration-suggestions).\n* Flags that require a [file path value will auto-suggest](../user_guide/prompt_suggestions.md#file-path-completions) the available files as the user types.\n\n\n### Usage\n\n`gnmic [global-flags] prompt [local-flags]`\n\n### Flags\n\n#### description-with-prefix\nWhen set, the description of the path elements in the suggestion box will contain module's prefix.\n\n#### description-with-types\nWhen set, the description of the path elements in the suggestion box will contain element's type information.\n\n#### max-suggestions\nThe `--max-suggestions` flag sets the number of lines that the suggestion box will display without scrolling.\n\nDefaults to 10. Note, the terminal height might limit the number of lines in the suggestions box. \n\n#### suggest-all-flags\nThe `--suggest-all-flags` makes `gnmic` prompt suggest both global and local flags for a sub-command.\n\nThe default behavior (when this flag is not set) is to suggest __only__ local flags for any typed sub-command.\n\n#### suggest-with-origin\nThe `--suggest-with-origin` flag prepends the suggested path with the module name to which this path belongs.\n\nThe path becomes rendered as `<module_name>:/<suggested-container>`. 
The module name will be used as the [origin](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) of the gNMI path.\n\n#### suggestions-bg-color\nThe `--suggestions-bg-color` flag sets the background color of the left part of the suggestion box.\n\nDefaults to dark blue.\n\n#### description-bg-color\nThe `--description-bg-color` flag sets the background color of the right part of the suggestion box.\n\nDefaults to dark gray.\n\n#### prefix-color\nThe `--prefix-color` flag sets the gnmic prompt prefix color `gnmic> `.\n\nDefaults to dark blue.\n\n### Examples\nThe detailed explanation of the prompt command and the YANG-completions is provided on the [Prompt mode and auto-suggestions](../user_guide/prompt_suggestions.md) page.\n"
  },
  {
    "path": "docs/cmd/proxy.md",
    "content": "### Description\n\nThe `[proxy]` command start a gNMI proxy server. That relays gNMI messages to know targets (either configured or discovered).\n\n`gNMIc` proxy relays `Get`, `Set` and `Subscribe` RPCs but not `Capabilities`.\n\nTo designate the target of an RPC, the `Prefix.Target` field within the RPC request message should be utilized. This field is versatile, accepting a single target, a comma-separated list of targets, or the wildcard character `*` for broader targeting.\n\nHere are the key points regarding target specification:\n\n- The target can be set to a target name or a comma-separated list of targets.\n- Setting the target to `*` implies the selection of all known targets.\n- If the Prefix.Target field is not explicitly set, gNMIc defaults to treating it as if `*` were specified, thus applying the action to all known targets.\n\ngNMIc optimizes resource usage by reusing existing gNMI client instances whenever possible. If an appropriate gNMI client does not already exist, gNMIc will create a new instance as required.\n\n### Usage\n\n`gnmic [global-flags] proxy`\n\n### Configuration\n\nThe Proxy behavior is controlled using the `gnmi-server` section of the main config file:\n\n```yaml\ngnmi-server:\n  # the address the gNMI server will listen to\n  address: :57400\n  # tls config\n  tls:\n    # string, path to the CA certificate file,\n    # this certificate is used to verify the clients certificates.\n    ca-file:\n    # string, server certificate file.\n    cert-file:\n    # string, server key file.\n    key-file:\n    # string, one of `\"\", \"request\", \"require\", \"verify-if-given\", or \"require-verify\" \n    #  - request:         The server requests a certificate from the client but does not \n    #                     require the client to send a certificate. 
\n    #                     If the client sends a certificate, it is not required to be valid.\n    #  - require:         The server requires the client to send a certificate and does not \n    #                     fail if the client certificate is not valid.\n    #  - verify-if-given: The server requests a certificate, \n    #                     does not fail if no certificate is sent. \n    #                     If a certificate is sent it is required to be valid.\n    #  - require-verify:  The server requires the client to send a valid certificate.\n    #\n    # if no ca-file is present, `client-auth` defaults to \"\"`\n    # if a ca-file is set, `client-auth` defaults to \"require-verify\"`\n    client-auth: \"\"\n  max-subscriptions: 64\n  # maximum number of active Get/Set RPCs\n  max-unary-rpc: 64\n  # defines the maximum msg size (in bytes) the server can receive, \n  # defaults to 4MB\n  max-recv-msg-size:\n  # defines the maximum msg size (in bytes) the server can send,\n  # default to MaxInt32 (2147483647 bytes or 2.147483647 Gb)\n  max-send-msg-size:\n  # defines the maximum number of streams per streaming RPC.\n  max-concurrent-streams:\n  # defines the TCP keepalive tiem and interval for client connections, \n  # if unset it is enabled based on the OS. If negative it is disabled.\n  tcp-keepalive: \n  # set keepalive and max-age parameters on the server-side.\n  keepalive:\n    # MaxConnectionIdle is a duration for the amount of time after which an\n    # idle connection would be closed by sending a GoAway. Idleness duration is\n    # defined since the most recent time the number of outstanding RPCs became\n    # zero or the connection establishment.\n    # The current default value is infinity.\n    max-connection-idle:\n    # MaxConnectionAge is a duration for the maximum amount of time a\n    # connection may exist before it will be closed by sending a GoAway. 
A\n    # random jitter of +/-10% will be added to MaxConnectionAge to spread out\n    # connection storms.\n    # The current default value is infinity.\n    max-connection-age:\n    # MaxConnectionAgeGrace is an additive period after MaxConnectionAge after\n    # which the connection will be forcibly closed.\n    # The current default value is infinity.\n    max-connection-age-grace:\n    # After a duration of this time if the server doesn't see any activity it\n    # pings the client to see if the transport is still alive.\n    # If set below 1s, a minimum value of 1s will be used instead.\n    # The current default value is 2 hours.\n    time: 120m\n    # After having pinged for keepalive check, the server waits for a duration\n    # of Timeout and if no activity is seen even after that the connection is\n    # closed.\n    # The current default value is 20 seconds.\n    timeout: 20s\n  # defines the minimum allowed sample interval, this value is used when the received sample-interval \n  # is greater than zero but lower than this minimum value.\n  min-sample-interval: 1ms\n  # defines the default sample interval, \n  # this value is used when the received sample-interval is zero within a stream/sample subscription.\n  default-sample-interval: 1s\n  # defines the minimum heartbeat-interval\n  # this value is used when the received heartbeat-interval is greater than zero but\n  # lower than this minimum value\n  min-heartbeat-interval: 1s\n  # enables the collection of Prometheus gRPC server metrics\n  enable-metrics: false\n  # enable additional debug logs\n  debug: false\n  # Enables Consul service registration\n  service-registration:\n    # Consul server address, default to localhost:8500\n    address:\n    # Consul Data center, defaults to dc1\n    datacenter: \n    # Consul username, to be used as part of HTTP basicAuth\n    username:\n    # Consul password, to be used as part of HTTP basicAuth\n    password:\n    # Consul Token, is used to provide a 
per-request ACL token \n    # which overrides the agent's default token\n    token:\n    # gnmi server service check interval, only TTL Consul check is enabled\n    # defaults to 5s\n    check-interval:\n    # Maximum number of failed checks before the service is deleted by Consul\n    # defaults to 3\n    max-fail:\n    # Consul service name\n    name:\n    # List of tags to be added to the service registration, \n    # if available, the instance-name and cluster-name will be added as tags,\n    # in the format: gnmic-instance=$instance-name and gnmic-cluster=$cluster-name\n    tags:\n```\n\n### Example\n\n#### simple proxy\n\nThis config starts gNMIc as a gNMI proxy serving 2 targets `router1` and `router2`\n\n```yaml\ngnmi-server:\n  address: :57401\n\ntargets:\n  router1:\n    skip-verify: true\n  router2:\n    skip-verify: true\n```\n\n```shell\ngnmic --config gnmic.yaml proxy\n```\n\n#### proxy with target discovery\n\n```yaml\ngnmi-server:\n  address: :57401\n\nloader:\n  type: file\n  path: targets.yaml\n```\n\n```shell\ngnmic --config gnmic.yaml proxy\n```\n\n#### proxy with service registration\n\n```yaml\ngnmi-server:\n  address: gnmi-proxy-address:57401\n  service-registration:\n    name: proxy\n    address: consul-server:8500\n\nloader:\n  type: file\n  path: targets.yaml\n```\n\n```shell\ngnmic --config gnmic.yaml proxy\n```\n"
  },
  {
    "path": "docs/cmd/set.md",
    "content": "## Description\n\nThe `set` command represents the [gNMI Set RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L62).\n\nIt is used to send a [Set Request](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L339) to the specified target(s) and expects one [Set Response](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L356) per target.\n\nSet RPC allows the client to [modify the state](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#34-modifying-state) of data on the target. The data specified referenced by a path can be [updated, replaced or deleted](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#343-transactions).\n\n!!! note\n    It is possible to combine `update`, `replace` and `delete` in a single `gnmic set` command.\n\n## Usage\n\n`gnmic [global-flags] set [local-flags]`\n\nThe Set Request can be any of (or a combination of) update, replace or/and delete operations.\n\n## Flags\n\n### prefix\n\nThe `--prefix` flag sets a common [prefix](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#241-path-prefixes) to all paths specified using the local `--path` flag. Defaults to `\"\"`.\n\nIf a user needs to provide [origin](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) information to the Path message, the following pattern should be used for the path string: `\"origin:path\"`:\n\n!!! 
note\n    The path after the origin value has to start with a `/`\n\n```bash\ngnmic set --update \"openconfig-interfaces:/interfaces/interface:::<type>:::<value>\"\n```\n\n### target\n\nWith the optional `[--target]` flag it is possible to supply the [path target](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target) information in the prefix field of a SetRequest message.\n\n### dry-run\n\nThe `--dry-run` flag allows running a Set request without sending it to the targets.\nThis is useful while developing templated Set requests.\n\n### delete\n\nThe `--delete` flag allows creating a [SetRequest.Delete](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L337) as part of the SetRequest message.\n\n### replace\n\nThe `--replace` flag allows creating a [SetRequest.Replace](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L338) as part of a SetRequest message.\nIt is expected to be in the format `$path:::$type:::$value`, where `$path` is the gNMI path of the object to replace, `$type` is the type of the value and `$value` is the replacement value.\n\n### update\n\nThe `--update` flag allows creating a [SetRequest.Update](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L339) as part of a SetRequest message.\nIt is expected to be in the format `$path:::$type:::$value`, where `$path` is the gNMI path of the object to update, `$type` is the type of the value and `$value` is the update value.\n\n### replace-path and replace-value\n\nThe `--replace-path` and `--replace-value` flags are equivalent to the `--replace` flag, where the path and value are split and the type is deduced from the `[-e | --encoding]` global flag.\n\n### update-path and update-value\n\nThe `--update-path` and `--update-value` flags are equivalent to the `--update` flag, where the path and value are split and the type is deduced from the `[-e | --encoding]` global flag.\n\n### replace-path and 
replace-file\n\nThe `--replace-path` and `--replace-file` flags are equivalent to the `--replace` flag, where the path and value are split and the type is deduced from the `[-e | --encoding]` global flag.\n\n### update-path and update-file\n\nThe `--update-path` and `--update-file` flags are equivalent to the `--update` flag, where the path and value are split and the type is deduced from the `[-e | --encoding]` global flag.\n\n### replace-cli\n\nThe `--replace-cli` flag allows setting a [SetRequest.Replace](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L338) as part of a SetRequest message.\nIt expects a single CLI command which will form the value path of the Replace, the path will be set to the CLI origin `cli`.\n\n### replace-cli-file\n\nThe `--replace-cli-file` flag allows setting a [SetRequest.Replace](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L338) as part of a SetRequest message.\nIt expects a file containing one or multiple CLI commands which will form the value path of the Replace, the path will be set to the CLI origin `cli`.\n\n### update-cli\n\nThe `--update-cli` flag allows setting a [SetRequest.Update](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L339) as part of a SetRequest message.\nIt expects a single CLI command which will form the value path of the Update, the path will be set to the CLI origin `cli`.\n\n### update-cli-file\n\nThe `--update-cli-file` flag allows setting a [SetRequest.Update](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L339) as part of a SetRequest message.\nIt expects a file containing one or multiple CLI commands which will form the value path of the Update, the path will be set to the CLI origin `cli`.\n\n### request-file and request-vars\n\nSee [this section](#templated-set-request-file) below.\n\n### commit-id\n\nThe `--commit-id` flag sets the commit ID when the client needs to perform a commit confirmed set request as per: 
https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-commit-confirmed.md\n\n### commit-request\n\nThe `--commit-request` flag is used together with the `--commit-id` flag to set the commit action to `Request`, essentially starting a commit request.\n\n### commit-confirm\n\nThe `--commit-confirm` flag is used together with the `--commit-id` flag to confirm an already started commit confirmed transaction.\n\n### commit-cancel\n\nThe `--commit-cancel` flag is used together with the `--commit-id` flag to cancel an already started commit confirmed transaction.\n\n### rollback-duration\n\nThe `--rollback-duration` flag is used together with the `--commit-id` flag to set the rollback duration of a commit confirmed transaction either at creation time or before the previous commit rollback expires.\n\n## Update Request\n\nThere are several ways to perform an update operation with gNMI Set RPC:\n\n#### 1. in-line update, implicit type\n\nUsing both `--update-path` and `--update-value` flags, a user can update a value for a given path.\n\n```bash\ngnmic set --update-path /configure/system/name --update-value router1\n\ngnmic set --update-path /configure/router[router-name=Base]/interface[interface-name=system]/admin-state \\\n          --update-value enable\n```\n\nThe above 2 updates can be combined in the same CLI command:\n\n```bash\ngnmic set --update-path /configure/system/name \\\n          --update-value router1 \\\n          --update-path /configure/router[router-name=Base]/interface[interface-name=system]/admin-state \\\n          --update-value enable\n```\n\n#### 2. in-line update, explicit type\n\nUsing the update flag `--update`, one can specify the path, value type and value in a single parameter using a delimiter `--delimiter`. 
Delimiter string defaults to `\":::\"`.\n\nSupported types: json, json_ietf, string, int, uint, bool, decimal, float, bytes, ascii.\n\n```bash\n# path:::value-type:::value\ngnmic set --update /configure/system/name:::json:::router1\n\ngnmic set --update /configure/router[router-name=Base]/interface[interface-name=system]/admin-state:::json:::enable\n\ngnmic set --update /configure/router[router-name=Base]/interface[interface-name=system]:::json:::'{\"admin-state\":\"enable\"}'\n```\n\n#### 3. update with a value from JSON or YAML file\n\nIt is also possible to specify the values from a local JSON or YAML file using `--update-file` flag for the value and `--update-path` for the path.\n\nIn which case the value encoding will be determined by the global flag `[ -e | --encoding ]`, both `JSON` and `JSON_IETF` are supported\n\nThe file's format is identified by its extension, json: `.json` and yaml `.yaml` or `.yml`.\n\n=== \"interface.json\"\n    ```bash\n    {\n        \"admin-state\": \"enable\",\n        \"ipv4\": {\n            \"primary\": {\n                \"address\": \"1.1.1.1\",\n                \"prefix-length\": 32\n            }\n        }\n    }\n    ```\n    ``` bash\n    gnmic set --update-path /configure/router[router-name=Base]/interface[interface-name=system] \\\n              --update-file interface.json\n    ```\n\n=== \"interface.yml\"\n\n    ```bash\n    \"admin-state\": enable\n    \"ipv4\":\n    \"primary\":\n        \"address\": 1.1.1.1\n        \"prefix-length\": 32\n    ```\n    ``` bash\n    gnmic set --update-path /configure/router[router-name=Base]/interface[interface-name=system] \\\n              --update-file interface.yml\n    ```\n\n## Replace Request\n\nThere are 3 main ways to specify a replace operation:\n\n#### 1. in-line replace, implicit type\n\nUsing both `--replace-path` and `--replace-value` flags, a user can replace a value for a given path. 
The type of the value is implicitly set to `JSON`:\n\n```bash\ngnmic set --replace-path /configure/system/name --replace-value router1\n```\n\n```bash\ngnmic set --replace-path /configure/router[router-name=Base]/interface[interface-name=system]/admin-state \\\n          --replace-value enable\n```\n\nThe above 2 commands can be packed in the same CLI command:\n\n```bash\ngnmic set --replace-path /configure/system/name \\\n          --replace-value router1 \\\n          --replace-path /configure/router[router-name=Base]/interface[interface-name=system]/admin-state \\\n          --replace-value enable\n```\n\n#### 2. in-line replace, explicit type\n\nUsing the replace flag `--replace`, you can specify the path, value type and value in a single parameter using a delimiter `--delimiter`. Delimiter string defaults to `\":::\"`.\n\nSupported types: json, json_ietf, string, int, uint, bool, decimal, float, bytes, ascii.\n\n```bash\ngnmic set --replace /configure/system/name:::json:::router1\n```\n\n```bash\ngnmic set --replace /configure/router[router-name=Base]/interface[interface-name=system]/admin-state:::json:::enable\n```\n\n#### 3. 
replace with a value from JSON or YAML file\n\nIt is also possible to specify the values from a local JSON or YAML file using flag `--replace-file` for the value and `--replace-path` for the path.\n\nIn which case the value encoding will be determined by the global flag `[ -e | --encoding ]`, both `JSON` and `JSON_IETF` are supported\n\nThe file is identified by its extension, json: `.json` and yaml `.yaml` or `.yml`.\n\n=== \"interface.json\"\n    ```bash\n    {\n        \"admin-state\": \"enable\",\n        \"ipv4\": {\n            \"primary\": {\n                \"address\": \"1.1.1.1\",\n                \"prefix-length\": 32\n            }\n        }\n    }\n    ```\n=== \"interface.yml\"\n    ```bash\n    \"admin-state\": enable\n    \"ipv4\":\n    \"primary\":\n        \"address\": 1.1.1.1\n        \"prefix-length\": 32\n    ```\n\nThen refer to the file with `--replace-file` flag\n\n``` bash\ngnmic set --replace-path /configure/router[router-name=Base]/interface[interface-name=system] \\\n          --replace-file interface.json\n```\n\n## Delete Request\n\nA deletion operation within the Set RPC is specified using the delete flag `--delete`.\n\nIt takes an XPATH pointing to the config node to be deleted:\n\n```bash\ngnmic set --delete \"/configure/router[router-name=Base]/interface[interface-name=dummy_interface]\"\n```\n\n## Templated Set Request file\n\nA Set Request can also be built based on one or multiple templates and (optionally) a set of variables.\n\nThe variables allow to generate a Set Request file on per target basis.\n\nIf no variable file is found, the execution continues and the template is assumed to be a static string.\n\nEach template specified with the flag `--request-file` is rendered against the variables defined in the file set with `--request-vars`.\nEach template results in a single gNMI Set Request.\n\n```bash\ngnmic set --request-file <template1> --request-file <template2> --request-vars <vars_file>\n```\n\n### Template 
Format\n\nThe rendered template data can be a `JSON` or `YAML` valid string.\n\nIt has 3 sections, `updates`, `replaces` and `deletes`.\n\nIn each of the `updates` and `replaces`, a `path`, a `value` and an `encoding` can be configured.\n\nIf not specified, `path` defaults to `/`, while `encoding` defaults to the value set with `--encoding` flag.\n\n`updates` and `replaces` result in a set of gNMI Set Updates in the Set RPC, `deletes` result in a set of gNMI paths to be deleted.\n\nThe `value` can be any arbitrary data format that the target accepts, it will be encoded based on the value of \"encoding\".\n=== \"JSON\"\n    ```json\n    {\n      \"updates\": [\n          {\n              \"path\": \"/interface[name=ethernet-1/1]\",\n              \"value\": {\n                  \"admin-state\": \"enable\",\n                  \"description\": \"to_spine1\"\n               },\n               \"encoding\": \"json_ietf\"\n          },\n          {\n              \"path\": \"/interface[name=ethernet-1/2]\",\n              \"value\": {\n                  \"admin-state\": \"enable\",\n                  \"description\": \"to_spine2\"\n               },\n               \"encoding\": \"json_ietf\"\n          }\n      ],\n      \"replaces\": [\n          {\n              \"path\": \"/interface[name=ethernet-1/3]\",\n              \"value\": {\n                  \"admin-state\": \"enable\",\n                  \"description\": \"to_spine3\"\n               }\n          },\n           {\n              \"path\": \"/interface[name=ethernet-1/4]\",\n              \"value\": {\n                  \"admin-state\": \"enable\",\n                  \"description\": \"to_spine4\"\n               }\n          }\n      ],\n      \"deletes\" : [\n          \"/interface[name=ethernet-1/5]\",\n          \"/interface[name=ethernet-1/6]\"\n      ]\n    }\n    ```\n=== \"YAML\"\n    ```yaml\n    updates:\n      - path: \"/interface[name=ethernet-1/1]\"\n        value:\n          admin-state: 
enable\n          description: \"to_spine1\"\n        encoding: \"json_ietf\"\n      - path: \"/interface[name=ethernet-1/2]\"\n        value:\n          admin-state: enable\n          description: \"to_spine2\"\n        encoding: \"json_ietf\"\n    replaces:\n      - path: \"/interface[name=ethernet-1/3]\"\n        value:\n          admin-state: enable\n          description: \"to_spine3\"\n      - path: \"/interface[name=ethernet-1/4]\"\n        value:\n          admin-state: enable\n          description: \"to_spine4\"\n    deletes:\n      - \"/interface[name=ethernet-1/5]\"\n      - \"/interface[name=ethernet-1/6]\"\n    ```\n\n### Per Target Template Variables\n\nThe file `--request-file` can be written as a [Go Text template](https://golang.org/pkg/text/template/).\n\nThe parsed template is loaded with additional functions from [gomplate](https://docs.gomplate.ca/).\n\n`gnmic` generates one gNMI Set request per target.\n\nThe template will be rendered using variables read from the file `--request-vars`. 
\nJust like the template file, the variables file can either be a `JSON` or `YAML` formatted file.\n\nIf the flag `--request-vars` is not set, `gnmic` looks for a file with the same path, name and **extension** as the `request-file`, appended with `_vars`.\n\nWithin the template, the variables defined in the `--request-vars` file are accessible using the `.Vars` notation, while the target name is accessible using the `.TargetName` notation.\n\nExample request template:\n\n```yaml\nreplaces:\n{{ $target := index .Vars .TargetName }}\n{{- range $interface := index $target \"interfaces\" }}\n  - path: \"/interface[name={{ index $interface \"name\" }}]\"\n    encoding: \"json_ietf\"\n    value: \n      admin-state: {{ index $interface \"admin-state\" | default \"disable\" }}\n      description: {{ index $interface \"description\" | default \"\" }}\n    {{- range $index, $subinterface := index $interface \"subinterfaces\" }}\n      subinterface:\n        - index: {{ $index }}\n          admin-state: {{ index $subinterface \"admin-state\" | default \"disable\" }}\n          {{- if has $subinterface \"ipv4-address\" }}\n          ipv4:\n            address:\n              - ip-prefix: {{ index $subinterface \"ipv4-address\" | toString }}\n          {{- end }}\n          {{- if has $subinterface \"ipv6-address\" }}\n          ipv6:\n            address:\n              - ip-prefix: {{ index $subinterface \"ipv6-address\" | toString }}\n          {{- end }}\n    {{- end }}\n{{- end }}\n```\n\nThe below variables file defines the input for 3 leafs:\n\n```yaml\nleaf1:57400:\n  interfaces:\n    - name: ethernet-1/1\n      admin-state: \"enable\"\n      description: \"leaf1_to_spine1\"\n      subinterfaces:\n        - admin-state: enable\n          ipv4-address: 192.168.78.1/30\n    - name: ethernet-1/2\n      admin-state: \"enable\"\n      description: \"leaf1_to_spine2\"\n      subinterfaces:\n        - admin-state: enable\n          ipv4-address: 
192.168.79.1/30\n\nleaf2:57400:\n  interfaces:\n    - name: ethernet-1/1\n      admin-state: \"enable\"\n      description: \"leaf2_to_spine1\"\n      subinterfaces:\n        - admin-state: enable\n          ipv4-address: 192.168.88.1/30\n    - name: ethernet-1/2\n      admin-state: \"enable\"\n      description: \"leaf2_to_spine2\"\n      subinterfaces:\n        - admin-state: enable\n          ipv4-address: 192.168.89.1/30\n          \nleaf3:57400:\n  interfaces:\n    - name: ethernet-1/1\n      admin-state: \"enable\"\n      description: \"leaf3_to_spine1\"\n      subinterfaces:\n        - admin-state: enable\n          ipv4-address: 192.168.98.1/30\n    - name: ethernet-1/2\n      admin-state: \"enable\"\n      description: \"leaf3_to_spine2\"\n      subinterfaces:\n        - admin-state: enable\n          ipv4-address: 192.168.99.1/30\n```\n\nResult Request file per target:\n\n=== \"leaf1\"\n    ```yaml\n    updates:\n      - path: /interface[name=ethernet-1/1]\n        encoding: \"json_ietf\"\n        value: \n          admin-state: enable\n          description: leaf1_to_spine1\n          subinterface:\n            - index: 0\n              admin-state: enable\n              ipv4:\n                address:\n                  - ip-prefix: 192.168.78.1/30\n      - path: /interface[name=ethernet-1/2]\n        encoding: \"json_ietf\"\n        value: \n          admin-state: enable\n          description: leaf1_to_spine2\n          subinterface:\n            - index: 0\n              admin-state: enable\n              ipv4:\n                address:\n                  - ip-prefix: 192.168.79.1/30\n    ```\n=== \"leaf2\"\n    ```yaml\n    updates:\n      - path: /interface[name=ethernet-1/1]\n        encoding: \"json_ietf\"\n        value: \n          admin-state: enable\n          description: leaf2_to_spine1\n          subinterface:\n            - index: 0\n              admin-state: enable\n              ipv4:\n                address:\n                  - 
ip-prefix: 192.168.88.1/30\n      - path: /interface[name=ethernet-1/2]\n        encoding: \"json_ietf\"\n        value: \n          admin-state: enable\n          description: leaf2_to_spine2\n          subinterface:\n            - index: 0\n              admin-state: enable\n              ipv4:\n                address:\n                  - ip-prefix: 192.168.89.1/30\n    ```\n=== \"leaf3\"\n    ```yaml\n    updates:\n      - path: /interface[name=ethernet-1/1]\n        encoding: \"json_ietf\"\n        value: \n          admin-state: enable\n          description: leaf3_to_spine1\n          subinterface:\n            - index: 0\n              admin-state: enable\n              ipv4:\n                address:\n                  - ip-prefix: 192.168.98.1/30\n      - path: /interface[name=ethernet-1/2]\n        encoding: \"json_ietf\"\n        value: \n          admin-state: enable\n          description: leaf3_to_spine2\n          subinterface:\n            - index: 0\n              admin-state: enable\n              ipv4:\n                address:\n                  - ip-prefix: 192.168.99.1/30\n    ```\n\n## Examples\n\n### 1. update\n\n#### in-line value\n\n```bash\ngnmic -a <ip:port> set --update-path /configure/system/name \\\n                       --update-value <system_name>\n```\n\n#### value from JSON file\n\n```bash\ncat jsonFile.json\n{\"name\": \"router1\"}\n\ngnmic -a <ip:port> set --update-path /configure/system \\\n                       --update-file <jsonFile.json>\n```\n\n```bash\necho '{\"name\": \"router1\"}' | gnmic -a <ip:port> set \\\n                             --update-path /configure/system \\\n                             --update-file -\n```\n\n#### specify value type\n\n```bash\ngnmic -a <ip:port> set --update /configure/system/name:::json:::router1\ngnmic -a <ip:port> set --update /configure/system/name@json@router1 \\\n                       --delimiter @\n```\n\n### 2. 
replace\n\n```bash\ncat interface.json\n{\"address\": \"1.1.1.1\", \"prefix-length\": 32}\n\ngnmic -a <ip:port> --insecure \\\n      set --replace-path /configure/router[router-name=Base]/interface[interface-name=interface1]/ipv4/primary \\\n          --replace-file interface.json\n```\n\n```bash\necho '{\"address\": \"1.1.1.1\", \"prefix-length\": 32}' | gnmic -a <ip:port> --insecure \\\n      set --replace-path /configure/router[router-name=Base]/interface[interface-name=interface1]/ipv4/primary \\\n          --replace-file -\n```\n\n### 3. delete\n\n```bash\ngnmic -a <ip:port> --insecure set --delete /configure/router[router-name=Base]/interface[interface-name=interface1]\n```\n\n<script\nid=\"asciicast-319562\" src=\"https://asciinema.org/a/319562.js\" async>\n</script>\n"
  },
  {
    "path": "docs/cmd/subscribe.md",
    "content": "### Description\n\nThe `[subscribe | sub]` command represents the [gNMI Subscribe RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L68).\n\nIt is used to send a [Subscribe Request](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L208) to the specified target(s) and expects one or multiple [Subscribe Response](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L232)\n\n### Usage\n\n`gnmic [global-flags] subscribe [local-flags]`\n\n### Local Flags\n\nThe subscribe command supports the following local flags:\n\n#### prefix\n\nThe `[--prefix]` flag sets a common [prefix](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#241-path-prefixes) to all paths specified using the local `--path` flag. Defaults to `\"\"`.\n\n#### path\n\nThe path flag `[--path]` is used to specify the [path(s)](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) to which the client wants to subscribe.\n\nMultiple paths can be specified by using repeated `--path` flags:\n\n```bash\ngnmic sub --path \"/state/ports[port-id=*]\" \\\n          --path \"/state/router[router-name=*]/interface[interface-name=*]\"\n```\n\nIf a user needs to provide [origin](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) information to the Path message, the following pattern should be used for the path string: `\"origin:path\"`:\n\n!!! 
note\n    The path after the origin value has to start with a `/`\n\n```bash\ngnmic sub --path \"openconfig-interfaces:/interfaces/interface\"\n```\n\n#### target\n\nWith the optional `[--target]` flag it is possible to supply the [path target](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target) information in the prefix field of the SubscriptionList message.\n\n#### set-target\n\nThe `[--set-target]` flag is used to set the SubscribeRequest Prefix target value to the configured target name stripped of the port number.\n\n#### model\n\nThe `[--model]` flag is used to specify the schema definition modules that the target should use when extracting the data to stream back.\n\n#### qos\n\nThe `[--qos]` flag specifies the packet marking that is to be used for the responses to the subscription request. Default marking is set to `20`. If qos marking is not supported by a target the marking can be disabled by setting the value to `0`.\n\n#### mode\n\nThe `[--mode]` mode flag specifies the mode of subscription to be created.\n\nThis may be one of:\n[ONCE](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35151-once-subscriptions), [STREAM](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35152-stream-subscriptions) or [POLL](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35153-poll-subscriptions).\n\nIt is case insensitive and defaults to `STREAM`.\n\n#### stream subscription mode\n\nThe `[--stream-mode]` flag is used to specify the stream subscription mode.\n\nThis may be one of: [ON_CHANGE, SAMPLE or TARGET_DEFINED](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35152-stream-subscriptions)\n\nThis flag applies only if `--mode` is set to `STREAM`. 
It is case insensitive and defaults to `SAMPLE`.\n\n#### sample interval\n\nThe `[--sample-interval]` flag is used to specify the sample interval to be used by the target to send samples to the client.\n\nThis flag applies only in case `--mode` is set to `STREAM` and `--stream-mode` is set to `SAMPLE`.\n\nValid formats: `1s, 1m30s, 1h`. Defaults to `0s` which is the lowest interval supported by a target.\n\n#### heartbeat interval\n\nThe `[--heartbeat-interval]` flag is used to specify the server heartbeat interval.\n\nThe heartbeat interval value can be specified along with `ON_CHANGE` or `SAMPLE` stream subscriptions modes.\n\n* `ON_CHANGE`: The value of the data item(s) MUST be re-sent once per heartbeat interval regardless of whether the value has changed or not.\n* `SAMPLE`: The target MUST generate one telemetry update per heartbeat interval, regardless of whether the `--suppress-redundant` flag is set to true.\n\n#### quiet\n\nWith `[--quiet]` flag set `gnmic` will not output subscription responses to `stdout`. The `--quiet` flag is useful when `gnmic` exports the received data to one of the export providers.\n\n#### suppress redundant\n\nWhen the `[--suppress-redundant]` flag is set to true, the target SHOULD NOT generate a telemetry update message unless the value of the path being reported on has changed since the last update was generated.\n\nThis flag applies only in case `--mode` is set to `STREAM` and `--stream-mode` is set to `SAMPLE`.\n\n#### updates only\n\nWhen the `[--updates-only]` flag is set to true, the target MUST not transmit the current state of the paths that the client has subscribed to, but rather should send only updates to them.\n\n#### name\n\nThe `[--name]` flag is used to trigger one or multiple subscriptions already defined in the configuration file see [defining subscriptions](../user_guide/subscriptions.md)\n\n#### output\n\nThe `[--output]` flag is used to select one or multiple output already defined in the configuration file. 
\n\nOutputs defined under target take precedence over this flag, see [defining outputs](../user_guide/outputs/output_intro.md) and [defining targets](../user_guide/multi_targets)\n\n#### watch-config\n\nThe `[--watch-config]` flag is used to enable automatic target loading from the configuration source at runtime. \n\nOn each configuration change, gnmic reloads the list of targets, subscribes to new targets and/or deletes subscriptions to the deleted ones.\n\nOnly addition and deletion of targets are currently supported, changes in an existing target config are not possible.\n\n#### backoff\n\nThe `[--backoff]` flag is used to specify a duration between consecutive subscriptions towards targets. It defaults to `0s`  meaning all subscriptions are started in parallel.\n\nIf a locker is configured, the backoff timer is set to `100ms` by default.\n\n#### lock-retry\n\nThe `[--lock-retry]` flag is a duration used to set the wait time between consecutive lock attempts. Defaults to `5s`.\n\n#### history-snapshot\n\nThe `[--history-snapshot]` flag sets the snapshot value in the subscribe request [gNMI History extension](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md).\n\nThe value can be either nanoseconds since Unix epoch or a date in RFC3339 format.\n\n#### history-start\n\nThe `[--history-start]` flag sets the start value in the subscribe request Time Range [gNMI History extension](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md).\n\nThe value can be either nanoseconds since Unix epoch or a date in RFC3339 format.\n\n#### history-end\n\nThe `[--history-end]` flag sets the end value in the subscribe request Time Range [gNMI History extension](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md).\n\n#### depth\n\nThe `[--depth]` flag sets the gNMI extension depth value as defined [here](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-depth.md)\n\n### Examples\n\n#### 1. 
streaming, target-defined, 10s interval\n\n```bash\ngnmic -a <ip:port> sub --path /state/port[port-id=*]/statistics\n```\n\n#### 2. streaming, sample, 30s interval\n\n```bash\ngnmic -a <ip:port> sub --path \"/state/port[port-id=*]/statistics\" \\\n                       --sample-interval 30s\n```\n\n#### 3. streaming, on-change, heartbeat interval 1min\n\n```bash\ngnmic -a <ip:port> sub --path \"/state/port[port-id=*]/statistics\" \\\n                       --stream-mode on-change \\\n                       --heartbeat-interval 1m\n```\n\n#### 4. once subscription\n\n```bash\ngnmic -a <ip:port> sub --path \"/state/port[port-id=*]/statistics\" \\\n                       --mode once\n```\n\n<script\nid=\"asciicast-319608\" src=\"https://asciinema.org/a/319608.js\" async>\n</script>\n"
  },
  {
    "path": "docs/deployments/clusters/containerlab/cluster_with_gnmi_server_and_prometheus_output.md",
    "content": "The purpose of this deployment is to achieve __redundancy__, __high-availability__ and __data aggregation__ via clustering.\n\nThis deployment example includes:\n\n- A 3 instances [gNMIc cluster](../../../user_guide/HA.md),\n- A standalone `gNMIc` instance.\n- A [Prometheus](https://prometheus.io/) Server\n- A [Grafana](https://grafana.com/docs/) Server\n- A [Consul](https://www.consul.io/docs/intro) Server\n\nThe leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro)\n\nAll members of the cluster expose a gNMI Server that the single gNMIc instance will use to aggregate the collected data.\n\nThe aggregation `gNMIc` instance exposes a Prometheus output that is registered in `Consul` and is discoverable by the Prometheus server.\n\nThe whole lab is pretty much self organising:\n\n- The `gNMIc` cluster instances discover the targets dynamically using a [Docker Loader](../../../user_guide/targets/target_discovery/docker_discovery.md)\n- The `gNMIc` standalone instance, discovers the cluster instance using a [Consul Loader](../../../user_guide/targets/target_discovery/consul_discovery.md)\n- The Prometheus server discovers gNMIc's Prometheus output using [Consul Service Discovery](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config)\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:1,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/clab_cluster_gnmi_server.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" 
src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fclab_cluster_gnmi_server.drawio\" async></script>\n\n\n\nDeployment files:\n\n- [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/4.gnmi-server/containerlab/gnmi-server.clab.yaml)\n- [gNMIc cluster config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/4.gnmi-server/containerlab/gnmic.yaml)\n- [gNMIc aggregator config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/4.gnmi-server/containerlab/gnmic-agg.yaml)\n- [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/4.gnmi-server/containerlab/prometheus/prometheus.yaml)\n- [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/4.gnmi-server/containerlab/grafana/datasources/datasource.yaml)\n\nDeploy it with:\n\n```bash\ngit clone https://github.com/openconfig/gnmic.git\ncd gnmic/examples/deployments/2.clusters/4.gnmi-server/containerlab\nsudo clab deploy -t gnmi-server.clab.yaml\n```\n\n```text\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+\n| #  |          Name           | Container ID |            Image             | Kind  | Group |  State  |  IPv4 Address   |     IPv6 Address      |\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+\n|  1 | clab-lab24-agg-gnmic    | 2e9cc2821b07 | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.7/24  | 2001:172:20:20::7/64  |\n|  2 | clab-lab24-consul-agent | c17d31d5f41b | consul:latest                | linux |       | running | 172.20.20.2/24  | 2001:172:20:20::2/64  |\n|  3 | clab-lab24-gnmic1       | 3d56e09955f2 | 
ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.4/24  | 2001:172:20:20::4/64  |\n|  4 | clab-lab24-gnmic2       | eba24dacea36 | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.3/24  | 2001:172:20:20::3/64  |\n|  5 | clab-lab24-gnmic3       | caf473f500f6 | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.6/24  | 2001:172:20:20::6/64  |\n|  6 | clab-lab24-grafana      | eaa224e62243 | grafana/grafana:latest       | linux |       | running | 172.20.20.8/24  | 2001:172:20:20::8/64  |\n|  7 | clab-lab24-leaf1        | 6771dc8d3786 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.10/24 | 2001:172:20:20::a/64  |\n|  8 | clab-lab24-leaf2        | 5cfb1cf68958 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.14/24 | 2001:172:20:20::e/64  |\n|  9 | clab-lab24-leaf3        | c438f734e44d | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.19/24 | 2001:172:20:20::13/64 |\n| 10 | clab-lab24-leaf4        | ae4321825a03 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.17/24 | 2001:172:20:20::11/64 |\n| 11 | clab-lab24-leaf5        | ee7a520fd844 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.18/24 | 2001:172:20:20::12/64 |\n| 12 | clab-lab24-leaf6        | 59c3c515ef35 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.9/24  | 2001:172:20:20::9/64  |\n| 13 | clab-lab24-leaf7        | 111f858b19fd | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.22/24 | 2001:172:20:20::16/64 |\n| 14 | clab-lab24-leaf8        | 0ecc69891eb4 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.20/24 | 2001:172:20:20::14/64 |\n| 15 | clab-lab24-prometheus   | 357821ec726e | prom/prometheus:latest       | linux |       | running | 172.20.20.5/24  | 2001:172:20:20::5/64  |\n| 16 | clab-lab24-spine1       | 0f5f6f6dc5fa | ghcr.io/nokia/srlinux        | srl   |       | running | 
172.20.20.13/24 | 2001:172:20:20::d/64  |\n| 17 | clab-lab24-spine2       | b718503d3b3f | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.15/24 | 2001:172:20:20::f/64  |\n| 18 | clab-lab24-spine3       | e02f18d0e3ff | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.11/24 | 2001:172:20:20::b/64  |\n| 19 | clab-lab24-spine4       | 3347cba3f277 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.12/24 | 2001:172:20:20::c/64  |\n| 20 | clab-lab24-super-spine1 | 4abc7bcaf43c | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.16/24 | 2001:172:20:20::10/64 |\n| 21 | clab-lab24-super-spine2 | 5b2f5f153d43 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.21/24 | 2001:172:20:20::15/64 |\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+\n```\nCheck the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) and [gNMI Server](../../../user_guide/gnmi_server.md) documentation pages for more configuration options\n"
  },
  {
    "path": "docs/deployments/clusters/containerlab/cluster_with_influxdb_output.md",
    "content": "The purpose of this deployment is to achieve __redundancy__, __high-availability__ via clustering.\n\nThis deployment example includes:\n\n- A 3 instances [gNMIc cluster](../../../user_guide/HA.md),\n- A [InfluxDB](https://www.influxdata.com/) Server \n- A [Grafana](https://grafana.com/docs/) Server\n- A [Consul](https://www.consul.io/docs/intro) Server\n\nThe leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro)\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:0,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/clab_cluster_deployments&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fclab_cluster_deployments\" async></script>\n\n\nDeployment files:\n\n- [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/1.influxdb-output/containerlab/lab21.clab.yaml)\n\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/1.influxdb-output/containerlab/gnmic.yaml)\n\n- [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/1.influxdb-output/containerlab/grafana/datasources/datasource.yaml)\n\nDeploy it with:\n\n```bash\ngit clone https://github.com/openconfig/gnmic.git\ncd gnmic/examples/deployments/2.clusters/1.influxdb-output/containerlab\nsudo clab deploy -t lab21.clab.yaml\n```\n\n```text\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+\n| #  |          Name  
         | Container ID |            Image             | Kind  | Group |  State  |  IPv4 Address   |     IPv6 Address      |\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+\n|  1 | clab-lab21-consul-agent | a6f6eb70965f | consul:latest                | linux |       | running | 172.20.20.7/24  | 2001:172:20:20::7/64  |\n|  2 | clab-lab21-gnmic1       | 9758b0761431 | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.5/24  | 2001:172:20:20::5/64  |\n|  3 | clab-lab21-gnmic2       | 6d6ae91c64bf | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.2/24  | 2001:172:20:20::2/64  |\n|  4 | clab-lab21-gnmic3       | 5df100a9fa73 | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.4/24  | 2001:172:20:20::4/64  |\n|  5 | clab-lab21-grafana      | fe51bda1830c | grafana/grafana:latest       | linux |       | running | 172.20.20.3/24  | 2001:172:20:20::3/64  |\n|  6 | clab-lab21-influxdb     | 20712484d835 | influxdb:latest              | linux |       | running | 172.20.20.6/24  | 2001:172:20:20::6/64  |\n|  7 | clab-lab21-leaf1        | ce084f636942 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.14/24 | 2001:172:20:20::e/64  |\n|  8 | clab-lab21-leaf2        | 5cbaba4bc9ff | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.11/24 | 2001:172:20:20::b/64  |\n|  9 | clab-lab21-leaf3        | a5e92ca08c7e | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.8/24  | 2001:172:20:20::8/64  |\n| 10 | clab-lab21-leaf4        | 1ccfe0082b15 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.12/24 | 2001:172:20:20::c/64  |\n| 11 | clab-lab21-leaf5        | 7fd4144277a0 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.9/24  | 2001:172:20:20::9/64  |\n| 12 | clab-lab21-leaf6        | cb4df0d609db | ghcr.io/nokia/srlinux        | srl   |   
    | running | 172.20.20.13/24 | 2001:172:20:20::d/64  |\n| 13 | clab-lab21-leaf7        | 8f09b622365f | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.19/24 | 2001:172:20:20::13/64 |\n| 14 | clab-lab21-leaf8        | 0ab91010b4a7 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.18/24 | 2001:172:20:20::12/64 |\n| 15 | clab-lab21-spine1       | 86d00f11b944 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.15/24 | 2001:172:20:20::f/64  |\n| 16 | clab-lab21-spine2       | 90cf49595ad2 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.20/24 | 2001:172:20:20::14/64 |\n| 17 | clab-lab21-spine3       | 1c694820eb88 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.16/24 | 2001:172:20:20::10/64 |\n| 18 | clab-lab21-spine4       | 1e3eac3de55f | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.10/24 | 2001:172:20:20::a/64  |\n| 19 | clab-lab21-super-spine1 | aafc478de31d | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.21/24 | 2001:172:20:20::15/64 |\n| 20 | clab-lab21-super-spine2 | bb27b743c97f | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.17/24 | 2001:172:20:20::11/64 |\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+\n```\n\nCheck the [InfluxDB Output](../../../user_guide/outputs/influxdb_output.md) documentation page for more configuration options.\n"
  },
  {
    "path": "docs/deployments/clusters/containerlab/cluster_with_nats_input_and_prometheus_output.md",
    "content": "The purpose of this deployment is to achieve __redundancy__, __high-availability__ as well as __data replication__.\n\nThe redundancy and high-availability are guaranteed by deploying a `gnmic` cluster.\n\nThe data replication is achieved using a `NATS` server acting as both a gnmic input and output.\n\nThis deployment example includes a:\n\n- 3 instances [gNMIc cluster](../../../user_guide/HA.md), \n- A [NATS](https://nats.io/) Server acting as both [input](../../../user_guide/inputs/nats_input.md) and [output](../../../user_guide/outputs/nats_output.md) \n- A [Prometheus](https://prometheus.io/) Server\n- A [Grafana](https://grafana.com/docs/) Server\n- A [Consul](https://www.consul.io/docs/intro) Server\n\nThe leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro)\n\nEach `gnmic` instance outputs the streamed gNMI data to NATS, and reads back all the data from the same NATS server (including its own),\n\nThis effectively guarantees that each instance holds the data streamed by the whole cluster.\n\nLike in the previous examples, each `gnmic` instance will also register its Prometheus output service in `Consul`.\n\nBut before doing so, it will attempt to acquire a key lock `gnmic/$CLUSTER_NAME/prometheus-output`,  (`use-lock: true`)\n\n```yaml\nprom-output:\n  type: prometheus\n  listen: \":9804\"\n  service-registration:\n    address: consul-agent:8500\n    use-lock: true # <===\n\n```\nSince only one instance can hold a lock, only one prometheus output is registered, so only one output is scraped by Prometheus.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" 
data-mxgraph=\"{&quot;page&quot;:0,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/cluster_clab_prom_nats.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fcluster_clab_prom_nats.drawio\" async></script>\n\nDeployment files:\n\n- [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/lab23.clab.yaml)\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/gnmic.yaml)\n- [prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/prometheus/prometheus.yaml)\n- [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/grafana/datasources/datasource.yaml)\n\nDeploy it with:\n\n```bash\ngit clone https://github.com/openconfig/gnmic.git\ncd gnmic/examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab\nsudo clab deploy -t lab23.clab.yaml\n```\n\n```text\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+\n| #  |          Name           | Container ID |            Image             | Kind  | Group |  State  |  IPv4 Address   |     IPv6 Address      |\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+\n|  1 | clab-lab23-consul-agent | cfdaf19e9435 | consul:latest                | linux |       | running 
| 172.20.20.8/24  | 2001:172:20:20::8/64  |\n|  2 | clab-lab23-gnmic1       | 7e2a4060a1ae | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.3/24  | 2001:172:20:20::3/64  |\n|  3 | clab-lab23-gnmic2       | 9e27e4620104 | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.4/24  | 2001:172:20:20::4/64  |\n|  4 | clab-lab23-gnmic3       | bb7471eb5f49 | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.5/24  | 2001:172:20:20::5/64  |\n|  5 | clab-lab23-grafana      | 3fbf7755c49e | grafana/grafana:latest       | linux |       | running | 172.20.20.2/24  | 2001:172:20:20::2/64  |\n|  6 | clab-lab23-leaf1        | a61624d5312b | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.21/24 | 2001:172:20:20::15/64 |\n|  7 | clab-lab23-leaf2        | ef86f701b379 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.14/24 | 2001:172:20:20::e/64  |\n|  8 | clab-lab23-leaf3        | 352433a2ab3b | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.22/24 | 2001:172:20:20::16/64 |\n|  9 | clab-lab23-leaf4        | 5ddba813d36f | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.19/24 | 2001:172:20:20::13/64 |\n| 10 | clab-lab23-leaf5        | aad20f4b9969 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.11/24 | 2001:172:20:20::b/64  |\n| 11 | clab-lab23-leaf6        | 757c76527a75 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.15/24 | 2001:172:20:20::f/64  |\n| 12 | clab-lab23-leaf7        | d85e94aaa0dd | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.10/24 | 2001:172:20:20::a/64  |\n| 13 | clab-lab23-leaf8        | ef6210c0e5aa | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.20/24 | 2001:172:20:20::14/64 |\n| 14 | clab-lab23-nats         | f1a1f351bbf8 | nats:latest                  | linux |       | running | 172.20.20.6/24  | 2001:172:20:20::6/64  |\n| 15 | 
clab-lab23-prometheus   | f7f194a934c5 | prom/prometheus:latest       | linux |       | running | 172.20.20.7/24  | 2001:172:20:20::7/64  |\n| 16 | clab-lab23-spine1       | ddbf4e804097 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.16/24 | 2001:172:20:20::10/64 |\n| 17 | clab-lab23-spine2       | f48323a4de88 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.17/24 | 2001:172:20:20::11/64 |\n| 18 | clab-lab23-spine3       | 2a65eed26a7e | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.18/24 | 2001:172:20:20::12/64 |\n| 19 | clab-lab23-spine4       | ea59d0e5d9ed | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.12/24 | 2001:172:20:20::c/64  |\n| 20 | clab-lab23-super-spine1 | 37af6cd04dd8 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.9/24  | 2001:172:20:20::9/64  |\n| 21 | clab-lab23-super-spine2 | 3408891a0718 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.13/24 | 2001:172:20:20::d/64  |\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+\n```\nCheck the  [NATS Output](../../../user_guide/outputs/nats_output.md), [NATS Input](../../../user_guide/inputs/nats_input.md) and  [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) documentation pages for more configuration options.\n"
  },
  {
    "path": "docs/deployments/clusters/containerlab/cluster_with_prometheus_output.md",
    "content": "The purpose of this deployment is to achieve __redundancy__, __high-availability__ via clustering.\n\nThis deployment example includes:\n\n- A 3 instances [gNMIc cluster](../../../user_guide/HA.md),\n- A [Prometheus](https://prometheus.io/) Server\n- A [Grafana](https://grafana.com/docs/) Server\n- A [Consul](https://www.consul.io/docs/intro) Server\n\nThe leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro)\n\n`gnmic` will also register its Prometheus output service in `Consul` so that Prometheus can discover which Prometheus servers are available to be scraped.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:1,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/clab_cluster_deployments&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fclab_cluster_deployments\" async></script>\n\n\n\nDeployment files:\n\n- [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/containerlab/lab22.clab.yaml)\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/containerlab/gnmic.yaml)\n- [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/containerlab/prometheus/prometheus.yaml)\n- [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/containerlab/grafana/datasources/datasource.yaml)\n\nDeploy it with:\n\n```bash\ngit clone 
https://github.com/openconfig/gnmic.git\ncd gnmic/examples/deployments/2.clusters/2.prometheus-output/containerlab\nsudo clab deploy -t lab22.clab.yaml\n```\n\n```text\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+\n| #  |          Name           | Container ID |            Image             | Kind  | Group |  State  |  IPv4 Address   |     IPv6 Address      |\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+\n|  1 | clab-lab22-consul-agent | 542169159f8b | consul:latest                | linux |       | running | 172.20.20.2/24  | 2001:172:20:20::2/64  |\n|  2 | clab-lab22-gnmic1       | c04b2b597e7a | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.4/24  | 2001:172:20:20::4/64  |\n|  3 | clab-lab22-gnmic2       | 49604280d82d | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.3/24  | 2001:172:20:20::3/64  |\n|  4 | clab-lab22-gnmic3       | 49e910460cad | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.5/24  | 2001:172:20:20::5/64  |\n|  5 | clab-lab22-grafana      | c0a37b012d29 | grafana/grafana:latest       | linux |       | running | 172.20.20.7/24  | 2001:172:20:20::7/64  |\n|  6 | clab-lab22-leaf1        | c6429b499c11 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.19/24 | 2001:172:20:20::13/64 |\n|  7 | clab-lab22-leaf2        | 62f235b39a62 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.17/24 | 2001:172:20:20::11/64 |\n|  8 | clab-lab22-leaf3        | 78d3b4e62a6b | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.11/24 | 2001:172:20:20::b/64  |\n|  9 | clab-lab22-leaf4        | 8c5d80b4d916 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.13/24 | 2001:172:20:20::d/64  |\n| 10 | clab-lab22-leaf5        | 508d4d2389b4 
| ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.16/24 | 2001:172:20:20::10/64 |\n| 11 | clab-lab22-leaf6        | 14ce19a8c5da | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.9/24  | 2001:172:20:20::9/64  |\n| 12 | clab-lab22-leaf7        | c4f6e586baa3 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.20/24 | 2001:172:20:20::14/64 |\n| 13 | clab-lab22-leaf8        | 1e00e6346bf1 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.12/24 | 2001:172:20:20::c/64  |\n| 14 | clab-lab22-prometheus   | 5ed38ce63113 | prom/prometheus:latest       | linux |       | running | 172.20.20.6/24  | 2001:172:20:20::6/64  |\n| 15 | clab-lab22-spine1       | 38247b0f81e7 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.10/24 | 2001:172:20:20::a/64  |\n| 16 | clab-lab22-spine2       | 76bf66748acd | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.21/24 | 2001:172:20:20::15/64 |\n| 17 | clab-lab22-spine3       | 5c8776e2fc77 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.15/24 | 2001:172:20:20::f/64  |\n| 18 | clab-lab22-spine4       | de67e5b92f36 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.14/24 | 2001:172:20:20::e/64  |\n| 19 | clab-lab22-super-spine1 | 00f0aee0265a | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.18/24 | 2001:172:20:20::12/64 |\n| 20 | clab-lab22-super-spine2 | 418888eb7325 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.8/24  | 2001:172:20:20::8/64  |\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+\n```\nCheck the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) documentation page for more configuration options.\n"
  },
  {
    "path": "docs/deployments/clusters/docker-compose/cluster_with_influxdb_output.md",
    "content": "The purpose of this deployment is to achieve __redundancy__, __high-availability__ via clustering.\n\nThis deployment example includes:\n\n- A 3 instances [`gnmic` cluster](../../../user_guide/HA.md),\n- A single [InfluxDB output](../../../user_guide/outputs/influxdb_output.md)\n\nThe leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro)\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/cluster_influxdb.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fcluster_influxdb.drawio\" async></script>\n\nDeployment files:\n\n- [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/1.influxdb-output/docker-compose/docker-compose.yaml)\n\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/1.influxdb-output/docker-compose/gnmic.yaml)\n\nDownload the files, update the `gnmic` config files with the desired subscriptions and targets.\n\nDeploy it with:\n\n```bash\nsudo docker-compose up -d\n```\n\nCheck the [InfluxDB Output](../../../user_guide/outputs/influxdb_output.md) documentation page for more configuration options.\n"
  },
  {
    "path": "docs/deployments/clusters/docker-compose/cluster_with_nats_input_and_prometheus_output.md",
    "content": "The purpose of this deployment is to achieve __redundancy__, __high-availability__ as well as __data replication__.\n\nThe redundancy and high-availability are guaranteed by deploying a `gnmic` cluster.\n\nThe data replication is achieved using a `NATS` server acting as both a gnmic input and output.\n\nThis deployment example includes a:\n\n- 3 instances [`gnmic` cluster](../../../user_guide/HA.md), \n- A NATS [input](../../../user_guide/inputs/nats_input.md) and [output](../../../user_guide/outputs/nats_output.md) \n- A [Prometheus output](../../../user_guide/outputs/prometheus_output.md)\n\nThe leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro)\n\nEach `gnmic` instance outputs the streamed gNMI data to NATS, and reads back all the data from the same NATS server (including its own),\n\nThis effectively guarantees that each instance holds the data streamed by the whole cluster.\n\nLike in the previous examples, each `gnmic` instance will also register its Prometheus output service in `Consul`.\n\nBut before doing so, it will attempt to acquire a key lock `gnmic/$CLUSTER_NAME/prometheus-output`,  (`use-lock: true`)\n\n```yaml\nprom-output:\n  type: prometheus\n  listen: \":9804\"\n  service-registration:\n    address: consul-agent:8500\n    use-lock: true # <===\n```\n\nSince only one instance can hold a lock, only one prometheus output is registered, so only one output is scraped by Prometheus.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/cluster_nats_prometheus.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" 
src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fcluster_nats_prometheus.drawio\" async></script>\n\nDeployment files:\n\n- [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/docker-compose/docker-compose.yaml)\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/docker-compose/gnmic.yaml)\n- [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/docker-compose/prometheus/prometheus.yaml)\n\nDownload the files, update the `gnmic` config files with the desired subscriptions and targets.\n\n!!! note\n    The targets outputs list should include the nats output name\n\nDeploy it with:\n\n```bash\nsudo docker-compose up -d\n```\n\nCheck the [NATS Output](../../../user_guide/outputs/nats_output.md), [NATS Input](../../../user_guide/inputs/nats_input.md) and [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) documentation pages for more configuration options.\n"
  },
  {
    "path": "docs/deployments/clusters/docker-compose/cluster_with_prometheus_output.md",
    "content": "The purpose of this deployment is to achieve __redundancy__, __high-availability__ via clustering.\n\nThis deployment example includes:\n\n- A 3 instances [`gnmic` cluster](../../../user_guide/HA.md),\n- A single [Prometheus output](../../../user_guide/outputs/prometheus_output.md)\n\nThe leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro)\n\n`gnmic` will also register its Prometheus output service in `Consul` so that Prometheus can discover which Prometheus servers are available to be scraped\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/cluster_prometheus&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fcluster_prometheus\" async></script>\n\nDeployment files:\n\n- [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/docker-compose/docker-compose.yaml)\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/docker-compose/gnmic.yaml)\n- [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/docker-compose/prometheus/prometheus.yaml)\n\nDownload the files, update the `gnmic` config files with the desired subscriptions and targets.\n\nDeploy it with:\n\n```bash\nsudo docker-compose up -d\n```\n\nCheck the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) documentation page for more configuration options.\n"
  },
  {
    "path": "docs/deployments/clusters/kubernetes/cluster_with_prometheus_output.md",
    "content": "The purpose of this deployment is to achieve __redundancy__, __high-availability__ using Kubernetes and `gnmic`'s internal clustering mechanism.\n\nThis deployment example includes:\n\n- A 3 instances [`gnmic` cluster](../../../user_guide/HA.md),\n- A single [Prometheus output](../../../user_guide/outputs/prometheus_output.md)\n\nThe leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro)\n\n`gnmic` can be discovered by `Prometheus` using Kubernetes service discovery. Kubernetes uses a [headless service](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services) with a StatefulSet to disable the internal load balancing across multiple pods of the same StatefulSet and allow `Prometheus` to discover all instances of `gnmic`.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/cluster_prometheus_kubernetes&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fcluster_prometheus_kubernetes\" async></script>\n\n<ins>[Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator#quickstart) must be installed prior to `gnmic` deployment.</ins> (Can also be installed via [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) helm chart or [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus))\n\nDeployment files:\n\n- 
[gnmic](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/kubernetes/gnmic-app)\n- [consul](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/kubernetes/consul)\n- [prometheus servicemonitor](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/kubernetes/prometheus/servicemonitor.yaml)\n\nDownload the files, update the `gnmic` ConfigMap with the desired subscriptions and targets and make sure that `prometheus servicemonitor` is in a namespace or has a label that `Prometheus operator` is watching.\n\nDeploy it with:\n\n```bash\nkubectl create ns gnmic\nkubectl apply -n gnmic -f kubernetes/consul\nkubectl apply -n gnmic -f kubernetes/gnmic-app\n# Before deploying the Prometheus ServiceMonitor\n# Install Prometheus operator or kube-prometheus or kube-prometheus-stack helm chart\n# Otherwise the command will fail\nkubectl apply -f kubernetes/prometheus\n```\n\nCheck the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) documentation page for more configuration options.\n"
  },
  {
    "path": "docs/deployments/deployments_intro.md",
    "content": "There are numerous ways `gnmic` can be deployed, each fulfilling a specific use case. \n\nWhether it is gNMI telemetry collection and export to a single output, \nor clustered data pipelines with high availability and redundancy, \nthe below examples should cover the most common use cases.\n\nIn this section you will find multiple deployment examples, using [docker-compose](https://docs.docker.com/compose/) or [containerlab](https://containerlab.srlinux.dev/).\nEach deployment comes with:\n\n- a `docker-compose` or `clab` file \n- one or multiple `gnmic` configuration file(s)\n- extra configuration files if required by the use case (e.g: prometheus, grafana,...)\n\nThe [containerlab](https://containerlab.srlinux.dev/) examples come with a fabric deployed using Nokia's [SR Linux](https://learn.srlinux.dev)\n\nIf you don't find an example that fits your needs, feel free to open an issue on [github](https://github.com/openconfig/gnmic/issues/new)\n\n### Single Instance\n\nThese examples showcase single `gnmic` instance deployments with the most commonly used outputs\n\n- NATS output: [clab](single-instance/containerlab/nats-output.md), [docker-compose](single-instance/docker-compose/nats-output.md) \n- Kafka output: [clab](single-instance/containerlab/kafka-output.md), [docker-compose](single-instance/docker-compose/kafka-output.md)\n- InfluxDB output: [clab](single-instance/containerlab/influxdb-output.md), [docker-compose](single-instance/docker-compose/influxdb-output.md)\n- Prometheus output: [clab](single-instance/containerlab/prometheus-output.md), [docker-compose](single-instance/docker-compose/prometheus-output.md)\n- Multiple outputs: [clab](single-instance/containerlab/multiple-outputs.md), [docker-compose](single-instance/docker-compose/multiple-outputs.md)\n\n### Clusters\n\n`gnmic` can also be deployed in [clustered mode](../user_guide/HA.md) to either load share the targets connections between multiple instances and offer connection 
resiliency,\nand/or replicate the collected data among all the cluster members\n\n- InfluxDB output: [clab](clusters/containerlab/cluster_with_influxdb_output.md), [docker-compose](clusters/docker-compose/cluster_with_influxdb_output.md)\n- Prometheus output: [clab](clusters/containerlab/cluster_with_prometheus_output.md), [docker-compose](clusters/docker-compose/cluster_with_prometheus_output.md)\n- Prometheus output with data replication: [clab](clusters/containerlab/cluster_with_nats_input_and_prometheus_output.md), [docker-compose](clusters/docker-compose/cluster_with_nats_input_and_prometheus_output.md)\n\n### Pipelines\n\nBuilding data pipelines using `gnmic` is achieved using the [outputs](../user_guide/outputs/output_intro.md) and [inputs](../user_guide/inputs/input_intro.md) plugins.\n\nYou will be able to process the data in a serial fashion, split it for parallel processing or mirror it to create a forked pipeline.\n\n- NATS to Prometheus: [docker-compose](pipelines/docker-compose/nats_prometheus.md)\n- NATS to InfluxDB: [docker-compose](pipelines/docker-compose/nats_influxdb.md)\n- Clustered pipeline: [docker-compose](pipelines/docker-compose/gnmic_cluster_nats_prometheus.md)\n- Forked pipeline: [docker-compose](pipelines/docker-compose/forked_pipeline.md)\n"
  },
  {
    "path": "docs/deployments/pipelines/docker-compose/forked_pipeline.md",
    "content": "\nThe purpose of this deployment is to create a forked data pipeline using `NATS` , `Influxdb` and `Prometheus`\n\nThe example includes 3 `gnmic` instances.\n\n- The first, called `collector`, is responsible for streaming the gNMI data from the targets and output it to a `NATS` server.\n- The second and third, called `relay1` and `relay2`, reads the data from `NATS` and writes it to either `InfluxDB` or `Prometheus`\n\nThis deployment enables a few use cases:\n\n- Apply different [processors](../../../user_guide/event_processors/intro.md) by the collector and relay.\n- Scale the collector and relay separately, see this [example](gnmic_cluster_nats_prometheus.md) for a scaled-out version.\n- Fork the data into a separate pipeline for a different use case.\n\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/pipeline_gnmic_nats_gnmic_prometheus_gnmic_influxdb.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fpipeline_gnmic_nats_gnmic_prometheus_gnmic_influxdb.drawio\" async></script>\n\nDeployment files:\n\n- [docker compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/docker-compose/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/docker-compose.yaml)\n\n- [gnmic collector config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/docker-compose/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/gnmic-collector.yaml)\n- [gnmic relay1 
config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/docker-compose/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/gnmic-relay1.yaml)\n- [gnmic relay2 config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/docker-compose/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/gnmic-relay2.yaml)\n- [prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/docker-compose/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/prometheus/prometheus.yaml)\n\nDownload the files, update the `gnmic` collector config files with the desired subscriptions and targets.\n\nDeploy it with:\n\n```bash\nsudo docker-compose up -d\n```\n\nCheck the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) and [NATS Input](../../../user_guide/inputs/nats_input.md) documentation pages for more configuration options\n"
  },
  {
    "path": "docs/deployments/pipelines/docker-compose/gnmic_cluster_nats_prometheus.md",
    "content": "\nThe purpose of this deployment is to create a clustered data pipeline using `NATS` and `Prometheus`.\nAchieving __redundancy__, __high-availability__ and __data replication__, all in clustered data pipeline.\n\nThe example is divided in 2 parts:\n\n- Clustered collectors and single relay\n- Clustered collectors and clustered relays\n\nThese 2 examples are essentially scaled-out versions of this [example](nats_prometheus.md)\n\n### Clustered collectors and single relay\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/pipeline_cluster_nats_prometheus.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fpipeline_cluster_nats_prometheus.drawio\" async></script>\n\nDeployment files:\n\n- [docker compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/docker-compose.yaml)\n- [gnmic collector config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/gnmic-collector.yaml)\n- [gnmic relay config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/gnmic-relay.yaml)\n- [prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/prometheus/prometheus.yaml)\n\nDownload the files, update the `gnmic` collectors config files with the desired 
subscriptions and targets.\n\nDeploy it with:\n\n```bash\nsudo docker-compose up -d\n```\n\nCheck the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) and [NATS Input](../../../user_guide/inputs/nats_input.md) documentation page for more configuration options\n\n### Clustered collectors and clustered relays\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/pipeline_cluster_nats_cluster_prometheus.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fpipeline_cluster_nats_cluster_prometheus.drawio\" async></script>\n\nDeployment files:\n\n- [docker compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/docker-compose.yaml)\n- [gnmic collector config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/gnmic-collector.yaml)\n- [gnmic relay config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/gnmic-relay.yaml)\n- [prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/prometheus/prometheus.yaml)\n\nDownload the files, update the `gnmic` collectors config files with the desired subscriptions and targets.\n\nDeploy it with:\n\n```bash\nsudo docker-compose up -d\n```\n\nCheck the [Prometheus 
Output](../../../user_guide/outputs/prometheus_output.md) and [NATS Input](../../../user_guide/inputs/nats_input.md) documentation page for more configuration options\n"
  },
  {
    "path": "docs/deployments/pipelines/docker-compose/nats_influxdb.md",
    "content": "The purpose of this deployment is to create data pipeline using `NATS` and `InfluxDB`\n\nThe example includes 2 `gnmic` instances.\n\n- The first, called `collector`, is responsible for streaming the gNMI data from the targets and output it to a `NATS` server.\n- The second, called `relay`, reads the data from `NATS` and writes it to `InfluxDB`\n\nThis deployment enables a few use cases:\n\n- Apply different [processors](../../../user_guide/event_processors/intro.md) by the collector and relay.\n- Scale the collector and relay separately, see this [example](gnmic_cluster_nats_prometheus.md) for a scaled-out version.\n- Fork the data into a separate pipeline for a different use case.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/pipeline_nats_influxdb.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fpipeline_nats_influxdb.drawio\" async></script>\n\nDeployment files:\n\n- [docker compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/docker-compose/docker-compose.yaml)\n\n- [gnmic collector config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/docker-compose/gnmic-collector.yaml)\n- [gnmic relay config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/docker-compose/gnmic-relay.yaml)\n\nDownload the files, update the `gnmic` collector config files with the desired subscriptions and 
targets.\n\nDeploy it with:\n\n```bash\nsudo docker-compose up -d\n```\n\nCheck the [InfluxDB Output](../../../user_guide/outputs/influxdb_output.md) and [NATS Input](../../../user_guide/inputs/nats_input.md) documentation page for more configuration options\n"
  },
  {
    "path": "docs/deployments/pipelines/docker-compose/nats_prometheus.md",
    "content": "\nThe purpose of this deployment is to create data pipeline using `NATS` and `Prometheus`\n\nThe example includes 2 `gnmic` instances.\n\n- The first, called `collector`, is responsible for streaming the gNMI data from the targets and output it to a `NATS` server.\n- The second, called `relay`, reads the data from `NATS` and writes it to `Prometheus`\n\nThis deployment enables a few use cases:\n\n- Apply different [processors](../../../user_guide/event_processors/intro.md) by the collector and relay.\n- Scale the collector and relay separately, see this [example](gnmic_cluster_nats_prometheus.md) for a scaled-out version.\n- Fork the data into a separate pipeline for a different use case.\n\n\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/pipeline_nats_prometheus.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fpipeline_nats_prometheus.drawio\" async></script>\n\nDeployment files:\n\n- [docker compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/docker-compose.yaml)\n\n- [gnmic collector config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/gnmic-collector.yaml)\n- [gnmic relay config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/gnmic-relay.yaml)\n\nDownload the files, update the `gnmic` collector config files with the desired 
subscriptions and targets.\n\nDeploy it with:\n\n```bash\nsudo docker-compose up -d\n```\n\nCheck the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) and [NATS Input](../../../user_guide/inputs/nats_input.md) documentation page for more configuration options\n"
  },
  {
    "path": "docs/deployments/single-instance/containerlab/influxdb-output.md",
    "content": "The purpose of this deployment is to collect gNMI data and write it to an `InfluxDB` instance.\n\nThis deployment example includes a single `gnmic` instance, a single [InfluxDB](https://www.influxdata.com/) server acting as an [InfluxDB output](../../../user_guide/outputs/influxdb_output.md) and a [Grafana](https://grafana.com/docs/) server\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:2,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/clab_deployments.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fclab_deployments.drawio\" async></script>\n\n\nDeployment files:\n\n- [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/3.influxdb-output/containerlab/influxdb.clab.yaml)\n\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/3.influxdb-output/containerlab/gnmic.yaml)\n\n- [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/3.influxdb-output/containerlab/grafana/datasources/datasource.yaml)\n\nThe deployed SR Linux nodes are discovered using Docker API and are loaded as gNMI targets.\nEdit the subscriptions section if needed.\n\n\nDeploy it with:\n\n```bash\ngit clone https://github.com/openconfig/gnmic.git\ncd gnmic/examples/deployments/1.single-instance/3.influxdb-output/containerlab\nsudo clab deploy -t 
influxdb.clab.yaml\n```\n\n```text\n+---+---------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+\n| # |        Name         | Container ID |            Image             | Kind  | Group |  State  |  IPv4 Address   |     IPv6 Address     |\n+---+---------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+\n| 1 | clab-lab13-gnmic    | 1ee4c75ff443 | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.3/24  | 2001:172:20:20::3/64 |\n| 2 | clab-lab13-grafana  | a932207780bb | grafana/grafana:latest       | linux |       | running | 172.20.20.2/24  | 2001:172:20:20::2/64 |\n| 3 | clab-lab13-influxdb | 0768ba6ca10b | influxdb:latest              | linux |       | running | 172.20.20.4/24  | 2001:172:20:20::4/64 |\n| 4 | clab-lab13-leaf1    | e0e2045fca7f | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.7/24  | 2001:172:20:20::7/64 |\n| 5 | clab-lab13-leaf2    | 75b8978e734c | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.6/24  | 2001:172:20:20::6/64 |\n| 6 | clab-lab13-leaf3    | 7b03eed78f5d | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.5/24  | 2001:172:20:20::5/64 |\n| 7 | clab-lab13-leaf4    | 19007ce81e04 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.8/24  | 2001:172:20:20::8/64 |\n| 8 | clab-lab13-spine1   | c044fc51196d | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.10/24 | 2001:172:20:20::a/64 |\n| 9 | clab-lab13-spine2   | bcfa52ad2772 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.9/24  | 2001:172:20:20::9/64 |\n+---+---------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+\n```\n\nCheck the [InfluxDB Output](../../../user_guide/outputs/influxdb_output.md) documentation page for more 
configuration options.\n"
  },
  {
    "path": "docs/deployments/single-instance/containerlab/kafka-output.md",
    "content": "The purpose of this deployment is to collect gNMI data and write it to a `Kafka` broker.\n\nMultiple 3rd Party systems (acting as a Kafka consumers) can then read the data from the `Kafka` broker for further processing.\n\nThis deployment example includes a single `gnmic` instance and a single [Kafka output](../../../user_guide/outputs/kafka_output.md)\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:1,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/clab_deployments.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fclab_deployments.drawio\" async></script>\n\nDeployment files:\n\n- [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/2.kafka-output/containerlab/kafka.clab.yaml)\n\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/2.kafka-output/containerlab/gnmic.yaml)\n\nThe deployed SR Linux nodes are discovered using Docker API and are loaded as gNMI targets.\nEdit the subscriptions section if needed.\n\nDeploy it with:\n\n```bash\ngit clone https://github.com/openconfig/gnmic.git\ncd gnmic/examples/deployments/1.single-instance/2.kafka-output/containerlab\nsudo clab deploy -t kafka.clab.yaml\n```\n\n```text\n+---+-----------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+\n| # |            Name             | Container ID |            Image             | Kind  | Group |  State  |  IPv4 Address   |     IPv6 Address     
|\n+---+-----------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+\n| 1 | clab-lab12-gnmic            | e79d31f92a7a | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.2/24  | 2001:172:20:20::2/64 |\n| 2 | clab-lab12-kafka-server     | 004a338cdb3d | bitnami/kafka:latest         | linux |       | running | 172.20.20.4/24  | 2001:172:20:20::4/64 |\n| 3 | clab-lab12-leaf1            | b9269bac3adf | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.7/24  | 2001:172:20:20::7/64 |\n| 4 | clab-lab12-leaf2            | baaeea0ad1a6 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.8/24  | 2001:172:20:20::8/64 |\n| 5 | clab-lab12-leaf3            | 08127014b3cd | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.5/24  | 2001:172:20:20::5/64 |\n| 6 | clab-lab12-leaf4            | da037997c5ff | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.10/24 | 2001:172:20:20::a/64 |\n| 7 | clab-lab12-spine1           | c3bcfe40fcc7 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.9/24  | 2001:172:20:20::9/64 |\n| 8 | clab-lab12-spine2           | 842b259d01b0 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.6/24  | 2001:172:20:20::6/64 |\n| 9 | clab-lab12-zookeeper-server | 5c89e48fdff1 | bitnami/zookeeper:latest     | linux |       | running | 172.20.20.3/24  | 2001:172:20:20::3/64 |\n+---+-----------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+\n```\n\nCheck the [Kafka Output](../../../user_guide/outputs/kafka_output.md) documentation page for more configuration options.\n"
  },
  {
    "path": "docs/deployments/single-instance/containerlab/multiple-outputs.md",
    "content": "The purpose of this deployment is to collect gNMI data and write it to multiple outputs.\n\nThis deployment example includes:\n\n- A single `gnmic` instance\n- A [Prometheus](../../../user_guide/outputs/prometheus_output.md) Server\n- An [InfluxDB](../../../user_guide/outputs/influxdb_output.md) Server\n- A [NATS](../../../user_guide/outputs/nats_output.md) Server\n- A [Kafka](../../../user_guide/outputs/kafka_output.md) Server\n- A [File](../../../user_guide/outputs/file_output.md) output\n- A [Consul Agent](https://www.consul.io/docs/agent)\n- A [Grafana Server](https://grafana.com/docs/)\n\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:4,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/clab_deployments.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fclab_deployments.drawio\" async></script>\n\n\nDeployment files:\n\n- [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/containerlab/multiple-outputs.clab.yaml)\n\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/containerlab/gnmic.yaml)\n\n- [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/containerlab/prometheus/prometheus.yaml)\n\n- [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/containerlab/grafana/datasources/datasource.yaml)\n\nDeploy it with:\n\n```bash\ngit clone 
https://github.com/openconfig/gnmic.git\ncd gnmic/examples/deployments/1.single-instance/5.multiple-outputs/containerlab\nsudo clab deploy -t multiple-outputs.clab.yaml\n```\n\n```text\n+----+-----------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+\n| #  |            Name             | Container ID |            Image             | Kind  | Group |  State  |  IPv4 Address   |     IPv6 Address     |\n+----+-----------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+\n|  1 | clab-lab15-consul-agent     | 14f864fb1da9 | consul:latest                | linux |       | running | 172.20.20.4/24  | 2001:172:20:20::4/64 |\n|  2 | clab-lab15-gnmic            | cfb8bfca7547 | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.6/24  | 2001:172:20:20::6/64 |\n|  3 | clab-lab15-grafana          | 56c19565e27c | grafana/grafana:latest       | linux |       | running | 172.20.20.2/24  | 2001:172:20:20::2/64 |\n|  4 | clab-lab15-influxdb         | f2d0b2186e10 | influxdb:latest              | linux |       | running | 172.20.20.9/24  | 2001:172:20:20::9/64 |\n|  5 | clab-lab15-kafka-server     | efe445dbf0f0 | bitnami/kafka:latest         | linux |       | running | 172.20.20.7/24  | 2001:172:20:20::7/64 |\n|  6 | clab-lab15-leaf1            | 42d57c79385e | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.10/24 | 2001:172:20:20::a/64 |\n|  7 | clab-lab15-leaf2            | e4b041046779 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.11/24 | 2001:172:20:20::b/64 |\n|  8 | clab-lab15-leaf3            | ba87204f2678 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.13/24 | 2001:172:20:20::d/64 |\n|  9 | clab-lab15-leaf4            | 327461ee913e | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.15/24 | 2001:172:20:20::f/64 
|\n| 10 | clab-lab15-nats             | 0363dae05edf | nats:latest                  | linux |       | running | 172.20.20.3/24  | 2001:172:20:20::3/64 |\n| 11 | clab-lab15-prometheus       | 44611ebe4a03 | prom/prometheus:latest       | linux |       | running | 172.20.20.8/24  | 2001:172:20:20::8/64 |\n| 12 | clab-lab15-spine1           | 8b2b430eea87 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.12/24 | 2001:172:20:20::c/64 |\n| 13 | clab-lab15-spine2           | 425bea3a243e | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.14/24 | 2001:172:20:20::e/64 |\n| 14 | clab-lab15-zookeeper-server | 91b546eb7bf9 | bitnami/zookeeper:latest     | linux |       | running | 172.20.20.5/24  | 2001:172:20:20::5/64 |\n+----+-----------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+\n```\n\nCheck the [gnmic outputs](../../../user_guide/outputs/output_intro.md) documentation page for more configuration options.\n"
  },
  {
    "path": "docs/deployments/single-instance/containerlab/nats-output.md",
    "content": "The purpose of this deployment is to collect gNMI data and write it to a `NATS` server.\n\nMultiple 3rd Party systems (acting as a NATS clients) can then read the data from the `NATS` server for further processing.\n\nThis deployment example includes a single `gnmic` instance and a single [NATS output](../../../user_guide/outputs/nats_output.md)\n\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:0,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/clab_deployments.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fclab_deployments.drawio\" async></script>\n\nDeployment files:\n\n- [containerlab](https://github.com/openconfig/gnmic/tree/main/examples/deployments/1.single-instance/1.nats-output/containerlab/nats.clab.yaml)\n\n- [gNMIc config](https://github.com/openconfig/gnmic/tree/main/examples/deployments/1.single-instance/1.nats-output/containerlab/gnmic.yaml)\n\nThe deployed SR Linux nodes are discovered using Docker API and are loaded as gNMI targets.\nEdit the subscriptions section if needed.\n\nDeploy it with:\n\n```bash\ngit clone https://github.com/openconfig/gnmic.git\ncd gnmic/examples/deployments/1.single-instance/1.nats-output/containerlab\nsudo clab deploy -t nats.clab.yaml\n```\n\n```text\n+---+-------------------+--------------+------------------------------+-------+-------+---------+----------------+----------------------+\n| # |       Name        | Container ID |            Image             | Kind  | Group |  State  |  IPv4 Address  |     IPv6 Address     
|\n+---+-------------------+--------------+------------------------------+-------+-------+---------+----------------+----------------------+\n| 1 | clab-lab11-gnmic  | 955eaa35b730 | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.3/24 | 2001:172:20:20::3/64 |\n| 2 | clab-lab11-leaf1  | f0f61a79124e | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.4/24 | 2001:172:20:20::4/64 |\n| 3 | clab-lab11-leaf2  | de714ee79856 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.9/24 | 2001:172:20:20::9/64 |\n| 4 | clab-lab11-leaf3  | c674b7bbb898 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.8/24 | 2001:172:20:20::8/64 |\n| 5 | clab-lab11-leaf4  | c37033f30e99 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.7/24 | 2001:172:20:20::7/64 |\n| 6 | clab-lab11-nats   | ebbd346d2aee | nats:latest                  | linux |       | running | 172.20.20.2/24 | 2001:172:20:20::2/64 |\n| 7 | clab-lab11-spine1 | 0fe91271bdfe | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.6/24 | 2001:172:20:20::6/64 |\n| 8 | clab-lab11-spine2 | 6b05f4e42cc4 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.5/24 | 2001:172:20:20::5/64 |\n+---+-------------------+--------------+------------------------------+-------+-------+---------+----------------+----------------------+\n```\n\nCheck the [NATS Output](../../../user_guide/outputs/nats_output.md) documentation page for more configuration options.\n"
  },
  {
    "path": "docs/deployments/single-instance/containerlab/prometheus-output.md",
    "content": "The purpose of this deployment is to collect gNMI data and make it available for scraping by a `Prometheus` client.\n\nThis deployment example includes a single `gnmic` instance, a [Prometheus Server](https://prometheus.io/), a [Consul agent](https://www.consul.io/docs/agent) used by Prometheus to discover gNMIc's [Prometheus output](../../../user_guide/outputs/prometheus_output.md) and a [Grafana](https://grafana.com/docs/) server.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:3,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/clab_deployments.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fclab_deployments.drawio\" async></script>\n\nDeployment files:\n\n- [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/containerlab/prometheus.clab.yaml)\n\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/containerlab/gnmic.yaml)\n\n- [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/containerlab/prometheus/prometheus.yaml)\n\n- [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/containerlab/grafana/datasources/datasource.yaml)\n\nThe deployed SR Linux nodes are discovered using Docker API and are loaded as gNMI targets.\nEdit the subscriptions section if needed.\n\nDeploy it with:\n\n```bash\ngit clone 
https://github.com/openconfig/gnmic.git\ncd gnmic/examples/deployments/1.single-instance/4.prometheus-output/containerlab\nsudo clab deploy -t prometheus.clab.yaml\n```\n\n```text\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+\n| #  |          Name           | Container ID |            Image             | Kind  | Group |  State  |  IPv4 Address   |     IPv6 Address     |\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+\n|  1 | clab-lab14-consul-agent | e402b0516753 | consul:latest                | linux |       | running | 172.20.20.4/24  | 2001:172:20:20::4/64 |\n|  2 | clab-lab14-gnmic        | 53943cdb8cde | ghcr.io/openconfig/gnmic:latest | linux |       | running | 172.20.20.3/24  | 2001:172:20:20::3/64 |\n|  3 | clab-lab14-grafana      | 1a57efb74f37 | grafana/grafana:latest       | linux |       | running | 172.20.20.2/24  | 2001:172:20:20::2/64 |\n|  4 | clab-lab14-leaf1        | 8343848fbd7a | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.9/24  | 2001:172:20:20::9/64 |\n|  5 | clab-lab14-leaf2        | 9986ff987048 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.8/24  | 2001:172:20:20::8/64 |\n|  6 | clab-lab14-leaf3        | 25a212fcb7a1 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.11/24 | 2001:172:20:20::b/64 |\n|  7 | clab-lab14-leaf4        | 025373e9f192 | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.10/24 | 2001:172:20:20::a/64 |\n|  8 | clab-lab14-prometheus   | ae9b47c49c8d | prom/prometheus:latest       | linux |       | running | 172.20.20.5/24  | 2001:172:20:20::5/64 |\n|  9 | clab-lab14-spine1       | fb9abd5b4c5c | ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.7/24  | 2001:172:20:20::7/64 |\n| 10 | clab-lab14-spine2       | f32906f19d55 | 
ghcr.io/nokia/srlinux        | srl   |       | running | 172.20.20.6/24  | 2001:172:20:20::6/64 |\n+----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+\n```\n\nCheck the [Prometheus output](../../../user_guide/outputs/prometheus_output.md) documentation page for more configuration options.\n"
  },
  {
    "path": "docs/deployments/single-instance/containerlab/prometheus-remote-write-output.md",
    "content": "The purpose of this deployment is to collect gNMI data and use [Prometheus remote write API](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) to push it to different monitoring systems like [Prometheus](https://prometheus.io), [Mimir](https://grafana.com/oss/mimir/), [CortexMetrics](https://cortexmetrics.io/), [VictoriaMetrics](https://victoriametrics.com/), [Thanos](https://thanos.io/)...\n\nThis deployment example includes a single `gnmic` instance, a [Prometheus Server](https://prometheus.io/), and a [Grafana](https://grafana.com/docs/) server.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:5,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/clab_deployments.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fclab_deployments.drawio\" async></script>\n\nDeployment files:\n\n- [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/prom_write.clab.yaml)\n\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/gnmic.yaml)\n\n- [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/prometheus/prometheus.yaml)\n\n- [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/grafana/datasources/datasource.yaml)\n\nThe deployed SR Linux nodes are 
discovered using Docker API and are loaded as gNMI targets.\nEdit the subscriptions section if needed.\n\nDeploy it with:\n\n```bash\ngit clone https://github.com/openconfig/gnmic.git\ncd gnmic/examples/deployments/1.single-instance/6.prometheus-write-output/containerlab\nsudo clab deploy -t prom_write.clab.yaml\n```\n\n```text\n+----+-------------------------+--------------+------------------------------+-------+---------+-----------------+--------------+\n| #  |          Name           | Container ID |            Image             | Kind  |  State  |  IPv4 Address   | IPv6 Address |\n+----+-------------------------+--------------+------------------------------+-------+---------+-----------------+--------------+\n|  1 | clab-lab16-consul-agent | 10054b55e722 | consul:latest                | linux | running | 172.19.19.3/24  | N/A          |\n|  2 | clab-lab16-gnmic        | 1eeab0771731 | ghcr.io/openconfig/gnmic:latest | linux | running | 172.19.19.5/24  | N/A          |\n|  3 | clab-lab16-grafana      | fd09146937ef | grafana/grafana:latest       | linux | running | 172.19.19.2/24  | N/A          |\n|  4 | clab-lab16-leaf1        | 0c8f5bf7bafb | ghcr.io/nokia/srlinux        | srl   | running | 172.19.19.11/24 | N/A          |\n|  5 | clab-lab16-leaf2        | a33868bef0a3 | ghcr.io/nokia/srlinux        | srl   | running | 172.19.19.9/24  | N/A          |\n|  6 | clab-lab16-leaf3        | 3fb3b459cd48 | ghcr.io/nokia/srlinux        | srl   | running | 172.19.19.10/24 | N/A          |\n|  7 | clab-lab16-leaf4        | bb2cbc064b05 | ghcr.io/nokia/srlinux        | srl   | running | 172.19.19.6/24  | N/A          |\n|  8 | clab-lab16-prometheus   | 63b6fb1551de | prom/prometheus:latest       | linux | running | 172.19.19.4/24  | N/A          |\n|  9 | clab-lab16-spine1       | 76853ab9c4a8 | ghcr.io/nokia/srlinux        | srl   | running | 172.19.19.8/24  | N/A          |\n| 10 | clab-lab16-spine2       | fdf42ca0fec1 | ghcr.io/nokia/srlinux        | srl   | 
running | 172.19.19.7/24  | N/A          |\n+----+-------------------------+--------------+------------------------------+-------+---------+-----------------+--------------+\n```\n\nCheck the [Prometheus Remote Write output](../../../user_guide/outputs/prometheus_write_output.md) documentation page for more configuration options.\n"
  },
  {
    "path": "docs/deployments/single-instance/docker-compose/influxdb-output.md",
    "content": "The purpose of this deployment is to collect gNMI data and write it to an `InfluxDB` instance.\n\nThis deployment example includes a single `gnmic` instance and a single [InfluxDB output](../../../user_guide/outputs/influxdb_output.md)\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/single_instance_influxdb.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fsingle_instance_influxdb.drawio\" async></script>\n\nDeployment files:\n\n- [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/3.influxdb-output/docker-compose/docker-compose.yaml)\n\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/3.influxdb-output/docker-compose/gnmic1.yaml)\n\nDownload both files, update the `gnmic` config file with the desired subscriptions and targets.\n\nDeploy it with:\n\n```bash\nsudo docker-compose up -d\n```\n\nCheck the [InfluxDB Output](../../../user_guide/outputs/influxdb_output.md) documentation page for more configuration options\n"
  },
  {
    "path": "docs/deployments/single-instance/docker-compose/kafka-output.md",
    "content": "The purpose of this deployment is to collect gNMI data and write it to a `Kafka` broker.\n\nMultiple 3rd Party systems (acting as a Kafka consumers) can then read the data from the `Kafka` broker for further processing.\n\nThis deployment example includes a single `gnmic` instance and a single [Kafka output](../../../user_guide/outputs/kafka_output.md)\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/single_instance_kafka.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fsingle_instance_kafka.drawio\" async></script>\n\nDeployment files:\n\n- [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/2.kafka-output/docker-compose/docker-compose.yaml)\n\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/2.kafka-output/docker-compose/gnmic1.yaml)\n\nDownload both files, update the `gnmic` config file with the desired subscriptions and targets.\n\nDeploy it with:\n\n```bash\nsudo docker-compose up -d\n```\n\nCheck the [Kafka Output](../../../user_guide/outputs/kafka_output.md) documentation page for more configuration options\n"
  },
  {
    "path": "docs/deployments/single-instance/docker-compose/multiple-outputs.md",
    "content": "The purpose of this deployment is to collect gNMI data and write it to multiple outputs.\n\nThis deployment example includes:\n\n- A single `gnmic` instance\n- A [Prometheus output](../../../user_guide/outputs/prometheus_output.md)\n- An [InfluxDB output](../../../user_guide/outputs/influxdb_output.md)\n- A [NATS output](../../../user_guide/outputs/nats_output.md)\n- A [Kafka output](../../../user_guide/outputs/kafka_output.md)\n- A [File output](../../../user_guide/outputs/file_output.md)\n\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/single_instance_multiple_outputs.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fsingle_instance_multiple_outputs.drawio\" async></script>\n\nDeployment files:\n\n- [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/docker-compose/docker-compose.yaml)\n\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/docker-compose/gnmic1.yaml)\n\n- [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/docker-compose/prometheus/prometheus.yaml)\n\nDownload both files, update the `gnmic` config file with the desired subscriptions and targets.\n\nDeploy it with:\n\n```bash\nsudo docker-compose up -d\n```\n\nCheck the [gnmic outputs](../../../user_guide/outputs/output_intro.md) documentation page for more configuration options\n"
  },
  {
    "path": "docs/deployments/single-instance/docker-compose/nats-output.md",
    "content": "The purpose of this deployment is to collect gNMI data and write it to a `NATS` server.\n\nMultiple 3rd Party systems (acting as a NATS clients) can then read the data from the `NATS` server for further processing.\n\nThis deployment example includes a single `gnmic` instance and a single [NATS output](../../../user_guide/outputs/nats_output.md)\n\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/single_instance_nats.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fsingle_instance_nats.drawio\" async></script>\n\nDeployment files:\n\n- [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/1.nats-output/docker-compose/docker-compose.yaml)\n\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/1.nats-output/docker-compose/gnmic1.yaml)\n\nDownload both files, update the `gnmic` config file with the desired subscriptions and targets.\n\nDeploy it with:\n\n```bash\nsudo docker-compose up -d\n```\n\nCheck the [NATS Output](../../../user_guide/outputs/nats_output.md) documentation page for more configuration options\n"
  },
  {
    "path": "docs/deployments/single-instance/docker-compose/prometheus-output.md",
    "content": "The purpose of this deployment is to collect gNMI data and make it available for scraping by a `Prometheus` client.\n\nThis deployment example includes a single `gnmic` instance and a single [Prometheus output](../../../user_guide/outputs/prometheus_output.md)\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/single_instance_prometheus.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fsingle_instance_prometheus.drawio\" async></script>\n\nDeployment files:\n\n- [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/docker-compose/docker-compose.yaml)\n\n- [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/docker-compose/gnmic1.yaml)\n\n- [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/docker-compose/prometheus/prometheus.yaml)\n\nDownload both files, update the `gnmic` config file with the desired subscriptions and targets.\n\nDeploy it with:\n\n```bash\nsudo docker-compose up -d\n```\n\nCheck the [Prometheus output](../../../user_guide/outputs/prometheus_output.md) documentation page for more configuration options\n"
  },
  {
    "path": "docs/global_flags.md",
    "content": "### address\n\nThe address flag `[-a | --address]` is used to specify the target's gNMI server address in address:port format, for e.g: `192.168.113.11:57400`\n\nMultiple target addresses can be specified, either as comma separated values:\n\n```bash\ngnmic --address 192.168.113.11:57400,192.168.113.12:57400 \n```\n\nor by using the `--address` flag multiple times:\n\n```bash\ngnmic -a 192.168.113.11:57400 --address 192.168.113.12:57400\n```\n\n### auth-scheme\n\nThe auth-scheme flag `--auth-scheme` is used to specify the authorization header type.\nFor example, if `auth-scheme` is set to `Basic`, the gNMI requests headers will include an `Authorization` header with\nvalue `Basic base64enc(username:password)`.\n\n### cluster-name\n\nThe `[--cluster-name]` flag is used to specify the cluster name the `gnmic` instance will join.\n\nThe cluster name is used as part of the locked keys to share targets between multiple gnmic instances.\n\nDefaults to `default-cluster`\n\n### config\n\nThe `--config` flag specifies the location of a configuration file that `gnmic` will read.\n\nIf not specified, gnmic searches for a file named `.gnmic` with extensions `yaml, yml, toml or json` in the following locations:\n\n* `$PWD`\n* `$HOME`\n* `$XDG_CONFIG_HOME`\n* `$XDG_CONFIG_HOME/gnmic`\n\n### debug\n\nThe debug flag `[-d | --debug]` enables the printing of extra information when sending/receiving an RPC\n\n### dir\n\nA path to a directory which `gnmic` would recursively traverse in search for the additional YANG files which may be required by YANG files specified with `--file` to build the YANG tree.\n\nCan also point to a single YANG file instead of a directory.\n\nMultiple `--dir` flags can be supplied.\n\n### encoding\n\nThe encoding flag `[-e | --encoding]` is used to specify the [gNMI encoding](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#23-structured-data-types) of the Update part of a 
[Notification](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#21-reusable-notification-message-format) message.\n\nIt is case insensitive and must be one of: JSON, BYTES, PROTO, ASCII, JSON_IETF\n\n### exclude\n\nThe `--exclude` flag specifies the YANG module __names__ to be excluded from the tree generation when YANG modules names clash.\n\nMultiple `--exclude` flags can be supplied.\n\n### file\n\nA path to a YANG file or a directory with YANG files which `gnmic` will use with prompt, generate and path commands.\n\nMultiple `--file` flags can be supplied.\n\n### format\n\nFive output formats can be configured by means of the `--format` flag. `[proto, protojson, prototext, json, event]` The default format is `json`.\n\nThe `proto` format outputs the gnmi message as raw bytes, this value is not allowed when the output type is file (file system, stdout or stderr) see [outputs](user_guide/outputs/output_intro.md)\n\nThe `prototext` and `protojson` formats are the message representation as defined in [prototext](https://godoc.org/google.golang.org/protobuf/encoding/prototext) and [protojson](https://godoc.org/google.golang.org/protobuf/encoding/protojson)\n\nThe `event` format emits the received gNMI SubscribeResponse updates and deletes as a list of events tagged with the keys present in the subscribe path (as well as some metadata) and a timestamp\n\nHere goes an example of the same response emitted to stdout in the respective formats:\n\n=== \"protojson\"\n    ```json\n    {\n      \"update\": {\n      \"timestamp\": \"1595584408456503938\",\n      \"prefix\": {\n        \"elem\": [\n          {\n            \"name\": \"state\"\n          },\n          {\n            \"name\": \"system\"\n          },\n          {\n            \"name\": \"version\"\n          }\n        ]\n      },\n        \"update\": [\n          {\n            \"path\": {\n              \"elem\": [\n                {\n                 \"name\": 
\"version-string\"\n               }\n              ]\n            },\n            \"val\": {\n              \"stringVal\": \"TiMOS-B-20.5.R1 both/x86_64 Nokia 7750 SR Copyright (c) 2000-2020 Nokia.\\r\\nAll rights reserved. All use subject to applicable license agreements.\\r\\nBuilt on Wed May 13 14:08:50 PDT 2020 by builder in /builds/c/205B/R1/panos/main/sros\"\n            }\n          }\n        ]\n      }\n    }\n    ```\n\n=== \"prototext\"\n    ```yaml\n    update: {\n      timestamp: 1595584168675434221\n      prefix: {\n        elem: {\n          name: \"state\"\n        }\n        elem: {\n          name: \"system\"\n        }\n        elem: {\n          name: \"version\"\n        }\n      }\n      update: {\n        path: {\n          elem: {\n            name: \"version-string\"\n          }\n        }\n        val: {\n          string_val: \"TiMOS-B-20.5.R1 both/x86_64 Nokia 7750 SR Copyright (c) 2000-2020 Nokia.\\r\\nAll rights reserved. All use subject to applicable license agreements.\\r\\nBuilt on Wed May 13 14:08:50 PDT 2020 by builder in /builds/c/205B/R1/panos/main/sros\"\n        }\n      }\n    }\n    ```\n=== \"json\"\n    ```json\n    {\n      \"source\": \"172.17.0.100:57400\",\n      \"subscription-name\": \"default\",\n      \"timestamp\": 1595584326775141151,\n      \"time\": \"2020-07-24T17:52:06.775141151+08:00\",\n      \"prefix\": \"state/system/version\",\n      \"updates\": [\n        {\n          \"Path\": \"version-string\",\n          \"values\": {\n            \"version-string\": \"TiMOS-B-20.5.R1 both/x86_64 Nokia 7750 SR Copyright (c) 2000-2020 Nokia.\\r\\nAll rights reserved. 
All use subject to applicable license agreements.\\r\\nBuilt on Wed May 13 14:08:50 PDT 2020 by builder in /builds/c/205B/R1/panos/main/sros\"\n          }\n        }\n      ]\n    }\n    ```\n=== \"event\"\n    ```json\n    [\n      {\n        \"name\": \"default\",\n        \"timestamp\": 1595584587725708234,\n        \"tags\": {\n          \"source\": \"172.17.0.100:57400\",\n          \"subscription-name\": \"default\"\n        },\n        \"values\": {\n          \"/state/system/version/version-string\": \"TiMOS-B-20.5.R1 both/x86_64 Nokia 7750 SR Copyright (c) 2000-2020 Nokia.\\r\\nAll rights reserved. All use subject to applicable license agreements.\\r\\nBuilt on Wed May 13 14:08:50 PDT 2020 by builder in /builds/c/205B/R1/panos/main/sros\"\n        }\n      }\n    ]\n    ```\n\n### gzip\n\nThe `[--gzip]` flag enables gRPC gzip compression.\n\n### insecure\n\nThe insecure flag `[--insecure]` is used to indicate that the client wishes to establish an non-TLS enabled gRPC connection.\n\nTo disable certificate validation in a TLS-enabled connection use [`skip-verify`](#skip-verify) flag.\n\n### instance-name\n\nThe `[--instance-name]` flag is used to give a unique name to the running `gnmic` instance. This is useful when there are multiple instances of `gnmic` running at the same time, either for high-availability and/or scalability\n\n### log\n\nThe `--log` flag enables log messages to appear on stderr output. By default logging is disabled.\n\n### log-file\n\nThe log-file flag `[--log-file <path>]` sets the log output to a file referenced by the path. This flag supersede the `--log` flag\n\n### log-max-size\n\nThe `[--log-max-size]` flag enables log rotation and sets the maximum size of the log file in megabytes before it gets rotated.\n\n### log-max-backups\n\nThe `[--log-max-backups]` flag sets the maximum number of old log files to retain. 
The default is to retain all old log files.\n\n### log-compress\n\nThe `[--log-compress]` flag determines if the rotated log files should be compressed using gzip. The default is not to perform compression.\n\n### no-prefix\n\nThe no prefix flag `[--no-prefix]` disables prefixing the json formatted responses with `[ip:port]` string.\n\nNote that in case a single target is specified, the prefix is not added.\n\n### password\n\nThe password flag `[-p | --password]` is used to specify the target password as part of the user credentials.\n\nNote that in case multiple targets are used, all should use the same credentials.\n\n### proto-dir\n\nThe `[--proto-dir]` flag is used to specify a list of directories where `gnmic` will search for the proto file names specified with `--proto-file`.\n\n### proto-file\n\nThe `[--proto-file]` flag is used to specify a list of proto file names that `gnmic` will use to decode ProtoBytes values. Only Nokia SROS proto is currently supported.\n\n### proxy-from-env\n\nThe proxy-from-env flag `[--proxy-from-env]` indicates that gnmic should use the HTTP/HTTPS proxy addresses defined in the environment variables `http_proxy` and `https_proxy` to reach the targets specified using the `--address` flag.\n\n### retry\n\nThe retry flag `[--retry]` specifies the wait time before each retry.\n\nValid formats: 10s, 1m30s, 1h.  Defaults to 10s\n\n### skip-verify\n\nThe skip verify flag `[--skip-verify]` indicates that the target should skip the signature verification steps, in case a secure connection is used.  \n\n### targets-file\n\nThe `[--targets-file]` flag is used to configure a [file target loader](user_guide/targets/target_discovery/file_discovery.md)\n\n### timeout\n\nThe timeout flag `[--timeout]` specifies the gRPC timeout after which the connection attempt fails.\n\nValid formats: 10s, 1m30s, 1h.  
Defaults to 10s\n\n### tls-ca\n\nThe TLS CA flag `[--tls-ca]` specifies the root certificates for verifying server certificates encoded in PEM format.\n\n### tls-cert\n\nThe TLS cert flag `[--tls-cert]` specifies the public key for the client encoded in PEM format.\n\n### tls-key\n\nThe TLS key flag `[--tls-key]` specifies the private key for the client encoded in PEM format.\n\n### tls-max-version\n\nThe TLS max version flag `[--tls-max-version]` specifies the maximum TLS version supported by gNMIc when creating a secure gRPC connection.\n\n### tls-min-version\n\nThe TLS min version flag `[--tls-min-version]` specifies the minimum TLS version supported by gNMIc when creating a secure gRPC connection.\n\n### tls-server-name\n\nThe TLS server name flag `[--tls-server-name]` sets the server name to be used when verifying the hostname on the returned certificates unless `--skip-verify` is set.\n\nThis global flag applies to all targets.\n\n### tls-version\n\nThe TLS version flag `[--tls-version]` specifies a single TLS version supported by gNMIc when creating a secure gRPC connection.\n\nThis flag overwrites the previously listed flags `--tls-max-version` and `--tls-min-version`.\n\n### log-tls-secret\n\nThe log TLS secret flag `[--log-tls-secret]` makes gnmic log the per-session pre-master secret so that it can be used to [decrypt TLS](https://gitlab.com/wireshark/wireshark/-/wikis/TLS#tls-decryption) secured gNMI communications with, for example, Wireshark.\n\nThe secret will be saved to a file named `<target-name>.tlssecret.log`.\n\n### token\n\nThe token flag `[--token]` sets a token value to be added to each RPC as an Authorization Bearer Token.\n\nApplied only in the case of a secure gRPC connection.\n\n### username\n\nThe username flag `[-u | --username]` is used to specify the target username as part of the user credentials.\n\n### calculate-latency\n\nThe `--calculate-latency` flag augments subscribe and get responses by calculating the 
delta between the message timestamp and the receive timestamp.\nThe resulting message will include 4 extra fields:\n\n* `recv-timestamp`: The receive timestamp in nanoseconds.\n* `recv-time`: The receive time in ISO 8601 date and time representation, extended to include fractional seconds and a time zone offset.\n* `latency-nano`: The difference between the message timestamp and the receive time in nanoseconds.\n* `latency-milli`: The difference between the message timestamp and the receive time in milliseconds.\n\n### metadata\n\nThe `[-H | --metadata]` flag adds custom headers to any gRPC request. `gnmic -H header1=value1 -H header2=value2`\n"
  },
  {
    "path": "docs/index.md",
    "content": "<nbsp/>\n<p style=\"text-align:center;\"><img src=https://raw.githubusercontent.com/openconfig/gnmic/main/docs/images/gnmic-headline.svg?sanitize=true/></p>\n\n[![github release](https://img.shields.io/github/release/openconfig/gnmic.svg?style=flat-square&color=00c9ff&labelColor=bec8d2)](https://github.com/openconfig/gnmic/releases/)\n[![Github all releases](https://img.shields.io/github/downloads/openconfig/gnmic/total.svg?style=flat-square&color=00c9ff&labelColor=bec8d2)](https://github.com/openconfig/gnmic/releases/)\n\n---\n\n`gnmic` <small>_(pronoun.: gee·en·em·eye·see)_</small> is a gNMI CLI client that provides full support for Capabilities, Get, Set and Subscribe RPCs with collector capabilities.\n\n## Features\n* **Full support for gNMI RPCs**  \n  Every gNMI RPC has a [corresponding command](https://gnmic.openconfig.net/basic_usage/) with all of the RPC options configurable by means of the local and global flags.\n* **Flexible collector deployment**  \n  `gnmic` can be deployed as a gNMI collector that supports multiple output types ([NATS](user_guide/outputs/nats_output.md), [Kafka](user_guide/outputs/kafka_output.md), [Prometheus](user_guide/outputs/prometheus_output.md), [InfluxDB](user_guide/outputs/influxdb_output.md),...).  
\n  The collector can be deployed either as a [single instance](deployments/deployments_intro/#single-instance), as part of a [cluster](user_guide/HA/), or used to form [data pipelines](deployments/deployments_intro/#pipelines).\n* **gNMI data manipulation**   \n  `gnmic` collector supports [data transformation](user_guide/event_processors/intro/) capabilities that can be used to adapt the collected data to your specific use case.\n* **Dynamic targets loading**  \n  `gnmic` support [target loading at runtime](user_guide/targets/target_discovery/discovery_intro.md) based on input from external systems.\n* **YANG-based path suggestions**  \n  Your CLI magically becomes a YANG browser when `gnmic` is executed in [prompt](user_guide/prompt_suggestions.md) mode. In this mode the flags that take XPATH values will get auto-suggestions based on the provided YANG modules. In other words - voodoo magic :exploding_head:\n* **Multiple configuration sources**  \n  gnmic supports [flags](user_guide/configuration_flags), [environment variables](user_guide/configuration_env/) as well as [file based](https://gnmic.openconfig.net/user_guide/configuration_file/) configurations.\n* **Multi-target operations**  \n  Commands can operate on [multiple gNMI targets](https://gnmic.openconfig.net/user_guide/targets/) for bulk configuration/retrieval/subscription.\n* **Multiple subscriptions**  \n  With file based configuration it is possible to define and configure [multiple subscriptions](https://gnmic.openconfig.net/user_guide/subscriptions/) which can be independently associated with gNMI targets.\n* **Inspect gNMI messages**  \n  With the `textproto` output format and the logging capabilities of `gnmic` you can see the actual gNMI messages being sent/received. 
It's like having a gNMI looking glass!\n* **Configurable TLS enforcement**  \n  gNMI client supports both TLS and [non-TLS](https://gnmic.openconfig.net/global_flags/#insecure) transports so you can start using it in a lab environment without having to care about the PKI.\n* **Dial-out telemetry**  \n  The [dial-out telemetry server](https://gnmic.openconfig.net/cmd/listen/) is provided for Nokia SR OS.\n* **Pre-built multi-platform binaries**  \n  Statically linked [binaries](https://github.com/openconfig/gnmic/releases) made in our release pipeline are available for major operating systems and architectures. Making [installation](https://gnmic.openconfig.net/install/) a breeze!\n* **Extensive and friendly documentation**  \n  You won't need to dive into the source code to understand how `gnmic` works, our [documentation site](https://gnmic.openconfig.net) has you covered.\n\n## Quick start guide\n### Installation\n```\nbash -c \"$(curl -sL https://get-gnmic.openconfig.net)\"\n```\n### Capabilities request\n```\ngnmic -a 10.1.0.11:57400 -u admin -p admin --insecure capabilities\n```\n\n### Get request\n```\ngnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \\\n      get --path /state/system/platform\n```\n\n### Set request\n```\ngnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \\\n      set --update-path /configure/system/name \\\n          --update-value gnmic_demo\n```\n\n### Subscribe request\n```\ngnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \\\n      sub --path \"/state/port[port-id=1/1/c1/1]/statistics/in-packets\"\n```\n"
  },
  {
    "path": "docs/install.md",
    "content": "`gnmic` is a single binary built for the Linux, Mac OS and Windows operating systems distributed via [Github releases](https://github.com/openconfig/gnmic/releases).\n\n### Linux/Mac OS\n\nTo download & install the latest release the following automated [installation script](https://github.com/openconfig/gnmic/blob/main/install.sh) can be used:\n\n```bash\nbash -c \"$(curl -sL https://get-gnmic.openconfig.net)\"\n```\n\nAs a result, the latest `gnmic` version will be installed in the `/usr/local/bin` directory and the version information will be printed out.\n\n```text\nDownloading gnmic_0.0.3_Darwin_x86_64.tar.gz...\nMoving gnmic to /usr/local/bin\n\nversion : 0.0.3\n commit : f541948\n   date : 2020-04-23T12:06:07Z\n gitURL : https://github.com/openconfig/gnmic.git\n   docs : https://gnmic.openconfig.net\n\nInstallation complete!\n```\n\nTo install a specific version of `gnmic`, provide the version with `-v` flag to the installation script:\n```bash\nbash -c \"$(curl -sL https://get-gnmic.openconfig.net)\" -- -v 0.5.0\n```\n\n#### Packages\n\nLinux users running distributions with support for `deb`/`rpm` packages can install `gnmic` using pre-built packages:\n\n```bash\nbash -c \"$(curl -sL https://get-gnmic.openconfig.net)\" -- --use-pkg\n```\n\n#### Upgrade\n\nTo upgrade `gnmic` to the latest version use the `upgrade` command:\n\n```bash\n# upgrade using binary file\ngnmic version upgrade\n\n# upgrade using package\ngnmic version upgrade --use-pkg\n```\n\n### Windows\n\nWindows users should use [WSL](https://en.wikipedia.org/wiki/Windows_Subsystem_for_Linux) on Windows and install the linux version of the tool.\n\n### Docker\n\nThe `gnmic` container image can be pulled from Dockerhub or GitHub container registries. 
The tag of the image corresponds to the release version and `latest` tag points to the latest available release:\n\n```bash\n# pull latest release from dockerhub\ndocker pull gnmic/gnmic:latest\n# pull a specific release from dockerhub\ndocker pull gnmic/gnmic:0.7.0\n\n# pull latest release from github registry\ndocker pull ghcr.io/openconfig/gnmic:latest\n# pull a specific release from github registry\ndocker pull ghcr.io/openconfig/gnmic:0.5.2\n```\n\nExample running `gnmic get` command using the docker image:\n```bash\ndocker run \\\n       --network host \\\n       --rm ghcr.io/openconfig/gnmic get --log --username admin --password admin --insecure --address router1.local --path /interfaces\n```\n\n### Docker Compose\n\n`gnmic` docker-compose file example:\n\n```yaml\nversion: '2'\n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic-1:\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic-1\n    networks:\n      - gnmic-net\n    volumes:\n      - ./gnmic.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n```\n\nSee [here](deployments/deployments_intro.md) for more deployment options\n"
  },
  {
    "path": "docs/stylesheets/extra.css",
    "content": ".md-typeset code {\n    background-color: transparent ;\n}"
  },
  {
    "path": "docs/user_guide/HA.md",
    "content": "\nMultiple instances of `gnmic` can be run in clustered mode in order to load share the targets connections and protect against failures.\n\nThe cluster mode allows `gnmic` to scale and be highly available at the same time.\n\nTo join the cluster, the instances rely on a service discovery system and distributed KV store such as `Consul`.\n\n### Clustering process\n\nAt startup, all instances belonging to a cluster:\n  \n* Enter an election process in order to become the cluster leader.\n* Register their API service `gnmic-api` in a configured service discovery system.\n\nUpon becoming the leader:\n\n* The `gnmic` instance starts watching the registered `gnmic-api` services, \nand maintains a local cache of the active ones. These are essentially the instances' restAPI addresses.\n* The leader then waits for `clustering/leader-wait-timer` to allow the other instances to register their API services as well. \nThis is useful in case an instance is slow to boot, which leaves it out of the initial load sharing process.\n* The leader then enters a \"target watch loop\" (`clustering/targets-watch-timer`), \nat each iteration the leader tries to determine if all configured targets are handled by an instance of the cluster, \nthis is done by checking if there is a lock maintained for each configured target.\n\nThe instances which failed to become the leader continue to try to acquire the leader lock.\n### Target distribution process\n\nIf the leader detects that a target does not have a lock, it triggers the target distribution process:\n\n* Query all the targets keys from the KV store and calculate each instance load (number of maintained gNMI targets).\n* If the target configuration includes `tags`, the leader selects the instance with the most matching tags (in order). 
\nIf multiple instances have the same matching tags, the one with the lowest load is selected.\n* If the target doesn't have configured tags, the leader simply selects the least loaded instance to handle the target's subscriptions.\n* Retrieve the selected instance API address from the local services cache.\n* Send both the target configuration as well as a target activation action to the selected instance.\n  \nWhen a cluster instance gets assigned a target (target activation):\n\n* Acquire a key lock for that specific target.\n* Once the lock is acquired, create the configured gNMI subscriptions.\n* Maintain the target lock for the duration of the gNMI subscription.\n\nThe whole target distribution process is repeated for each target missing a lock.\n\n### Configuration\n\nThe cluster configuration is as simple as:\n\n```yaml\n# rest api address, format \"address:port\"\napi: \"\"\n# clustering related configuration fields\nclustering:\n  # the cluster name, tells which instances belong to the same cluster\n  # it is used as part of the leader key lock, and the targets key locks\n  # if no value is configured, the value from flag --cluster-name is used.\n  # if the flag has the empty string as value, \"default-cluster\" is used.\n  cluster-name: default-cluster\n  # unique instance name within the cluster,\n  # used as the value in the target locks,\n  # used as the value in the leader lock.\n  # if no value is configured, the value from flag --instance-name is used.\n  # if the flag has the empty string as value, a value is generated in \n  # the format `gnmic-$UUID`\n  instance-name: \"\"\n  # service address to be registered in the locker(Consul)\n  # if not defined, it defaults to the address part of the API address:port\n  service-address: \"\"\n  # gnmic instances API service watch timer\n  # this is a long timer used by the cluster leader \n  # in a consul long-blocking query: \n  # https://www.consul.io/api-docs/features/blocking#implementation-details\n  
services-watch-timer: 60s\n  # targets-watch-timer, targets watch timer, duration the leader waits \n  # between consecutive targets distributions\n  targets-watch-timer: 20s\n  # target-assignment-timeout, max time a leader waits for an instance to \n  # lock an assigned target.\n  # if the timeout is reached the leader unassigns the target and reselects \n  # a different instance.\n  target-assignment-timeout: 10s\n  # leader wait timer, allows to configure a wait time after an instance\n  # acquires the leader key.\n  # this wait time goal is to give more chances to other instances to register \n  # their API services before the target distribution starts\n  leader-wait-timer: 5s\n  # ordered list of strings to be added as tags during api service \n  # registration in addition to `cluster-name=${cluster-name}` and \n  # `instance-name=${instance-name}`\n  tags: []\n  # locker is used to configure the KV store used for \n  # service registration, service discovery, leader election and targets locks\n  locker:\n    # type of locker, only consul is supported currently\n    type: consul\n    # address of the locker server\n    address: localhost:8500\n    # Consul Data center, defaults to dc1\n    datacenter: \n    # Consul username, to be used as part of HTTP basicAuth\n    username:\n    # Consul password, to be used as part of HTTP basicAuth\n    password:\n    # Consul Token, is used to provide a per-request ACL token which overrides \n    # the agent's default token\n    token:\n    # session-ttl, session time-to-live after which a session is considered \n    # invalid if not renewed\n    # upon session invalidation, all services and locks created using this session\n    # are considered invalid.\n    session-ttl: 10s\n    # delay, a time duration (0s to 60s), in the event of  a session invalidation \n    # consul will prevent the lock from being acquired for this duration.\n    # The purpose is to allow a gnmic instance to stop active subscriptions before \n   
 # another one takes over.\n    delay: 5s\n    # retry-timer, wait period between retries to acquire a lock \n    # in the event of client failure, key is already locked or lock lost.\n    retry-timer: 2s\n    # renew-period, session renew period, must be lower that session-ttl. \n    # if the value is greater or equal than session-ttl, is will be set to half \n    # of session-ttl.\n    renew-period: 5s\n    # debug, enable extra logging messages\n    debug: false\n  # tls config for the REST API client\n  tls:\n    # string, path to the CA certificate file,\n    # this will be used to verify the certificates of the gNMIc cluster members\n    # when `skip-verify` is false\n    ca-file:\n    # string, client certificate file.\n    cert-file:\n    # string, client key file.\n    key-file:\n    # boolean, if true, the client will not verify the server\n    # certificate against the available certificate chain.\n    skip-verify: false\n```\n\nA `gnmic` instance creates gNMI subscriptions only towards targets for which it acquired locks. 
It is also responsible for maintaining that lock for the duration of the subscription.\n\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams//locking.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2F/locking.drawio\" async></script>\n\n### Instance affinity\n\nThe target distribution process can be influenced using `tags` added to the target configuration.\n\nBy default, `gnmic` instances register their API service with 2 tags;\n\n`cluster-name=${clustering/cluster-name}`\n`instance-name=${clustering/instance-name}`\n\nBy adding the same tags to a target `router1` configuration (below YAML), the cluster leader will \"assign\" `router1` to instance `gnmic1` in cluster `my-cluster` regardless of the instance load.\n\n```yaml\ntargets:\n  router1:\n    tags:\n      - cluster-name=my-cluster\n      - instance-name=gnmic1\n```\n\nCustom tags can be added to an instance API service registration in order to customize the instance affinity logic.\n\n```yaml\nclustering:\n  tags:\n    - my-custom-tag=value1\n```\n\n### Instance failure\n\nIn the event of an instance failure, its maintained targets locks expire, which on the next `clustering/targets-watch-timer` interval will be detected by the cluster leader.\n\nThe leader then performs the same target distribution process for those targets without a lock.\n\n### Leader reelection\n\nIf a cluster leader fails, one of the other instances in the cluster eventually acquires the leader lock and becomes the cluster leader.\n\nIt then, proceeds with the 
targets distribution process to assign the unhandled targets to an instance in the cluster.\n\n### Scalability\n\nUsing the same above-mentioned clustering mechanism, `gnmic` can horizontally scale the number of supported gNMI connections distributed across multiple `gnmic` instances.\n\nThe collected gNMI data can then be aggregated and made available through any of the running `gnmic` instances, regardless of whether that instance collected the data from the target or not.\n\nThe data aggregation is done by chaining `gnmic` [outputs](../user_guide/outputs/output_intro.md) and [inputs](../user_guide/inputs/input_intro.md) to build a gNMI data pipeline.\n\nIn the diagram below, the `gnmic` instances on the left and right side of NATS server can be identical.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams//scalability.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2F/scalability.drawio\" async></script>"
  },
  {
    "path": "docs/user_guide/actions/actions.md",
    "content": "# Actions\n\n`gNMIc` supports running actions as a result of an event, possible triggering events are:\n\n- A gNMI SubscribeResponse or GetResponse message is received and matches certain criteria.\n- A target is discovered or deleted by a target loader.\n\nThere are 4 types of actions:\n\n- [http](#http-action): build and send an HTTP request\n- [gNMI](#gnmi-action): run a Get, Set or Subscribe ONCE gNMI RPC as a gNMI client\n- [template](#template-action): execute a Go template against the received input\n- [script](#script-action): run arbitrary shell scripts/commands.\n\nThe actions are executed in sequence.\n\nAn action can use the result of any previous action as one of its inputs using the [Go Template](https://golang.org/pkg/text/template/) syntax `{{ .Env.$action_name }}` or `{{ index .Env \"$action_name\"}}`\n\n### HTTP Action\n\nUsing the `HTTP action` you can send an HTTP request to a server.\n\nThe request body can be customized using [Go Templates](https://golang.org/pkg/text/template/) that take the event message or the discovered target as input.\n\n```yaml\nactions:\n  counter1_alert:\n    # action type\n    type: http\n    # HTTP method\n    method: POST\n    # target url, can be a go template\n    url: http://remote-server:8080/\n    # http headers to add to the request\n    headers: \n      content-type: application/text\n    # http request timeout\n    timeout: 5s\n    # go template used to build the request body.\n    # if left empty the whole event message is added as a json object to the request's body\n    body: '\"counter1\" crossed threshold, value={{ index .Values \"counter1\" }}'\n    # enable extra logging\n    debug: false\n```\n\n### gNMI Action\n\nUsing the `gNMI action` you can trigger a gNMI Get, Set or Subscribe ONCE RPC.\n\nJust like the `HTTP action` the RPC fields can be customized using [Go Templates](https://golang.org/pkg/text/template/)\n\n```yaml\nactions:\n  my_gnmi_action:\n    # action type\n    type: 
gnmi\n    # gNMI rpc, defaults to `get`, \n    # if `set` is used it will default to a set update.\n    # to trigger a set replace, use `set-replace`.\n    # `subscribe` is always a subscribe with mode=ONCE\n    # possible values: `get`, `set`, `set-update`, `set-replace`, `set-delete`, `sub`, `subscribe`\n    rpc: set\n    # the target router, it defaults to the value in tag \"source\"\n    # the value `all` means all known targets\n    target: '{{ index .Event.Tags \"source\" }}'\n    # paths templates to build xpaths\n    paths:\n      - | \n        {{ if eq ( index .Event.Tags \"interface_name\" ) \"ethernet-1/1\"}}\n          {{$interfaceName := \"ethernet-1/2\"}}\n        {{else}}\n          {{$interfaceName := \"ethernet-1/1\"}}\n        {{end}}\n        /interfaces/interface[name={{$interfaceName}}]/admin-state\n    # values templates to build the values in case of set-update or set-replace\n    values:\n      - \"enable\"\n    # data-type in case of get RPC, one of: ALL, CONFIG, STATE, OPERATIONAL\n    data-type: ALL\n    # gNMI encoding, defaults to json\n    encoding: json\n    # debug, enable extra logging\n    debug: false\n```\n\n### Template Action\n\nThe `Template action` allows you to combine different data sources and produce custom payloads to be written to a remote server or simply to a file.\n\nThe template is a Go Template that is executed against the `Input` message that triggered the action,\nany variable defined by the trigger processor\nas well as the results of any previous action.\n\n**Data**                      | **Template syntax**                                           |\n----------------------------- | --------------------------------------------------------------|\n**Input Message**             | `{{ .Input }}`                                                |\n**Trigger Variables**         | `{{ .Vars }}`                                                 |\n**Previous actions results**  | `{{ .Env.$action_name }}` or `{{ index .Env 
\"$action_name\"}}` |\n\n```yaml\nactions:\n  awesome_template:\n    # action type\n    type: template\n    # template string, if not present template-file applies.\n    template: '{{ . }}'\n    # path to a file, or a glob.\n    # applies only if `.template `is not set.\n    # if not template and template-file are not set, \n    # the default template `{{ . }}` is used.\n    template-file:\n    # string, either `stdout` or a path to a file\n    # the result of executing the template will be written to the file\n    # specified by .output\n    output:\n    # debug, enable extra logging\n    debug: false\n```\n\n### Script Action\n\nThe `Script action` allows you to run arbitrary scripts as a result of an event trigger.\n\nThe commands to be executed can be specified using the field `command`, e.g:\n\n```yaml\nactions:\n  weather:\n    type: script\n    shell: /bin/bash\n    command: | \n      curl wttr.in\n      curl cheat.sh\n```\n\nOr using the field `file`, e.g:\n\n```yaml\nactions:\n  exec:\n    type: script\n    file: ./my_executable_script.sh\n```\n\nWhen using `command`, the shell interpreter can be set using the `shell` field. Otherwise it defaults to `/bin/bash`.\n\n### Examples\n\n#### Add basic configuration to targets upon discovery\n\nReferencing Actions under a target loader allows running them in sequence when a target is discovered.\n\nThis allows adding some basic configuration to a target upon discovery before starting the gNMI subscriptions.\n\nIn the below example, a `docker` loader is defined. 
It discovers Docker containers with label `clab-node-kind=srl` and adds them as gNMI targets.\nBefore the targets are added to the target's list for subscriptions, a list of actions are executed: `config_interfaces`, `config_subinterfaces` and `config_network_instances`\n\n```yaml\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: ascii\nlog: true\n\nsubscriptions:\n  sub1:\n    paths:\n      - /interface/statistics\n      - /network-instance/statistics\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label: clab-node-kind=srl\n\n  on-add:\n    - config_interfaces\n    - config_sub_interfaces\n    - config_netins\n\noutputs:\n  out:\n    type: file\n    format: event\n    filename: /path/to/file\n\nactions:\n  config_interfaces:\n    name: config_interfaces\n    type: gnmi\n    target: '{{ .Input }}'\n    rpc: set\n    encoding: json_ietf\n    debug: true\n    paths:\n      - /interface[name=ethernet-1/1]/admin-state\n      - /interface[name=ethernet-1/2]/admin-state \n    values:\n      - enable\n      - enable\n  config_subinterfaces:\n    name: config_subinterfaces\n    type: gnmi\n    target: '{{ .Input }}'\n    rpc: set\n    encoding: json_ietf\n    debug: true\n    paths:\n      - /interface[name=ethernet-1/1]/subinterface[index=0]/admin-state\n      - /interface[name=ethernet-1/2]/subinterface[index=0]/admin-state \n    values:\n      - enable\n      - enable\n  config_network_instances:\n    name: config_network_instances\n    type: gnmi\n    target: '{{ .Input }}'\n    rpc: set\n    encoding: json_ietf\n    debug: true\n    paths:\n      - /network-instance[name=default]/admin-state\n      - /network-instance[name=default]/interface\n      - /network-instance[name=default]/interface\n    values:\n      - enable\n      - '{\"name\": \"ethernet-1/1.0\"}'\n      - '{\"name\": \"ethernet-1/2.0\"}'\n```\n\n#### Clone a network topology and deploy it using containerlab\n\nUsing lldp neighbor information it's possible to build 
a containerlab topology using `gnmic` actions.\n\nIn the below configuration file, an event processor called `clone-topology` is defined.\n\nWhen triggered it will run a series of actions to gather information (chassis type, lldp neighbors, configuration,...) from the defined targets.\n\nIt then builds a containerlab topology from a defined template and the gathered info, writes it to a file and runs a `clab deploy` command.\n\n```yaml\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\n# log: true\n\ntargets:\n  srl1:\n  srl2:\n  srl3:\n\nprocessors:\n  clone-topology:\n    event-trigger:\n      # debug: true\n      actions:\n        - chassis  \n        - lldp  \n        - read_config  \n        - write_config \n        - clab_topo         \n        - deploy_topo\n\nactions:\n  chassis:\n    name: chassis\n    type: gnmi\n    target: all\n    rpc: sub\n    encoding: json_ietf\n    #debug: true\n    format: event\n    paths:\n      - /platform/chassis/type\n  \n  lldp:\n    name: lldp\n    type: gnmi\n    target: all\n    rpc: sub\n    encoding: json_ietf\n    #debug: true\n    format: event\n    paths:\n      - /system/lldp/interface[name=ethernet-*]\n  \n  read_config:\n    name: read_config\n    type: gnmi\n    target: all\n    rpc: get\n    data-type: config\n    encoding: json_ietf\n    #debug: true\n    paths:\n      - /\n  \n  write_config:\n    name: write_config\n    type: template\n    template: |\n      {{- range $n, $m := .Env.read_config }}\n      {{- $filename := print $n  \".json\"}}\n          {{ file.Write $filename (index $m 0 \"updates\" 0 \"values\" \"\" | data.ToJSONPretty \"  \" ) }}\n          {{- end }}\n        #debug: true\n  \n  clab_topo:\n    name: clab_topo\n    type: template\n    #debug: true\n    output: gnmic.clab.yaml\n    template: |\n          name: gNMIc-action-generated\n  \n          topology:\n            defaults:\n              kind: srl\n            kinds:\n              srl:\n                
image: ghcr.io/nokia/srlinux:latest\n  \n            nodes:\n          {{- range $n, $m := .Env.lldp }}\n            {{- $type := index $.Env.chassis $n 0 0 \"values\" \"/srl_nokia-platform:platform/srl_nokia-platform-chassis:chassis/type\" }}\n            {{- $type = $type | strings.ReplaceAll \"7220 IXR-D1\" \"ixrd1\" }}\n            {{- $type = $type | strings.ReplaceAll \"7220 IXR-D2\" \"ixrd2\" }}\n            {{- $type = $type | strings.ReplaceAll \"7220 IXR-D3\" \"ixrd3\" }}\n            {{- $type = $type | strings.ReplaceAll \"7250 IXR-6\" \"ixr6\" }}\n            {{- $type = $type | strings.ReplaceAll \"7250 IXR-10\" \"ixr10\" }}\n            {{- $type = $type | strings.ReplaceAll \"7220 IXR-H1\" \"ixrh1\" }}\n            {{- $type = $type | strings.ReplaceAll \"7220 IXR-H2\" \"ixrh2\" }}\n            {{- $type = $type | strings.ReplaceAll \"7220 IXR-H3\" \"ixrh3\" }}\n              {{ $n | strings.TrimPrefix \"clab-test1-\" }}:\n                type: {{ $type }}\n                startup-config: {{ print $n \".json\"}}\n          {{- end }}\n          \n            links:\n          {{- range $n, $m := .Env.lldp }}\n            {{- range $rsp := $m }}\n              {{- range $ev := $rsp }}\n                {{- if index $ev.values \"/srl_nokia-system:system/srl_nokia-lldp:lldp/interface/neighbor/system-name\" }}\n                {{- $node1 := $ev.tags.source | strings.TrimPrefix \"clab-test1-\" }}\n                {{- $iface1 := $ev.tags.interface_name | strings.ReplaceAll \"ethernet-\" \"e\" | strings.ReplaceAll \"/\" \"-\" }}\n                {{- $node2 := index $ev.values \"/srl_nokia-system:system/srl_nokia-lldp:lldp/interface/neighbor/system-name\" }}\n                {{- $iface2 := index $ev.values \"/srl_nokia-system:system/srl_nokia-lldp:lldp/interface/neighbor/port-id\" | strings.ReplaceAll \"ethernet-\" \"e\" | strings.ReplaceAll \"/\" \"-\" }}\n                  {{- if lt $node1 $node2 }}\n              - endpoints: [\"{{ $node1 }}:{{ $iface1 
}}\", \"{{ $node2 }}:{{ $iface2 }}\"]\n                  {{- end }}\n                {{- end }}\n              {{- end }}\n            {{- end }}\n          {{- end }}\n    \n  deploy_topo:  \n    name: deploy_topo\n    type: script\n    command: sudo clab dep -t gnmic.clab.yaml --reconfigure\n    debug: true\n```\n\nThe above described processor can be triggered with the below command:\n\n```bash\ngnmic --config clone.yaml get --path /system/name --processor clone-topology\n```\n"
  },
  {
    "path": "docs/user_guide/api/api_intro.md",
    "content": "A limited set of REST endpoints are supported, these are mainly used to allow for a clustered deployment for multiple `gnmic` instances.\n\nThe API can be used to automate (to a certain extent) the targets configuration loading and starting/stopping subscriptions.\n\n## Configuration\n\nEnabling the API server can be done via a command line flag:\n\n```bash\ngnmic --config gnmic.yaml subscribe --api \":7890\"\n```\n\nvia ENV variable: `GNMIC_API=':7890'`\n\nOr via file configuration, by adding the below line to the config file:\n\n```yaml\napi: \":7890\"\n```\n\nMore advanced API configuration options (like a secure API Server)\ncan be achieved by setting the fields under `api-server`.\n\n```yaml\napi-server:\n  # string, in the form IP:port, the IP part can be omitted.\n  # if not set, it defaults to the value of `api` in the file main level.\n  # if `api` is not set, the default is `:7890`\n  address: :7890\n  # duration, the server timeout.\n  # The set value is equally split between read and write timeouts\n  timeout: 10s\n  # tls config\n  tls:\n    # string, path to the CA certificate file,\n    # this certificate is used to verify the clients certificates.\n    ca-file:\n    # string, server certificate file.\n    cert-file:\n    # string, server key file.\n    key-file:\n    # string, one of `\"\", \"request\", \"require\", \"verify-if-given\", or \"require-verify\" \n    #  - request:         The server requests a certificate from the client but does not \n    #                     require the client to send a certificate. \n    #                     If the client sends a certificate, it is not required to be valid.\n    #  - require:         The server requires the client to send a certificate and does not \n    #                     fail if the client certificate is not valid.\n    #  - verify-if-given: The server requests a certificate, \n    #                     does not fail if no certificate is sent. 
\n    #                     If a certificate is sent it is required to be valid.\n    #  - require-verify:  The server requires the client to send a valid certificate.\n    #\n    # if no ca-file is present, `client-auth` defaults to \"\"`\n    # if a ca-file is set, `client-auth` defaults to \"require-verify\"`\n    client-auth: \"\"\n  # boolean, if true, the server will also handle the path /metrics and serve \n  # gNMIc's enabled prometheus metrics.\n  enable-metrics: false\n  # boolean, enables extra debug log printing\n  debug: false\n  # boolean, disables creating log messages when accessing the `healthz` path\n  healthz-disable-logging: false\n```\n\n## API Endpoints\n\n* [Configuration](./configuration.md)\n\n* [Targets](./targets.md)\n\n* [Cluster](./cluster.md)\n\n* [Other](./other.md)\n"
  },
  {
    "path": "docs/user_guide/api/cluster.md",
    "content": "# Cluster\n\n## /api/v1/cluster\n\n### `GET /api/v1/cluster`\n\nRequest gNMIc cluster state and details.\n\n=== \"Request\"\n    ```bash\n    curl --request GET gnmic-api-address:port/api/v1/cluster\n    ```\n=== \"200 OK\"\n    ```json\n    {\n        \"name\": \"collectors\",\n        \"number-of-locked-targets\": 70,\n        \"leader\": \"clab-telemetry-gnmic1\",\n        \"members\": [\n            {\n                \"name\": \"clab-telemetry-gnmic1\",\n                \"api-endpoint\": \"clab-telemetry-gnmic1:7890\",\n                \"is-leader\": true,\n                \"number-of-locked-nodes\": 23,\n                \"locked-targets\": [\n                    \"clab-lab2-leaf6\",\n                    \"clab-lab5-spine2\",\n                    \"clab-lab4-leaf4\",\n                    \"clab-lab2-leaf8\",\n                    \"clab-lab3-leaf2\",\n                    \"clab-lab5-spine1\",\n                    \"clab-lab1-spine1\",\n                    \"clab-lab2-super-spine2\",\n                    \"clab-lab3-super-spine1\",\n                    \"clab-lab4-spine3\",\n                    \"clab-lab2-spine3\",\n                    \"clab-lab3-leaf7\",\n                    \"clab-lab5-leaf7\",\n                    \"clab-lab5-leaf8\",\n                    \"clab-lab1-spine2\",\n                    \"clab-lab4-leaf8\",\n                    \"clab-lab4-leaf1\",\n                    \"clab-lab4-spine1\",\n                    \"clab-lab2-spine2\",\n                    \"clab-lab3-spine2\",\n                    \"clab-lab1-leaf8\",\n                    \"clab-lab3-leaf8\",\n                    \"clab-lab4-leaf2\"\n                ]\n            },\n            {\n                \"name\": \"clab-telemetry-gnmic2\",\n                \"api-endpoint\": \"clab-telemetry-gnmic2:7891\",\n                \"number-of-locked-nodes\": 24,\n                \"locked-targets\": [\n                    \"clab-lab3-leaf6\",\n                    
\"clab-lab1-leaf7\",\n                    \"clab-lab2-leaf3\",\n                    \"clab-lab5-leaf5\",\n                    \"clab-lab1-super-spine1\",\n                    \"clab-lab3-leaf5\",\n                    \"clab-lab4-super-spine1\",\n                    \"clab-lab5-leaf6\",\n                    \"clab-lab2-spine1\",\n                    \"clab-lab3-leaf3\",\n                    \"clab-lab4-leaf3\",\n                    \"clab-lab2-leaf4\",\n                    \"clab-lab4-super-spine2\",\n                    \"clab-lab1-spine3\",\n                    \"clab-lab3-leaf4\",\n                    \"clab-lab5-spine4\",\n                    \"clab-lab1-leaf4\",\n                    \"clab-lab2-leaf2\",\n                    \"clab-lab2-super-spine1\",\n                    \"clab-lab4-spine4\",\n                    \"clab-lab5-leaf2\",\n                    \"clab-lab5-leaf4\",\n                    \"clab-lab4-leaf7\",\n                    \"clab-lab1-spine4\"\n                ]\n            },\n                {\n                \"name\": \"clab-telemetry-gnmic3\",\n                \"api-endpoint\": \"clab-telemetry-gnmic3:7892\",\n                \"number-of-locked-nodes\": 23,\n                \"locked-targets\": [\n                    \"clab-lab1-leaf5\",\n                    \"clab-lab3-spine3\",\n                    \"clab-lab1-leaf1\",\n                    \"clab-lab2-spine4\",\n                    \"clab-lab1-super-spine2\",\n                    \"clab-lab5-leaf3\",\n                    \"clab-lab4-spine2\",\n                    \"clab-lab1-leaf3\",\n                    \"clab-lab5-spine3\",\n                    \"clab-lab3-super-spine2\",\n                    \"clab-lab2-leaf5\",\n                    \"clab-lab1-leaf2\",\n                    \"clab-lab1-leaf6\",\n                    \"clab-lab4-leaf5\",\n                    \"clab-lab2-leaf7\",\n                    \"clab-lab3-leaf1\",\n                    \"clab-lab2-leaf1\",\n                    
\"clab-lab3-spine1\",\n                    \"clab-lab5-leaf1\",\n                    \"clab-lab5-super-spine2\",\n                    \"clab-lab4-leaf6\",\n                    \"clab-lab3-spine4\",\n                    \"clab-lab5-super-spine1\"\n                ]\n            }\n        ]\n    }\n    ```\n=== \"500 Internal Server Error\"\n    ```json\n    {\n        \"errors\": [\n            \"Error Text\"\n        ]\n    }\n    ```\n\n### `POST /api/v1/cluster/rebalance`\n\nIf the cluster load is not balanced it moves targets from the high load instances to the low load instances.\n\n=== \"Request\"\n    ```bash\n    curl --request POST gnmic-api-address:port/api/v1/cluster/rebalance\n    ```\n=== \"200 OK\"\n    ```\n    ```\n=== \"400 Bad Request\"\n    ```json\n    {\n        \"errors\": [\n            \"not leader\"\n        ]\n    }\n    ```\n\n### `GET /api/v1/cluster/leader`\n\nReturns the cluster leader details.\n\n=== \"Request\"\n    ```bash\n    curl --request GET gnmic-api-address:port/api/v1/cluster/leader\n    ```\n=== \"200 OK\"\n    ```json\n    [\n        {\n            \"name\": \"clab-telemetry-gnmic1\",\n            \"api-endpoint\": \"http://clab-telemetry-gnmic1:7890\",\n            \"is-leader\": true,\n            \"number-of-locked-nodes\": 23,\n            \"locked-targets\": [\n                \"clab-lab4-leaf8\",\n                \"clab-lab5-leaf8\",\n                \"clab-lab1-spine2\",\n                \"clab-lab3-leaf7\",\n                \"clab-lab4-leaf4\",\n                \"clab-lab2-leaf8\",\n                \"clab-lab2-spine3\",\n                \"clab-lab4-leaf1\",\n                \"clab-lab4-leaf2\",\n                \"clab-lab4-spine3\",\n                \"clab-lab5-spine2\",\n                \"clab-lab1-spine1\",\n                \"clab-lab2-leaf6\",\n                \"clab-lab5-leaf7\",\n                \"clab-lab1-leaf8\",\n                \"clab-lab3-leaf8\",\n                \"clab-lab3-spine2\",\n                
\"clab-lab3-super-spine1\",\n                \"clab-lab5-spine1\",\n                \"clab-lab2-super-spine2\",\n                \"clab-lab3-leaf2\",\n                \"clab-lab2-spine2\",\n                \"clab-lab4-spine1\"\n            ]\n        }\n    ]\n    ```\n=== \"500 Internal Server Error\"\n    ```json\n    {\n        \"errors\": [\n            \"Error Text\"\n        ]\n    }\n    ```\n\n### `DELETE /api/v1/cluster/leader`\n\nForces the cluster leader to free its lock to allow another instance to become the leader.\n\n=== \"Request\"\n    ```bash\n    curl --request DELETE gnmic-api-address:port/api/v1/cluster/leader\n    ```\n=== \"200 OK\"\n    ```json\n    ```\n\n## /api/v1/cluster/members\n\n### `GET /api/v1/cluster/members`\n\nQuery gNMIc cluster members\n\nReturns a list of gNMIc cluster members with details\n\n=== \"Request\"\n    ```bash\n    curl --request GET gnmic-api-address:port/api/v1/cluster/members\n    ```\n=== \"200 OK\"\n    ```json\n    [\n        {\n            \"name\": \"clab-telemetry-gnmic1\",\n            \"api-endpoint\": \"http://clab-telemetry-gnmic1:7890\",\n            \"is-leader\": true,\n            \"number-of-locked-nodes\": 23,\n            \"locked-targets\": [\n                \"clab-lab2-spine3\",\n                \"clab-lab5-spine1\",\n                \"clab-lab2-super-spine2\",\n                \"clab-lab4-leaf2\",\n                \"clab-lab4-leaf4\",\n                \"clab-lab5-spine2\",\n                \"clab-lab1-leaf8\",\n                \"clab-lab4-spine1\",\n                \"clab-lab5-leaf7\",\n                \"clab-lab2-spine2\",\n                \"clab-lab3-super-spine1\",\n                \"clab-lab1-spine1\",\n                \"clab-lab3-leaf2\",\n                \"clab-lab3-spine2\",\n                \"clab-lab2-leaf6\",\n                \"clab-lab4-leaf1\",\n                \"clab-lab4-spine3\",\n                \"clab-lab1-spine2\",\n                \"clab-lab2-leaf8\",\n                
\"clab-lab3-leaf8\",\n                \"clab-lab5-leaf8\",\n                \"clab-lab3-leaf7\",\n                \"clab-lab4-leaf8\"\n            ]\n        },\n        {\n            \"name\": \"clab-telemetry-gnmic2\",\n            \"api-endpoint\": \"http://clab-telemetry-gnmic2:7891\",\n            \"number-of-locked-nodes\": 24,\n            \"locked-targets\": [\n                \"clab-lab1-spine4\",\n                \"clab-lab2-leaf2\",\n                \"clab-lab3-leaf3\",\n                \"clab-lab4-super-spine1\",\n                \"clab-lab5-leaf4\",\n                \"clab-lab1-spine3\",\n                \"clab-lab1-leaf4\",\n                \"clab-lab3-leaf6\",\n                \"clab-lab5-leaf2\",\n                \"clab-lab2-leaf4\",\n                \"clab-lab3-leaf4\",\n                \"clab-lab4-leaf3\",\n                \"clab-lab5-spine4\",\n                \"clab-lab3-leaf5\",\n                \"clab-lab4-super-spine2\",\n                \"clab-lab1-leaf7\",\n                \"clab-lab2-leaf3\",\n                \"clab-lab2-super-spine1\",\n                \"clab-lab5-leaf6\",\n                \"clab-lab2-spine1\",\n                \"clab-lab1-super-spine1\",\n                \"clab-lab4-leaf7\",\n                \"clab-lab4-spine4\",\n                \"clab-lab5-leaf5\"\n            ]\n        },\n        {\n            \"name\": \"clab-telemetry-gnmic3\",\n            \"api-endpoint\": \"http://clab-telemetry-gnmic3:7892\",\n            \"number-of-locked-nodes\": 23,\n            \"locked-targets\": [\n                \"clab-lab1-leaf3\",\n                \"clab-lab1-leaf5\",\n                \"clab-lab3-spine4\",\n                \"clab-lab3-spine3\",\n                \"clab-lab1-leaf1\",\n                \"clab-lab1-leaf6\",\n                \"clab-lab2-leaf5\",\n                \"clab-lab4-leaf6\",\n                \"clab-lab5-leaf1\",\n                \"clab-lab5-leaf3\",\n                \"clab-lab5-super-spine2\",\n                
\"clab-lab2-spine4\",\n                \"clab-lab5-super-spine1\",\n                \"clab-lab4-spine2\",\n                \"clab-lab3-spine1\",\n                \"clab-lab4-leaf5\",\n                \"clab-lab5-spine3\",\n                \"clab-lab1-super-spine2\",\n                \"clab-lab2-leaf1\",\n                \"clab-lab3-super-spine2\",\n                \"clab-lab3-leaf1\",\n                \"clab-lab1-leaf2\",\n                \"clab-lab2-leaf7\"\n            ]\n        }\n    ]\n    ```\n=== \"500 Internal Server Error\"\n    ```json\n    {\n        \"errors\": [\n            \"Error Text\"\n        ]\n    }\n    ```\n\n### `POST /api/v1/cluster/members/{id}/drain`\n\nDrains the instance `id` from its targets, moving them to the other instances in the cluster.\n\n=== \"Request\"\n    ```bash\n    curl --request POST gnmic-api-address:port/api/v1/cluster/members/{id}/drain\n    ```\n=== \"200 OK\"\n    ```json\n    ```\n"
  },
  {
    "path": "docs/user_guide/api/configuration.md",
    "content": "\n# Configuration\n\n## /api/v1/config\n\n### `GET /api/v1/config`\n\nRequest all gnmic configuration\n\nReturns the whole configuration as json\n\n=== \"Request\"\n    ```bash\n    curl --request GET gnmic-api-address:port/api/v1/config\n    ```\n=== \"200 OK\"\n    ```json\n    {\n        \"username\": \"admin\",\n        \"password\": \"admin\",\n        \"port\": \"57400\",\n        \"encoding\": \"json_ietf\",\n        \"insecure\": true,\n        \"timeout\": 10000000000,\n        \"log\": true,\n        \"max-msg-size\": 536870912,\n        \"prometheus-address\": \":8989\",\n        \"retry\": 10000000000,\n        \"api\": \":7890\",\n        \"get-type\": \"ALL\",\n        \"set-delimiter\": \":::\",\n        \"subscribe-mode\": \"stream\",\n        \"subscribe-stream-mode\": \"target-defined\",\n        \"subscribe-cluster-name\": \"default-cluster\",\n        \"subscribe-lock-retry\": 5000000000,\n        \"path-path-type\": \"xpath\",\n        \"prompt-max-suggestions\": 10,\n        \"prompt-prefix-color\": \"dark_blue\",\n        \"prompt-suggestions-bg-color\": \"dark_blue\",\n        \"prompt-description-bg-color\": \"dark_gray\",\n        \"targets\": {\n            \"192.168.1.131:57400\": {\n                \"name\": \"192.168.1.131:57400\",\n                \"address\": \"192.168.1.131:57400\",\n                \"username\": \"admin\",\n                \"password\": \"admin\",\n                \"timeout\": 10000000000,\n                \"insecure\": true,\n                \"skip-verify\": false,\n                \"buffer-size\": 1000,\n                \"retry-timer\": 10000000000\n            },\n            \"192.168.1.132:57400\": {\n                \"name\": \"192.168.1.132:57400\",\n                \"address\": \"192.168.1.131:57400\",\n                \"username\": \"admin\",\n                \"password\": \"admin\",\n                \"timeout\": 10000000000,\n                \"insecure\": true,\n                
\"skip-verify\": false,\n                \"buffer-size\": 1000,\n                \"retry-timer\": 10000000000\n            }\n        },\n        \"subscriptions\": {\n            \"sub1\": {\n                \"name\": \"sub1\",\n                \"paths\": [\n                    \"/interface/statistics\"\n                ],\n                \"mode\": \"stream\",\n                \"stream-mode\": \"sample\",\n                \"encoding\": \"json_ietf\",\n                \"sample-interval\": 1000000000\n            }\n        },\n        \"Outputs\": {\n            \"output2\": {\n                \"address\": \"192.168.1.131:4222\",\n                \"format\": \"event\",\n                \"subject\": \"telemetry\",\n                \"type\": \"nats\",\n                \"write-timeout\": \"10s\"\n            }\n        },\n        \"inputs\": {},\n        \"processors\": {},\n        \"clustering\": {\n            \"cluster-name\": \"cluster1\",\n            \"instance-name\": \"gnmic1\",\n            \"service-address\": \"gnmic1\",\n            \"services-watch-timer\": 60000000000,\n            \"targets-watch-timer\": 5000000000,\n            \"leader-wait-timer\": 5000000000,\n            \"locker\": {\n                \"address\": \"consul-agent:8500\",\n                \"type\": \"consul\"\n            }\n        }\n    }\n    ```\n=== \"500 Internal Server Error\"\n    ```json\n    {\n        \"errors\": [\n            \"Error Text\"\n        ]\n    }\n    ```\n\n## /api/v1/config/targets\n\n### `GET /api/v1/config/targets`\n\nRequest all targets configuration\n\nreturns the targets configuration as json\n\n=== \"Request\"\n    ```bash\n    curl --request GET gnmic-api-address:port/api/v1/config/targets\n    ```\n=== \"200 OK\"\n    ```json\n    {\n        \"192.168.1.131:57400\": {\n            \"name\": \"192.168.1.131:57400\",\n            \"address\": \"192.168.1.131:57400\",\n            \"username\": \"admin\",\n            \"password\": \"admin\",\n    
        \"timeout\": 10000000000,\n            \"insecure\": true,\n            \"skip-verify\": false,\n            \"buffer-size\": 1000,\n            \"retry-timer\": 10000000000\n        },\n        \"192.168.1.132:57400\": {\n            \"name\": \"192.168.1.132:57400\",\n            \"address\": \"192.168.1.131:57400\",\n            \"username\": \"admin\",\n            \"password\": \"admin\",\n            \"timeout\": 10000000000,\n            \"insecure\": true,\n            \"skip-verify\": false,\n            \"buffer-size\": 1000,\n            \"retry-timer\": 10000000000\n        }\n    }\n    ```\n=== \"404 Not found\"\n    ```json\n    {\n        \"errors\": [\n            \"no targets found\"\n        ]\n    }\n    ```\n=== \"500 Internal Server Error\"\n    ```json\n    {\n        \"errors\": [\n            \"Error Text\"\n        ]\n    }\n    ```\n\n### `GET /api/v1/config/targets/{id}`\n\nRequest a single target configuration\n\nReturns a single target configuration as json, where {id} is the target ID\n\n=== \"Request\"\n    ```bash\n    curl --request GET gnmic-api-address:port/api/v1/config/targets/192.168.1.131:57400\n    ```\n=== \"200 OK\"\n    ```json\n    {\n        \"name\": \"192.168.1.131:57400\",\n        \"address\": \"192.168.1.131:57400\",\n        \"username\": \"admin\",\n        \"password\": \"admin\",\n        \"timeout\": 10000000000,\n        \"insecure\": true,\n        \"skip-verify\": false,\n        \"buffer-size\": 1000,\n        \"retry-timer\": 10000000000\n    }\n    ```\n=== \"404 Not found\"\n    ```json\n    {\n        \"errors\": [\n            \"target $target not found\"\n        ]\n    }\n    ```\n=== \"500 Internal Server Error\"\n    ```json\n    {\n        \"errors\": [\n            \"Error Text\"\n        ]\n    }\n    ```\n\n### `POST /api/v1/config/targets`\n\nAdd a new target to gnmic configuration\n\nExpected request body is a single target config as json\n\nReturns an empty body if 
successful.\n\n=== \"Request\"\n    ```bash\n    curl --request POST -H \"Content-Type: application/json\" \\\n         -d '{\"name\": \"10.10.10.10:57400\", \"address\": \"10.10.10.10:57400\", \"username\": \"admin\", \"password\": \"admin\", \"insecure\": true}' \\\n         gnmic-api-address:port/api/v1/config/targets\n    ```\n=== \"200 OK\"\n    ```json\n    ```\n=== \"400 Bad Request\"\n    ```json\n    ```\n=== \"500 Internal Server Error\"\n    ```json\n    {\n        \"errors\": [\n            \"Error Text\"\n        ]\n    }\n    ```\n\n### `DELETE /api/v1/config/targets/{id}`\n  \nDeletes a target {id} configuration, all active subscriptions are terminated.\n\nReturns an empty body\n\n=== \"Request\"\n    ```bash\n    curl --request DELETE gnmic-api-address:port/api/v1/config/targets/192.168.1.131:57400\n    ```\n=== \"200 OK\"\n    ```json\n    ```\n\n### `PATCH /api/v1/config/targets/{id}/subscriptions`\n\nUpdates existing subscriptions for the target ID\n    \nReturns an empty body if successful.\n\n=== \"Request\"\n    ```bash\n    curl --request PATCH gnmic-api-address:port/api/v1/config/targets/192.168.1.131:57400/subscriptions -d '{\"subscriptions\": [\"sub1\", \"sub2\"]}'\n    ```\n=== \"200 OK\"\n    ```json\n    ```\n=== \"404 Not found\"\n    ```json\n    {\n        \"errors\": [\n            \"target $target not found\"\n        ]\n    }\n    ```\n=== \"400 Bad Request\"\n    ```json\n    {\n        \"errors\": [\n            \"subscription $subscription does not exist\"\n        ]\n    }\n    ```\n=== \"500 Internal Server Error\"\n    ```json\n    {\n        \"errors\": [\n            \"Error Text\"\n        ]\n    }\n    ```\n\n## /api/v1/config/subscriptions\n\n### `GET /api/v1/config/subscriptions`\n\nRequest all the configured subscriptions.\n\nReturns the subscriptions configuration as json\n\n## /api/v1/config/outputs\n\n### `GET /api/v1/config/outputs`\n\nRequest all the configured outputs.\n\nReturns the outputs configuration as 
json\n\n## /api/v1/config/inputs\n\n### `GET /api/v1/config/inputs`\n\nRequest all the configured inputs.\n\nReturns the inputs configuration as json\n\n## /api/v1/config/processors\n\n### `GET /api/v1/config/processors`\n\nRequest all the configured processors.\n\nReturns the processors configuration as json\n\n## /api/v1/config/clustering\n\n### `GET /api/v1/config/clustering`\n\nRequest the clustering configuration.\n\nReturns the clustering configuration as json\n"
  },
  {
    "path": "docs/user_guide/api/other.md",
    "content": "# Other\n\n## /api/v1/healthz\n\n### `GET /api/v1/healthz`\n\nHealth check endpoint for Kubernetes or similar\n\n=== \"Request\"\n    ```bash\n    curl --request GET gnmic-api-address:port/api/v1/healthz\n    ```\n=== \"200 OK\"\n    ```json\n    {\n        \"status\": \"healthy\"\n    }\n    ```\n    \n## /api/v1/admin/shutdown\n\n### `POST /api/v1/admin/shutdown`\n\nGracefully shut down the application\n\n=== \"Request\"\n    ```bash\n    curl --request POST gnmic-api-address:port/api/v1/admin/shutdown\n    ```\n"
  },
  {
    "path": "docs/user_guide/api/targets.md",
    "content": "## `GET /api/v1/targets`\n\nRequest all active targets details.\n\nReturns all active targets as json\n\n=== \"Request\"\n    ```bash\n    curl --request GET gnmic-api-address:port/api/v1/targets\n    ```\n=== \"200 OK\"\n    ```json\n    {\n        \"192.168.1.131:57400\": {\n            \"config\": {\n                \"name\": \"192.168.1.131:57400\",\n                \"address\": \"192.168.1.131:57400\",\n                \"username\": \"admin\",\n                \"password\": \"admin\",\n                \"timeout\": 10000000000,\n                \"insecure\": true,\n                \"skip-verify\": false,\n                \"buffer-size\": 1000,\n                \"retry-timer\": 10000000000\n            },\n            \"subscriptions\": {\n                \"sub1\": {\n                    \"name\": \"sub1\",\n                    \"paths\": [\n                        \"/interface/statistics\"\n                    ],\n                    \"mode\": \"stream\",\n                    \"stream-mode\": \"sample\",\n                    \"encoding\": \"json_ietf\",\n                    \"sample-interval\": 1000000000\n                }\n            }\n        },\n        \"192.168.1.131:57401\": {\n            \"config\": {\n                \"name\": \"192.168.1.131:57401\",\n                \"address\": \"192.168.1.131:57401\",\n                \"username\": \"admin\",\n                \"password\": \"admin\",\n                \"timeout\": 10000000000,\n                \"insecure\": true,\n                \"skip-verify\": false,\n                \"buffer-size\": 1000,\n                \"retry-timer\": 10000000000\n            },\n            \"subscriptions\": {\n                \"sub1\": {\n                    \"name\": \"sub1\",\n                \"paths\": [\n                    \"/interface/statistics\"\n                ],\n                \"mode\": \"stream\",\n                \"stream-mode\": \"sample\",\n                \"encoding\": \"json_ietf\",\n 
               \"sample-interval\": 1000000000\n                }\n            }\n        }\n    }\n    ```\n=== \"404 Not found\"\n    ```json\n    {\n        \"errors\": [\n            \"no targets found\"\n        ]\n    }\n    ```\n=== \"500 Internal Server Error\"\n    ```json\n    {\n        \"errors\": [\n            \"Error Text\"\n        ]\n    }\n    ```\n\n## `GET /api/v1/targets/{id}`\n\nQuery a single target details, if active.\n\nReturns a single target if active as json, where {id} is the target ID\n\n=== \"Request\"\n    ```bash\n    curl --request GET gnmic-api-address:port/api/v1/targets/192.168.1.131:57400\n    ```\n=== \"200 OK\"\n    ```json\n    {\n        \"config\": {\n            \"name\": \"192.168.1.131:57400\",\n            \"address\": \"192.168.1.131:57400\",\n            \"username\": \"admin\",\n            \"password\": \"admin\",\n            \"timeout\": 10000000000,\n            \"insecure\": true,\n            \"skip-verify\": false,\n            \"buffer-size\": 1000,\n            \"retry-timer\": 10000000000\n        },\n        \"subscriptions\": {\n            \"sub1\": {\n                \"name\": \"sub1\",\n                \"paths\": [\n                    \"/interface/statistics\"\n                ],\n                \"mode\": \"stream\",\n                \"stream-mode\": \"sample\",\n                \"encoding\": \"json_ietf\",\n                \"sample-interval\": 1000000000\n            }\n        }\n    }\n    ```\n=== \"404 Not found\"\n    ```json\n    {\n        \"errors\": [\n            \"no targets found\"\n        ]\n    }\n    ```\n=== \"500 Internal Server Error\"\n    ```json\n    {\n        \"errors\": [\n            \"Error Text\"\n        ]\n    }\n    ```\n\n## `POST /api/v1/targets/{id}`\n\nStarts a single target subscriptions, where {id} is the target ID\n\nReturns an empty body if successful.\n\n=== \"Request\"\n    ```bash\n    curl --request POST gnmic-api-address:port/api/v1/targets/192.168.1.131:57400\n 
   ```\n=== \"200 OK\"\n    ```json\n    ```\n=== \"404 Not found\"\n    ```json\n    {\n        \"errors\": [\n            \"target $target not found\"\n        ]\n    }\n    ```\n=== \"500 Internal Server Error\"\n    ```json\n    {\n        \"errors\": [\n            \"Error Text\"\n        ]\n    }\n    ```\n\n## `DELETE /api/v1/targets/{id}`\n  \nStops a single target active subscriptions, where {id} is the target ID\n    \nReturns an empty body if successful.\n\n=== \"Request\"\n    ```bash\n    curl --request DELETE gnmic-api-address:port/api/v1/targets/192.168.1.131:57400\n    ```\n=== \"200 OK\"\n    ```json\n    ```\n=== \"404 Not found\"\n    ```json\n    {\n        \"errors\": [\n            \"target $target not found\"\n        ]\n    }\n    ```\n=== \"500 Internal Server Error\"\n    ```json\n    {\n        \"errors\": [\n            \"Error Text\"\n        ]\n    }\n    ```\n\n"
  },
  {
    "path": "docs/user_guide/caching.md",
    "content": "\n`Caching` refers to the process of storing the collected gNMI updates before sending them out to the intended output(s).\n\nBy default, `gNMIc` outputs send out the received gNMI updates as they arrive (i.e without storing them).\n\nA cache is used to store the received updates when the [`gnmi-server`](gnmi_server.md) functionality is enabled and (optionally) when `influxdb` and `prometheus` outputs are enabled to allow for advanced data pipeline processing.\n\nCaching messages before writing them to a remote location allows implementing a few use cases like **rate limiting**, **batch processing**, **data replication**, etc.\n\nCaching support for other outputs is planned.\n\n### How does it work?\n\nWhen caching is enabled for a certain output, the received gNMI updates are not written directly to the output remote server (for e.g: InfluxDB server), but rather cached locally until the `cache-flush-timer` is reached (in the case of an `influxdb` output) or when the output receives a `Prometheus` scrape request (in the case of a `prometheus` output).\n\nThe below diagram shows how an InfluxDB output works with and without cache enabled:\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:10,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/influxdb_output_with_without_cache.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2F/influxdb_output_with_without_cache.drawio\" async></script>\n\nThe cached gNMI updates are periodically retrieved from the cache in batch then converted to 
[events](event_processors/intro.md#the-event-format).\n\nIf [processors](event_processors/intro.md) are defined under the output config section, they are applied to the whole list of events at once. This allows for augmentation of messages with values from other messages even if they were received in separate updates or collected from a different target/subscription.\n\n### Enable caching\n\n#### gnmi-server\n\nThe gNMI server has caching enabled by default.\nThe cache type and its behavior can be tweaked, see [here](#cache-types)\n\n```yaml\ngnmi-server:\n  #\n  # other gnmi-server related attributes\n  #\n  cache: {}\n```\n\n#### outputs\n\nCaching can be enabled per output by populating the `cache` attribute under the desired output:\n\n```yaml\noutputs:\n  output1:\n    type: prometheus\n    #\n    # other output related attributes\n    #\n    cache: {}\n```\n\nThis enables `output1` to use a cache of type [`oc`](#gnmi-cache).\n\nEach output has its own cache.\nUsing a single global cache will be implemented in a future release.\n\n### Distributed caches\n\nWhen running multiple instances of `gNMIc` it's possible to synchronize the collected data between all the instances using a distributed cache.\n\nEach output that is configured with a remote cache will write the collected gNMI updates to the remote cache first, then syncs back all the cached data to its local cache then eventually write it to the output.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:10,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/distributed_caches.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" 
src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2F/distributed_caches.drawio\" async></script>\n\n(1) The received gNMI updates are written to the remote cache.\n\n(2) The output syncs the remote cache data to its local cache.\n\n(3) The locally cached data is written to the remote output periodically or on scrape request.\n\nThis is useful when different instances collect data from different targets and/or subscriptions. A single instance can be responsible for writing all the collected data to the output or each instance would be writing to a different output.\n\n### Cache types\n\n`gNMIc` supports 4 cache types. There is 1 local cache and 3 distributed caches \"flavors\".\n\nThe choice of cache to use depends on the use case you are trying to implement.\n\nA local cache is local to the `gNMIc` instance i.e not exposed externally,\nwhile a distributed cache is external to the `gNMIc` instance, potentially shared by multiple `gNMIc` instances and is always combined with a local cache to sync updates between `gNMIc` instances.\n\n#### gNMI cache (local)\n\nIs an in-memory gNMI cache based on the Openconfig gNMI cache published [here](https://github.com/openconfig/gnmi/tree/master/cache)\n\nThis type of cache is ideal when running a single `gNMIc` instance. 
It is also the default cache type for the gNMI server and for an output when caching is enabled.\n\nConfiguration:\n\n```yaml\noutputs:\n  output1:\n    type: prometheus # or influxdb\n    #\n    # other output related fields\n    #\n    cache: \n      type: oc\n      # duration, default: 60s.\n      # updates older than the expiration value will not be read from the cache.\n      expiration: 60s\n      # enable extra logging\n      debug: false\n```\n\n#### NATS cache (distributed)\n\nIs a cache type that relies on a [NATS server](https://docs.nats.io/) to distribute the collected updates between `gNMIc` instances.\n\nThis type of cache is useful when multiple `gNMIc` instances are subscribed to different targets and/or different gNMI paths.\n\nConfiguration:\n\n```yaml\noutputs:\n  output1:\n    type: prometheus # or influxdb\n    #\n    # other output related fields\n    #\n    cache:\n      type: nats\n      # string, address of the remote NATS server,\n      # if left empty an in memory NATS server will be created and used.\n      address:\n      # string, the NATS server username.\n      username:\n      # string, the NATS server password.\n      password:\n      # string, expiration period of received messages.\n      expiration: 60s\n      # enable extra logging\n      debug: false\n```\n\n#### JetStream cache (distributed)\n\nIs a cache type that relies on a [JetStream server](https://docs.nats.io/nats-concepts/jetstream) to distribute the collected updates between `gNMIc` instances.\n\nThis type of cache is useful when multiple `gNMIc` instances are subscribed to different targets and/or different gNMI paths.\n\nIt is planned to add [gNMI historical subscriptions](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md#1-purpose) support using the `jetstream` cache type.\n\nConfiguration:\n\n```yaml\noutputs:\n  output1:\n    type: prometheus # or influxdb\n    #\n    # other output related fields\n    #\n    cache:\n      type: 
jetstream\n      # string, address of the remote NATS JetStream server,\n      # if left empty an in memory NATS JetStream server will be created and used.\n      address:\n      # string, the JetStream server username.\n      username:\n      # string, the JetStream server password.\n      password:\n      # duration, default: 60s.\n      # Expiration period of received messages.\n      expiration: 60s\n      # int64, default: 1073741824 (1 GiB). \n      # Max number of bytes stored in the cache per subscription.\n      max-bytes:\n      # int64, default: 1048576. \n      # Max number of messages stored per subscription.\n      max-msgs-per-subscription:\n      # int, default 100. \n      # Batch size used by the JetStream pull subscriber.\n      fetch-batch-size:\n      # duration, default 100ms. \n      # Wait time used by the JetStream pull subscriber.\n      fetch-wait-time:\n      # enable extra logging\n      debug: false      \n```\n\n#### Redis cache (distributed)\n\nIs a cache type that relies on a [Redis PUBSUB server](https://redis.io/docs/manual/pubsub/) to distribute the collected updates between `gNMIc` instances.\n\nThis type of cache is useful when multiple `gNMIc` instances are subscribed to different targets and/or different gNMI paths.\n\n```yaml\noutputs:\n  output1:\n    type: prometheus # or influxdb\n    #\n    # other output related fields\n    #\n    cache:\n      type: redis\n      # string, redis server address\n      address:\n      # string, the Redis server username.\n      username:\n      # string, the Redis server password.\n      password:\n      # duration, default: 60s.\n      # Expiration period of received messages.\n      expiration: 60s\n      # enable extra logging\n      debug: false\n```\n"
  },
  {
    "path": "docs/user_guide/collector/collector_api.md",
    "content": "# Collector REST API\n\nThe collector exposes a REST API for dynamic configuration management and status queries. This API is specific to the collector mode and differs from the API available in subscribe mode.\n\n## Base URL\n\nAll API endpoints are prefixed with `/api/v1`. For example, if the API server is running on `localhost:7890`:\n\n```\nhttp://localhost:7890/api/v1/targets\n```\n\n## Authentication\n\nIf TLS is configured with client authentication, requests must include valid client certificates.\n\n## Common Response Formats\n\n### Success Response\n\nMost successful responses return JSON with HTTP status 200.\n\n### Error Response\n\nError responses include an `errors` array:\n\n```json\n{\n  \"errors\": [\"error message 1\", \"error message 2\"]\n}\n```\n\n---\n\n## Health & Admin Endpoints\n\n### Health Check\n\n```\nGET /api/v1/healthz\n```\n\nReturns the health status of the collector.\n\n**Response:** `200 OK` if healthy\n\n### Shutdown\n\n```\nPOST /api/v1/admin/shutdown\n```\n\nNot implemented in Collector mode\n\n---\n\n## Configuration Endpoints\n\n### Get Full Configuration\n\n```\nGET /api/v1/config\n```\n\nReturns the current configuration of the collector.\n\n### Apply Configuration\n\n```\nPOST /api/v1/config/apply\n```\n\nApplies a complete configuration to the collector. 
Resources not included in the request are deleted.\n\n**Request Body:**\n\n```json\n{\n  \"targets\": {\n    \"router1\": {\n      \"address\": \"10.0.0.1:57400\",\n      \"username\": \"admin\",\n      \"password\": \"admin\",\n      \"skip-verify\": true,\n      \"subscriptions\": [\"interfaces\"]\n    }\n  },\n  \"subscriptions\": {\n    \"interfaces\": {\n      \"paths\": [\"/interfaces/interface/state/counters\"],\n      \"mode\": \"stream\",\n      \"stream-mode\": \"sample\",\n      \"sample-interval\": \"10s\"\n    }\n  },\n  \"outputs\": {\n    \"prometheus\": {\n      \"type\": \"prometheus\",\n      \"listen\": \":9804\"\n    }\n  },\n  \"inputs\": {},\n  \"processors\": {},\n  \"tunnel-target-matches\": {}\n}\n```\n\n**Validation Rules:**\n\n- If `targets` are provided, at least one `subscription` is required\n- If `inputs` are provided, at least one `output` is required\n- Empty request is valid (resets all configuration)\n\n**Headers:**\n\n- `Content-Encoding: gzip` - Request body is gzip compressed\n\n---\n\n## Targets\n\n### List Targets (Runtime State)\n\n```\nGET /api/v1/targets\n```\n\nReturns all targets with their runtime state (connection status, active subscriptions).\n\n**Response:**\n\n```json\n[\n  {\n    \"name\": \"router1\",\n    \"state\": \"running\",\n    \"config\": {\n      \"address\": \"10.0.0.1:57400\",\n      \"username\": \"admin\",\n      \"skip-verify\": true\n    },\n    \"subscriptions\": {\n      \"interfaces\": {\n        \"state\": \"running\"\n      }\n    }\n  }\n]\n```\n\n### Get Target (Runtime State)\n\n```\nGET /api/v1/targets/{name}\n```\n\nReturns a specific target with its runtime state.\n\n### List Target Configurations\n\n```\nGET /api/v1/config/targets\n```\n\nReturns target configurations (without runtime state).\n\n### Get Target Configuration\n\n```\nGET /api/v1/config/targets/{name}\n```\n\n### Create/Update Target\n\n```\nPOST /api/v1/config/targets\n```\n\n**Request Body:**\n\n```json\n{\n  \"name\": 
\"router1\",\n  \"address\": \"10.0.0.1:57400\",\n  \"username\": \"admin\",\n  \"password\": \"admin\",\n  \"skip-verify\": true,\n  \"subscriptions\": [\"interfaces\"],\n  \"outputs\": [\"prometheus\"]\n}\n```\n\n### Delete Target\n\n```\nDELETE /api/v1/config/targets/{name}\n```\n\n### Update Target Subscriptions\n\n```\nPATCH /api/v1/config/targets/{name}/subscriptions\n```\n\n**Request Body:**\n\n```json\n{\n  \"subscriptions\": [\"interfaces\", \"bgp\"]\n}\n```\n\n### Update Target Outputs\n\n```\nPATCH /api/v1/config/targets/{name}/outputs\n```\n\n**Request Body:**\n\n```json\n{\n  \"outputs\": [\"prometheus\", \"influxdb\"]\n}\n```\n\n### Update Target State\n\n```\nPOST /api/v1/config/targets/{name}/state\nPOST /api/v1/targets/{name}/state/{state}\n```\n\nEnable or disable a target. State can be `enabled` or `disabled`.\n\n---\n\n## Subscriptions\n\n### List Subscriptions (Runtime State)\n\n```\nGET /api/v1/subscriptions\n```\n\nReturns subscriptions with their runtime state (which targets are using them).\n\n**Response:**\n\n```json\n[\n  {\n    \"name\": \"interfaces\",\n    \"config\": {\n      \"paths\": [\"/interfaces/interface/state/counters\"],\n      \"mode\": \"stream\",\n      \"stream-mode\": \"sample\",\n      \"sample-interval\": \"10s\"\n    },\n    \"targets\": {\n      \"router1\": {\n        \"state\": \"running\"\n      }\n    }\n  }\n]\n```\n\n### Get Subscription (Runtime State)\n\n```\nGET /api/v1/subscriptions/{name}\n```\n\n### List Subscription Configurations\n\n```\nGET /api/v1/config/subscriptions\n```\n\n### Get Subscription Configuration\n\n```\nGET /api/v1/config/subscriptions/{name}\n```\n\n### Create/Update Subscription\n\n```\nPOST /api/v1/config/subscriptions\n```\n\n**Request Body:**\n\n```json\n{\n  \"name\": \"interfaces\",\n  \"paths\": [\"/interfaces/interface/state/counters\"],\n  \"mode\": \"stream\",\n  \"stream-mode\": \"sample\",\n  \"sample-interval\": \"10s\",\n  \"encoding\": \"json\",\n  \"outputs\": 
[\"prometheus\"]\n}\n```\n\n### Delete Subscription\n\n```\nDELETE /api/v1/config/subscriptions/{name}\n```\n\n---\n\n## Outputs\n\n### List Output Configurations\n\n```\nGET /api/v1/config/outputs\n```\n\n**Response:**\n\n```json\n{\n  \"prometheus\": {\n    \"type\": \"prometheus\",\n    \"listen\": \":9804\",\n    \"path\": \"/metrics\"\n  }\n}\n```\n\n### Get Output Configuration\n\n```\nGET /api/v1/config/outputs/{name}\n```\n\n### Create/Update Output\n\n```\nPOST /api/v1/config/outputs\n```\n\n**Request Body:**\n\n```json\n{\n  \"name\": \"prometheus\",\n  \"type\": \"prometheus\",\n  \"listen\": \":9804\",\n  \"path\": \"/metrics\",\n  \"event-processors\": [\"trim-prefixes\"]\n}\n```\n\n### Delete Output\n\n```\nDELETE /api/v1/config/outputs/{name}\n```\n\n### Update Output Processors\n\n```\nPATCH /api/v1/config/outputs/{name}/processors\n```\n\n**Request Body:**\n\n```json\n{\n  \"event-processors\": [\"processor1\", \"processor2\"]\n}\n```\n\n**Note:** Currently returns `501 Not Implemented`.\n\n---\n\n## Inputs\n\n### List Input Configurations\n\n```\nGET /api/v1/config/inputs\n```\n\n**Response:**\n\n```json\n{\n  \"nats-input\": {\n    \"type\": \"nats\",\n    \"address\": \"nats://localhost:4222\",\n    \"subject\": \"telemetry.>\"\n  }\n}\n```\n\n### Get Input Configuration\n\n```\nGET /api/v1/config/inputs/{name}\n```\n\n### Create/Update Input\n\n```\nPOST /api/v1/config/inputs\n```\n\n**Request Body:**\n\n```json\n{\n  \"name\": \"nats-input\",\n  \"type\": \"nats\",\n  \"address\": \"nats://localhost:4222\",\n  \"subject\": \"telemetry.>\",\n  \"outputs\": [\"prometheus\"],\n  \"event-processors\": [\"add-tags\"]\n}\n```\n\n### Delete Input\n\n```\nDELETE /api/v1/config/inputs/{name}\n```\n\n### Update Input Processors\n\n```\nPATCH /api/v1/config/inputs/{name}/processors\n```\n\n**Note:** Currently returns `501 Not Implemented`.\n\n### Update Input Outputs\n\n```\nPATCH /api/v1/config/inputs/{name}/outputs\n```\n\n**Note:** Currently returns 
`501 Not Implemented`.\n\n---\n\n## Processors\n\n### List Processor Configurations\n\n```\nGET /api/v1/config/processors\n```\n\n**Response:**\n\n```json\n[\n  {\n    \"name\": \"trim-prefixes\",\n    \"type\": \"event-strings\",\n    \"config\": {\n      \"value-names\": [\".*\"],\n      \"transforms\": [...]\n    }\n  }\n]\n```\n\n### Get Processor Configuration\n\n```\nGET /api/v1/config/processors/{name}\n```\n\n### Create/Update Processor\n\n```\nPOST /api/v1/config/processors\n```\n\n### Delete Processor\n\n```\nDELETE /api/v1/config/processors/{name}\n```\n\n---\n\n## Tunnel Target Matches\n\n### List Tunnel Target Matches\n\n```\nGET /api/v1/config/tunnel-target-matches\n```\n\n### Get Tunnel Target Match\n\n```\nGET /api/v1/config/tunnel-target-matches/{name}\n```\n\n### Create/Update Tunnel Target Match\n\n```\nPOST /api/v1/config/tunnel-target-matches\n```\n\n**Request Body:**\n\n```json\n{\n  \"name\": \"srl-devices\",\n  \"target-type\": \"srlinux\",\n  \"subscriptions\": [\"interfaces\"],\n  \"outputs\": [\"prometheus\"]\n}\n```\n\n### Delete Tunnel Target Match\n\n```\nDELETE /api/v1/config/tunnel-target-matches/{name}\n```\n\n---\n\n## Cluster Endpoints\n\n### Get Cluster Status\n\n```\nGET /api/v1/cluster\n```\n\nReturns the current cluster status including membership and target distribution.\n\n### Get Leader\n\n```\nGET /api/v1/cluster/leader\n```\n\nReturns information about the current cluster leader.\n\n### Release Leadership\n\n```\nDELETE /api/v1/cluster/leader\n```\n\nForces the current leader to release leadership (triggers new election).\n\n### Get Members\n\n```\nGET /api/v1/cluster/members\n```\n\nReturns list of cluster members with their status.\n\n### Drain Instance\n\n```\nPOST /api/v1/cluster/members/{id}/drain\n```\n\nDrains all targets from a specific instance (moves them to other instances).\n\n### Rebalance\n\n```\nPOST /api/v1/cluster/rebalance\n```\n\nTriggers a rebalance of targets across cluster members.\n\n### Move 
Target\n\n```\nPOST /api/v1/cluster/move\n```\n\nMoves a specific target to a different instance.\n\n**Request Body:**\n\n```json\n{\n  \"target\": \"router1\",\n  \"instance\": \"collector-2\"\n}\n```\n\n---\n\n## Assignments\n\n### List Assignments\n\n```\nGET /api/v1/assignments\n```\n\nReturns current target-to-instance assignments.\n\n### Get Assignment\n\n```\nGET /api/v1/assignments/{target}\n```\n\n### Create Assignment\n\n```\nPOST /api/v1/assignments\n```\n\nManually assign a target to an instance.\n\n### Delete Assignment\n\n```\nDELETE /api/v1/assignments/{target}\n```\n\n---\n\n## Metrics\n\n```\nGET /metrics\n```\n\nReturns Prometheus metrics for the collector (if `enable-metrics: true` in api-server config).\n\n---\n\n## Examples\n\n### Using curl\n\n```bash\n# List all targets\ncurl http://localhost:7890/api/v1/targets\n\n# Create a target\ncurl -X POST http://localhost:7890/api/v1/config/targets \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"name\": \"router1\",\n    \"address\": \"10.0.0.1:57400\",\n    \"username\": \"admin\",\n    \"password\": \"admin\",\n    \"skip-verify\": true,\n    \"subscriptions\": [\"interfaces\"]\n  }'\n\n# Delete a target\ncurl -X DELETE http://localhost:7890/api/v1/config/targets/router1\n\n# Apply full configuration\ncurl -X POST http://localhost:7890/api/v1/config/apply \\\n  -H \"Content-Type: application/json\" \\\n  -d @config.json\n\n# Apply gzipped configuration\ncurl -X POST http://localhost:7890/api/v1/config/apply \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Content-Encoding: gzip\" \\\n  --data-binary @config.json.gz\n```\n\n### Using gnmic CLI\n\nThe collector subcommands use the same API endpoints:\n\n```bash\n# Uses GET /api/v1/targets\ngnmic --config collector.yaml collect targets list\n\n# Uses GET /api/v1/targets/{name}\ngnmic --config collector.yaml collect targets get --name router1\n\n# Uses POST /api/v1/config/targets\ngnmic --config collector.yaml collect targets set 
--input target.yaml\n\n# Uses DELETE /api/v1/config/targets/{name}\ngnmic --config collector.yaml collect targets delete --name router1\n```\n"
  },
  {
    "path": "docs/user_guide/collector/collector_configuration.md",
    "content": "# Collector Configuration\n\nThis page describes the configuration options specific to the collector mode. For general configuration options (targets, subscriptions, outputs, inputs, processors), refer to their respective documentation pages.\n\n## API Server\n\nThe API server is required for the collector to accept configuration changes and serve status information.\n\n```yaml\napi-server:\n  # string, address to listen on in the form \"host:port\"\n  # the host part can be omitted to listen on all interfaces\n  address: :7890\n  # duration, request timeout\n  # split equally between read and write timeouts\n  timeout: 10s \n  # TLS configuration for secure API access\n  tls:\n    # string, path to CA certificate file\n    # used to verify client certificates\n    ca-file:\n    # string, path to server certificate file\n    cert-file:    \n    # string, path to server private key file\n    key-file:\n    # string, client authentication mode\n    # one of: \"\", \"request\", \"require\", \"verify-if-given\", \"require-verify\"\n    #\n    # - \"\":              no client certificate requested\n    # - \"request\":       request certificate, don't require it, don't verify\n    # - \"require\":       require certificate, don't verify\n    # - \"verify-if-given\": request certificate, verify if provided\n    # - \"require-verify\": require and verify certificate\n    #\n    # defaults to \"\" if no ca-file, \"require-verify\" if ca-file is set\n    client-auth: \"\"\n  \n  # boolean, enable Prometheus metrics endpoint at /metrics\n  enable-metrics: false\n  # boolean, enable debug logging for API requests\n  debug: false\n```\n\n## Clustering\n\nClustering enables multiple collector instances to work together for high availability and load distribution.\n\n```yaml\nclustering:\n  # string, cluster name\n  # instances with the same cluster name form a cluster\n  # used in leader lock key and target lock keys\n  # defaults to \"default-cluster\"\n  
cluster-name: default-cluster\n  # string, unique instance name within the cluster\n  # used as value in target locks and leader lock\n  # defaults to \"gnmic-$UUID\" if not set\n  instance-name: \"\"  \n  # string, service address to register with the locker (e.g., Consul)\n  # defaults to the address part of api-server address\n  service-address: \"\"\n  # duration, how long to watch for service changes (Consul blocking query)\n  # defaults to 60s\n  services-watch-timer: 60s\n  # duration, interval between target distribution checks by the leader\n  # defaults to 20s\n  targets-watch-timer: 20s\n  # duration, max time to wait for an instance to lock an assigned target\n  # if exceeded, leader reassigns the target to another instance\n  # defaults to 10s\n  target-assignment-timeout: 10s\n  # duration, time to wait after becoming leader before distributing targets\n  # allows other instances to register their API services\n  # defaults to 5s\n  leader-wait-timer: 5s\n  # tags used for target placement decisions\n  # targets with matching tags are preferentially assigned to this instance\n  tags: []\n  # locker configuration (required for clustering)\n  locker:\n    # string, locker type\n    type: consul \n    # string, locker server address\n    address: localhost:8500\n    # string, datacenter name (Consul-specific)\n    datacenter: dc1\n    # string, username for HTTP basic auth\n    username:\n    # string, password for HTTP basic auth  \n    password:    \n    # string, ACL token\n    token:\n    # duration, session TTL\n    session-ttl: 10s\n    # duration, delay before lock can be acquired after release\n    delay: 5s\n    # duration, time between lock retry attempts\n    retry-timer: 2s\n    # boolean, enable debug logging\n    debug: false\n```\n\n<!-- ## gNMI Server\n\nThe embedded gNMI server allows the collector to serve collected data to downstream gNMI clients.\n\n```yaml\ngnmi-server:\n  # string, address to listen on\n  address: :57400\n  \n  # 
TLS configuration\n  tls:\n    ca-file:\n    cert-file:\n    key-file:\n    client-auth: \"\"\n  \n  # integer, maximum concurrent subscribe RPCs\n  max-subscriptions: 64\n  \n  # integer, maximum concurrent Get/Set RPCs\n  max-unary-rpc: 64\n  \n  # integer, maximum receive message size in bytes\n  # defaults to 4MB\n  max-recv-msg-size:\n  \n  # integer, maximum send message size in bytes\n  # defaults to MaxInt32\n  max-send-msg-size:\n  \n  # integer, maximum concurrent streams per RPC\n  max-concurrent-streams:\n  \n  # duration, TCP keepalive time and interval\n  # negative value disables keepalive\n  tcp-keepalive:\n  \n  # gRPC keepalive configuration\n  keepalive:\n    max-connection-idle:\n    max-connection-age:\n    max-connection-age-grace:\n    time: 120m\n    timeout: 20s\n  \n  # duration, minimum sample interval\n  # enforced when client requests smaller interval\n  min-sample-interval: 1ms\n  \n  # duration, default sample interval\n  # used when client requests 0 interval\n  default-sample-interval: 1s\n  \n  # duration, minimum heartbeat interval\n  min-heartbeat-interval: 1s\n  \n  # boolean, enable Prometheus gRPC metrics\n  enable-metrics: false\n  \n  # boolean, enable debug logging\n  debug: false\n  \n  # cache configuration for the gNMI server\n  cache:\n    # string, cache type: \"oc\" (OpenConfig) or \"redis\"\n    type: oc\n    \n    # string, address (for redis type)\n    address:\n    \n    # string, username (for redis type)\n    username:\n    \n    # string, password (for redis type)\n    password:\n    \n    # duration, cache expiration time\n    expiration: 0s\n    \n    # boolean, enable debug logging\n    debug: false\n  \n  # Consul service registration\n  service-registration:\n    address:\n    datacenter:\n    username:\n    password:\n    token:\n    check-interval: 5s\n    max-fail: 3\n    name:\n    tags: []\n``` -->\n\n## Tunnel Server\n\nThe tunnel server accepts connections from gNMI tunnel 
targets.\n\n```yaml\ntunnel-server:\n  # string, address to listen on\n  address: :57401\n  \n  # TLS configuration\n  tls:\n    ca-file:\n    cert-file:\n    key-file:\n    client-auth: \"\"\n  \n  # boolean, enable debug logging\n  debug: false\n```\n\n## Tunnel Target Matches\n\nDefine rules for handling tunnel target connections.\n\n```yaml\ntunnel-target-matches:\n  # match rule name\n  match-all:\n    # string, target id to match (from tunnel target Register RPC)\n    id: \"*\"\n    # string, target type to match, typically GNMI_GNOI (from tunnel target Register RPC)\n    type: \"GNMI_GNOI\"\n    \n    # list of subscription names to apply\n    subscriptions:\n      - interfaces\n      - system\n    \n    # list of output names to send data to\n    outputs:\n      - prometheus\n```\n\nNote that tunnel-target-matches are not processed in any specific order. It's advised to make sure there is no overlap between the rules `type` and `id`.\n\n## Complete Example\n\n```yaml\n# API server (required)\napi-server:\n  address: :7890\n  timeout: 10s\n  enable-metrics: true\n\n# Clustering (optional, for HA)\nclustering:\n  cluster-name: production-cluster\n  instance-name: collector-1\n  locker:\n    type: consul\n    address: consul.service.consul:8500\n    session-ttl: 10s\n\n# gNMI server\ngnmi-server:\n  address: :57400\n  skip-verify: true\n  cache:\n    type: oc\n    expiration: 60s\n\n# Tunnel server\ntunnel-server:\n  address: :57401\n\n# Tunnel target matches\ntunnel-target-matches:\n  srl-devices:\n    id: router1\n    type: \"GNMI_GNOI\"\n    subscriptions:\n      - interfaces\n    outputs:\n      - prometheus\n\n# Targets\ntargets:\n  spine1:\n    address: 10.0.0.1:57400\n    username: admin\n    password: admin\n    skip-verify: true\n    subscriptions:\n      - interfaces\n      - bgp\n    outputs:\n      - prometheus\n\n# Subscriptions\nsubscriptions:\n  interfaces:\n    paths:\n      - /interfaces/interface/state/counters\n    mode: stream\n    
stream-mode: sample  \n    sample-interval: 10s\n  \n  bgp:\n    paths:\n      - /network-instances/network-instance/protocols/protocol/bgp\n    mode: stream\n    stream-mode: on-change\n\n# Outputs\noutputs:\n  prometheus:\n    type: prometheus\n    listen: :9804\n    path: /metrics\n    event-processors:\n      - trim-prefixes\n\n# Processors\nprocessors:\n  trim-prefixes:\n    event-strings:\n      value-names:\n        - \".*\"\n      transforms:\n        - trim-prefix:\n            apply-on: name\n            prefix: /interfaces/interface/state/\n```\n"
  },
  {
    "path": "docs/user_guide/collector/collector_intro.md",
    "content": "# Collector Mode\n\n## Introduction\n\nThe Collector mode (`gnmic collect --config <file_name>`) is ideal for a long-running telemetry collection service.\n\nWhile the `subscribe` command is designed for interactive use and ad-hoc data collection, the `collect` command is optimized for continuous operation with dynamic configuration capabilities.\n\n## Dynamic Configuration\n\nUnlike gNMIc running with the subscribe command, the collector allows runtime modifications without restarts.\n\nYou can add, update or remove **Targets**, **Subscriptions**, **Outputs**, **Processors** and **Inputs**. All the changes are applied at runtime.\n\nAll configuration changes are made via a REST API.\n\n## Clustering\n\nMultiple collector instances can form a cluster just like gNMIc subscribe.\n\nThe cluster uses a distributed locker, such as **Consul**, for:\n\n- Leader election\n- Target assignment coordination\n- Instance membership tracking\n\n## Tunnel Target Support\n\nThe collector supports gRPC tunnel, it will accept connections from gNMI tunnel targets.\nThe tunnel target configuration is done using tunnel-target-matches.\n\n## Comparison with Subscribe Command\n\n| Feature | `subscribe` Command | `collect` Command |\n|---------|---------------------|-------------------|\n| Configuration | Static (file/flags) | Dynamic (both file and REST API) |\n| Target management | Fixed at startup or using loaders | startup file, loaders or REST API |\n| Subscription management | Fixed at startup, can be modified using the REST API but requires a target restart to get applied | Add/update/remove at runtime using REST API |\n| Output management | Fixed at startup | Add/update/remove at runtime using REST API |\n| Tunnel targets | Fixed at startup | dynamic using target tunnel matching rules |\n\n## Getting Started\n\n1. Create a configuration file with at minimum the `api-server` section\n2. Start the collector: `gnmic --config collector.yaml collect`\n3. 
Use the REST API or CLI subcommands to manage configuration\n\nSee [Collector Configuration](./collector_configuration.md) for detailed configuration options and [Collector REST API](./collector_api.md) for API reference.\n"
  },
  {
    "path": "docs/user_guide/configuration_env.md",
    "content": "`gnmic` can be configured using environment variables, it will read the environment variables starting with `GNMIC_`.\n\nThe Env variable names are inline with the flag names as well as the configuration hierarchy.\n\nFor e.g to set the gNMI username, the env variable `GNMIC_USERNAME` should be set.\n\n### Constructing environment variables names\n\n#### Flags to environment variables mapping\n\nGlobal flags to env variable name mapping:\n\n| **Flag name**        | **ENV variable name**    |\n| -------------------- | ------------------------ |\n| --address            | GNMIC_ADDRESS            |\n| --encoding           | GNMIC_ENCODING           |\n| --format             | GNMIC_FORMAT             |\n| --insecure           | GNMIC_INSECURE           |\n| --log                | GNMIC_LOG                |\n| --log-file           | GNMIC_LOG_FILE           |\n| --no-prefix          | GNMIC_NO_PREFIX          |\n| --password           | GNMIC_PASSWORD           |\n| --prometheus-address | GNMIC_PROMETHEUS_ADDRESS |\n| --proxy-from-env     | GNMIC_PROXY_FROM_ENV     |\n| --retry              | GNMIC_RETRY              |\n| --skip-verify        | GNMIC_SKIP_VERIFY        |\n| --timeout            | GNMIC_TIMEOUT            |\n| --tls-ca             | GNMIC_TLS_CA             |\n| --tls-cert           | GNMIC_TLS_CERT           |\n| --tls-key            | GNMIC_TLS_KEY            |\n| --tls-max-version    | GNMIC_TLS_MAX_VERSION    |\n| --tls-min-version    | GNMIC_TLS_MIN_VERSION    |\n| --tls-version        | GNMIC_TLS_VERSION        |\n| --log-tls-secret     | GNMIC_LOG_TLS_SECRET     |\n| --username           | GNMIC_USERNAME           |\n| --cluster-name       | GNMIC_CLUSTER_NAME       |\n| --instance-name      | GNMIC_INSTANCE_NAME      |\n| --proto-file         | GNMIC_PROTO_FILE         |\n| --proto-dir          | GNMIC_PROTO_DIR          |\n| --token              | GNMIC_TOKEN              |\n\n#### Configuration file to environment variables 
mapping\n\nFor configuration items that do not have a corresponding flag, the env variable will be constructed from the path elements to the variable name joined with a `_`.\n\nFor e.g to set the clustering locker address, as in the yaml blob below:\n\n```yaml\nclustering:\n  locker:\n    address: \n```\n\nthe env variable `GNMIC_CLUSTERING_LOCKER_ADDRESS` should be set\n\n!!! note\n\n    - Configuration items of type list cannot be set using env vars.\n    - Intermediate configuration keys should not contain `_` or `-`.\n\nExample:\n\n```yaml\noutputs:\n  output1:  # <-- should not contain `_` or `-`\n    type: prometheus\n    listen: :9804\n```\n\nIs equivalent to:  \n`GNMIC_OUTPUTS_OUTPUT1_TYPE=prometheus`  \n`GNMIC_OUTPUTS_OUTPUT1_LISTEN=:9804`\n"
  },
  {
    "path": "docs/user_guide/configuration_file.md",
    "content": "`gnmic` configuration by means of the command line flags is both consistent and reliable. But sometimes it's not the best way forward.\n\nWith lots of configuration options that `gnmic` supports it might get tedious to pass them all via CLI flags. In cases like that the file-based configuration comes in handy.\n\nWith a configuration file a user can specify all the command line flags by means of a single file. `gnmic` will read this file and retrieve the configuration options from it.\n\n### What options can be in a file?\nConfiguration file allows a user to specify everything that can be supplied over the CLI and more.\n#### Global flags\nAll of the [global](#global-flags) flags can be put in a conf file. Consider the following example of a typical configuration file in YAML format:\n```yaml\n# gNMI target address; CLI flag `--address`\naddress: \"10.0.0.1:57400\"\n# gNMI target user name; CLI flag `--username`\nusername: admin\n# gNMI target user password; CLI flag `--password`\npassword: NokiaSrl1!\n# connection mode; CLI flag `--insecure`\ninsecure: true\n# log file location; CLI flag `--log-file`\nlog-file: /tmp/gnmic.log\n```\nWith such a file located at a default path the gNMI requests can be made in a very short and concise form:\n\n```bash\n# configuration file is read by its default path\ngnmic capabilities\n\n# cfg file has all the global options set, so only the local flags are needed\ngnmic get --path /configure/system/name\n```\n\n#### Local flags\nLocal flags have the scope of the command where they have been defined. 
Local flags can be put in a configuration file as well.\n\nTo avoid flags names overlap between the different commands a command name should prepend the flag name - `<cmd name>-<flag name>`.\n\nSo, for example, we can provide the [`path`](../cmd/get.md#path) flag of a [`get`](../cmd/get.md) command in the file by adding the `get-` prefix to the local flag name:\n\n```yaml\naddress: \"router.lab:57400\"\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nget-path: /configure/system/name  # `get` command local flag\n```\n\nAnother example: the [`update-path`](../cmd/set.md#1-in-line-update-implicit-type) flag of a [`set`](../cmd/set.md) will be `set-update-path` in the configuration file.\n\n#### Targets\nIt is possible to specify multiple targets with different configurations (credentials, timeout,...). This is described in [Multiple targets](targets/targets.md) documentation article.\n\n#### Subscriptions\nIt is possible to specify multiple subscriptions and associate them with different targets in a flexible way. This configuration option is described in [Multiple subscriptions](subscriptions.md) documentation article.\n\n#### Outputs\nThe other mode `gnmic` supports (in contrast to CLI) is running as a daemon and exporting the data received from gNMI subscriptions to [multiple outputs](outputs/output_intro.md) like stan/nats, kafka, file, prometheus, influxdb, etc...\n\n#### Inputs\n`gnmic` supports reading gNMI data from a set of [inputs](inputs/input_intro.md) and export the data to any of the configured outputs. This is used when building data pipelines with `gnmic`\n\n### Repeated flags\nIf a flag can appear more than once on the CLI, it can be represented as a list in the file.\n\nFor example one can set multiple paths for get/set/subscribe operations. 
In the following example we define multiple paths for the [`get`](../cmd/get.md) command to operate on:\n```yaml\naddress: \"router.lab:57400\"\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nget-path:\n    - /configure/system/name\n    - /state/system/version\n```\n\n### Options preference\nConfiguration passed via CLI flags and Env variables take precedence over the file config.\n\n### Environment variables in file\nEnvironment variables can be used in the configuration file and will be expanded at the time the configuration is read.\n\n```yaml\noutputs:\n  output1:\n    type: nats\n    address: ${NATS_IP}:4222\n```"
  },
  {
    "path": "docs/user_guide/configuration_flags.md",
    "content": "`gnmic` supports a set of global flags, applicable to all sub commands, as well as local flags which are specific to each sub command.\n\n- [Global flags](../global_flags.md)\n- Local flags:\n    - [Capabilities](../cmd/capabilities.md)\n    - [Get](../cmd/get.md)\n    - [Set](../cmd/set.md)\n    - [Subscribe](../cmd/subscribe.md)\n    - [Prompt](../cmd/prompt.md)\n    - [Path](../cmd/path.md)\n    - [Listen](../cmd/listen.md)\n "
  },
  {
    "path": "docs/user_guide/configuration_intro.md",
    "content": "`gnmic` reads configuration from three different sources,\n[Global and local flags](configuration_flags.md), [environment variables](configuration_env.md) and [local system file](configuration_file.md).\n\nThe different sources follow a precedence order where a configuration variable from a source takes precedence over the next one in the below list:\n\n- global and local flags\n- Environment variables\n- configuration file\n\n## Flags\n\nSee [here](configuration_flags.md) for a complete list of the supported global and local flags.\n\n## Environment variables\n\n`gnmic` can also be configured using environment variables, it will read the environment variables starting with `GNMIC_`.\n\nThe Env variable names are inline with the flag names as well as the configuration hierarchy.\n\nSee [here](configuration_env.md) for more details on environment variables.\n\n## File configuration\n\nConfiguration file that `gnmic` reads must be in one of the following formats: JSON, YAML, TOML, HCL or Properties.  
\n\nBy default, `gnmic` will search for a file named `.gnmic.[yml/yaml, toml, json]` in the following locations and will use the first file that exists:\n\n* `$PWD`\n* `$HOME`\n* `$XDG_CONFIG_HOME`\n* `$XDG_CONFIG_HOME/gnmic`\n\nThe default path can be overridden with [`--config`](../global_flags.md#config) flag.\n\n```bash\n# config file default path is :\n# $PWD/.gnmic.[yml, toml, json], or\n# $HOME/.gnmic.[yml, toml, json], or\n# $XDG_CONFIG_HOME/.gnmic.[yml, toml, json], or\n# $XDG_CONFIG_HOME/gnmic/.gnmic.[yml, toml, json]\ngnmic capabilities\n\n# read `cfg.yml` file located in the current directory\ngnmic --config ./cfg.yml capabilities\n```\n\nIf the file referenced by `--config` flag is not present, the default path won't be tried.\n\nExample of the `gnmic` config files are provided in the following formats: [YAML](https://github.com/openconfig/gnmic/blob/main/config.yaml), [JSON](https://github.com/openconfig/gnmic/blob/main/config.json), [TOML](https://github.com/openconfig/gnmic/blob/main/config.toml)."
  },
  {
    "path": "docs/user_guide/event_processors/event_add_tag.md",
    "content": "The `event-add-tag` processor adds a set of tags to an event message if one of the configured regular expressions in the values, value names, tags or tag names sections matches.\n\nIt is possible to overwrite a tag if it's name already exists.\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-add-tag:\n      # jq expression, if evaluated to true, the tags are added\n      condition: \n      # list of regular expressions to be matched against the tags names, if matched, the tags are added\n      tag-names:\n      # list of regular expressions to be matched against the tags values, if matched, the tags are added\n      tags:\n      # list of regular expressions to be matched against the values names, if matched, the tags are added\n      value-names:\n      # list of regular expressions to be matched against the values, if matched, the tags are added\n      values:\n      # list of regular expressions to be matched against the deleted paths, if matched, the tags are added\n      deletes:\n      # boolean, if true tags are over-written with the added ones if they already exist.\n      overwrite:\n      # map of tags to be added\n      add: \n        tag_name: tag_value\n```\n\n### Examples\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-add-tag:\n      value-names:\n        - \".\"\n      add: \n        tag_name: tag_value\n```\n\n=== \"Event format before\"\n    ```json\n    {\n      \"name\": \"sub1\",\n      \"timestamp\": 1607678293684962443,\n      \"tags\": {\n        \"interface_name\": \"mgmt0\",\n        \"source\": \"172.20.20.5:57400\"\n      },\n      \"values\": {\n        \"Carrier_Transitions\": 1,\n        \"In_Broadcast_Packets\": 448,\n        \"In_Error_Packets\": 0,\n        \"In_Fcs_Error_Packets\": 0,\n        \"In_Multicast_Packets\": 47578,\n        \"In_Octets\": 15557349,\n        \"In_Unicast_Packets\": 6482,\n        
\"Out_Broadcast_Packets\": 110,\n        \"Out_Error_Packets\": 0,\n        \"Out_Multicast_Packets\": 10,\n        \"Out_Octets\": 464766\n      }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n      \"name\": \"sub1\",\n      \"timestamp\": 1607678293684962443,\n      \"tags\": {\n        \"interface_name\": \"mgmt0\",\n        \"source\": \"172.20.20.5:57400\",\n        \"tag_name\": \"tag_value\"\n    },\n      \"values\": {\n        \"Carrier_Transitions\": 1,\n        \"In_Broadcast_Packets\": 448,\n        \"In_Error_Packets\": 0,\n        \"In_Fcs_Error_Packets\": 0,\n        \"In_Multicast_Packets\": 47578,\n        \"In_Octets\": 15557349,\n        \"In_Unicast_Packets\": 6482,\n        \"Out_Broadcast_Packets\": 110,\n        \"Out_Error_Packets\": 0,\n        \"Out_Multicast_Packets\": 10,\n        \"Out_Octets\": 464766\n      }\n    }\n    ```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_allow.md",
    "content": "The `event-allow` processor allows only messages matching the configured `condition` or one of the regular expressions under `tags`, `tag-names`, `values` or `value-names`.\n\nNon matching messages are dropped.\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-allow:\n      # jq expression, if evaluated to true, the message is allowed\n      condition: \n      # list of regular expressions to be matched against the tags names, \n      # if matched, the message is allowed\n      tag-names:\n      # list of regular expressions to be matched against the tags values,\n      # if matched, the message is allowed\n      tags:\n      # list of regular expressions to be matched against the values names,\n      # if matched, the message is allowed\n      value-names:\n      # list of regular expressions to be matched against the values,\n      # if matched, the message is allowed\n      values:\n```\n### Examples\n\n```yaml\nprocessors:\n  # processor name\n  allow-processor:\n    # processor type\n    event-allow:\n      condition: \".tags.interface_name == 1/1/1\"\n```\n\n=== \"Event format before\"\n    ```json\n    [\n      {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"3797\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"288033\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": 
\"65382630\",\n            \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n            \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"614\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"11\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n        }\n      },\n      {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"1/1/1\",\n            \"source\": \"172.23.23.3:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"3797\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"288033\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"65382630\",\n            \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n            \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"614\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"11\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n        }\n      }\n    ]\n    ```\n=== \"Event format after\"\n    
```json\n    [\n      {\n      },\n      {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"1/1/1\",\n            \"source\": \"172.23.23.3:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"3797\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"288033\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"65382630\",\n            \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n            \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"614\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"11\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n        }\n      }\n    ]\n    ```\n\n\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_combine.md",
    "content": "The `event-combine` processor combines multiple processors together. \nThis allows to declare processors once and reuse them to build more complex processors.\n\n### Configuration\n\n```yaml\nprocessors:\n  # processor name\n  pipeline1:\n    # processor type\n    event-combine:\n      # list of regex to be matched with the values names\n      processors: \n          # The \"sub\" processor execution condition. A jq expression.\n        - condition: \n          # the processor name, should be declared in the\n          # `processors` section.\n          name: \n      # enable extra logging\n      debug: false\n```\n\n### Conditional Execution of Subprocessors\n\nThe workflow for processing event messages can include multiple subprocessors, each potentially governed by its own condition. These conditions are defined using the jq query language, enabling dynamic and precise control over when each subprocessor should be executed.\n\n### Defining Conditions for Subprocessors\n\nWhen configuring your subprocessors, you have the option to attach a jq-based condition to each one. The specified condition acts as a gatekeeper, determining whether the corresponding subprocessor should be activated for a particular event message.\n\n### Condition Evaluation Process\n\nFor a subprocessor to run, the following criteria must be met:\n\nCondition Presence: If a condition is specified for the subprocessor, it must be evaluated.\n\nCondition Outcome: The result of the jq condition evaluation must be true.\n\nCombined Conditions: In scenarios where both the main processor and the subprocessor have associated conditions, both conditions must independently evaluate to true for the subprocessor to be triggered.\n\nOnly when all relevant conditions are met will the subprocessor execute its designated operations on the event message.\n\nIt is important to note that the absence of a condition is equivalent to a condition that always evaluates to true. 
Thus, if no condition is provided for a subprocessor, it will execute as long as the main processor's condition (if any) is met.\n\nBy using conditional execution, you can build sophisticated and efficient event message processing workflows that react dynamically to the content of the messages.\n\n### Examples\n\nIn the below example, we define 3 regular processors and 2 `event-combine` processors.\n\n- `proc1`: Allows event message that have tag `\"interface_name = ethernet-1/1`\n\n- `proc2`: Renames values names to their path base.\n             e.g: `interface/statistics/out-octets` --> `out-octets`\n\n- `proc3`: Converts any values with a name ending with `octets` to `int`.\n\n- `pipeline1`: Combines `proc1`, `proc2` and `proc3`, applying `proc2` only to subscription `sub1`\n\n- `pipeline2`: Combines `proc2` and `proc3`, applying `proc2` only to subscription `sub2`\n\nThe 2 combine processors can be linked with different outputs.\n\n```yaml\nprocessors:\n  proc1:\n    event-allow:\n      condition: '.tags.interface_name == \"ethernet-1/1\"'\n\n  proc2:\n    event-strings:\n      value-names:\n        - \".*\"\n      transforms:\n        - path-base:\n            apply-on: \"name\"\n  proc3:\n    event-convert:\n      value-names: \n        - \".*octets$\"\n      type: int \n  \n\n  pipeline1:\n    event-combine:\n      processors: \n        - name: proc1\n        - condition: '.tags[\"subscription-name\"] == \"sub1\"'\n          name: proc2\n        - name: proc3\n  \n  pipeline2:\n    event-combine:\n      processors: \n        - condition: '.tags[\"subscription-name\"] == \"sub2\"'\n          name: proc2\n        - name: proc3\n```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_convert.md",
    "content": "The `event-convert` processor converts the values matching one of the regular expressions to a specific type: `uint`, `int`, `string`, `float` or `bool`\n\n### Examples\n\n```yaml\nprocessors:\n  # processor name\n  convert-int-processor:\n    # processor type\n    event-convert:\n      # list of regex to be matched with the values names\n      value-names: \n        - \".*octets$\"\n      # the desired value type, one of: int, uint, string, float, bool\n      type: int \n```\n\n=== \"Event format before\"\n    ```json\n    {\n      \"name\": \"default\",\n      \"timestamp\": 1607290633806716620,\n      \"tags\": {\n        \"port_port-id\": \"A/1\",\n        \"source\": \"172.17.0.100:57400\",\n        \"subscription-name\": \"default\"\n      },\n      \"values\": {\n        \"/state/port/ethernet/statistics/in-octets\": \"7753940\"\n      }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n      \"name\": \"default\",\n      \"timestamp\": 1607290633806716620,\n      \"tags\": {\n        \"port_port-id\": \"A/1\",\n        \"source\": \"172.17.0.100:57400\",\n        \"subscription-name\": \"default\"\n      },\n      \"values\": {\n        \"/state/port/ethernet/statistics/in-octets\": 7753940\n      }\n    }\n    ```"
  },
  {
    "path": "docs/user_guide/event_processors/event_data_convert.md",
    "content": "The `event-data-convert` processor converts data values matching one of the regular expressions from/to a specific data unit:\n\n| Symbol | Unit    | Symbol | Unit      | Symbol  | Unit     |\n| ------ | ------- | ------ | --------- | --------| -------- |\n| `b`    | Bit     | `B`    | Byte      | `KiB`   | KibiByte |\n| `kb`   | kiloBit | `KB`   | KiloByte  | `MiB`   | MebiByte |\n| `mb`   | MegaBit | `MB`   | MegaByte  | `GiB`   | GibiByte |\n| `gb`   | GigaBit | `GB`   | GigaByte  | `TiB`   | TebiByte |\n| `tb`   | TeraBit | `TB`   | TeraByte  | `EiB`   | ExbiByte |\n| `eb`   | ExaBit  | `EB`   | ExaByte   | `ZiB`   | ZebiByte |\n|        |         | `ZB`   | ZetaByte  | `YiB`   | YobiByte |\n|        |         | `YB`   | YottaByte |         |          |\n\nThe source values can be of any numeric type including a string with or without a unit, e.g: `2.3`, `1KB` or `1.1 TB`.\n\nThe unit of the original value can be derived as `Byte` from its name if it ends with `-bytes`, `-octets`, `_bytes` or `_octets`.\n\n### Examples\n\n#### simple conversion\n\nThe below processor will convert any value with a name ending in `-octets` from `Byte` to `KiloByte`.\n\n```yaml\nprocessors:\n  # processor name\n  convert-data-unit:\n    # processor type\n    event-data-convert:\n      # list of regex to be matched with the values names\n      value-names: \n        - \".*-octets$\"\n      # the source value unit, defaults to B (Byte)\n      from: B\n      # the desired value unit, defaults to B (Byte)\n      to: KB\n      # keep the original value, \n      # a new value name will be added with the converted value,\n      # the new value name will be the original name with _$to as suffix \n      # if no regex renaming is defined using `old` and `new`\n      keep: false\n      # old, a regex to be used to rename the converted value\n      old: \n      # new, the replacement string\n      new:\n      # debug, enables this processor logging\n      debug: 
false\n```\n\n=== \"Event format before\"\n    ```json\n    {\n      \"name\": \"default\",\n      \"timestamp\": 1607290633806716620,\n      \"tags\": {\n        \"port_port-id\": \"A/1\",\n        \"source\": \"172.17.0.100:57400\",\n        \"subscription-name\": \"default\"\n      },\n      \"values\": {\n        \"/state/port/ethernet/statistics/in-octets\": \"2048\"\n      }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n      \"name\": \"default\",\n      \"timestamp\": 1607290633806716620,\n      \"tags\": {\n        \"port_port-id\": \"A/1\",\n        \"source\": \"172.17.0.100:57400\",\n        \"subscription-name\": \"default\"\n      },\n      \"values\": {\n        \"/state/port/ethernet/statistics/in-octets\": 2\n      }\n    }\n    ```\n\n#### conversion with renaming\n\nThe below data convert processor converts any value with a name ending in `-octets` from Byte to Kilobyte.\nIt will retain the original value while renaming the new value name by replacing `-octets` with `-kilobytes`.\n\n```yaml\nprocessors:\n  # processor name\n  convert-data-unit:\n    # processor type\n    event-data-convert:\n      # list of regex to be matched with the values names\n      value-names: \n        - \".*-octets$\"\n      # the source value unit, defaults to B (Byte)\n      from: B\n      # the desired value unit, defaults to B (Byte)\n      to: KB\n      # keep the original value, \n      # a new value name will be added with the converted value,\n      # the new value name will be the original name with _$to as suffix \n      # if no regex renaming is defined using `old` and `new`\n      keep: true\n      # old, a regex to be used to rename the converted value\n      old: ^(\\S+)-octets$\n      # new, the replacement string\n      new: ${1}-kilobytes\n      # debug, enables this processor logging\n      debug: false\n```\n\n=== \"Event format before\"\n    ```json\n    {\n      \"name\": \"default\",\n      \"timestamp\": 1607290633806716620,\n   
   \"tags\": {\n        \"port_port-id\": \"A/1\",\n        \"source\": \"172.17.0.100:57400\",\n        \"subscription-name\": \"default\"\n      },\n      \"values\": {\n        \"/state/port/ethernet/statistics/in-octets\": \"2048\"\n      }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n      \"name\": \"default\",\n      \"timestamp\": 1607290633806716620,\n      \"tags\": {\n        \"port_port-id\": \"A/1\",\n        \"source\": \"172.17.0.100:57400\",\n        \"subscription-name\": \"default\"\n      },\n      \"values\": {\n        \"/state/port/ethernet/statistics/in-octets\": \"2048\",\n        \"/state/port/ethernet/statistics/in-kilobytes\": 2\n      }\n    }\n    ```"
  },
  {
    "path": "docs/user_guide/event_processors/event_date_string.md",
    "content": "The `event-date-string` processor converts a specific timestamp value (under tags or values) to a string representation. The format and location can be configured.\n\n### Examples\n\n```yaml\nprocessors:\n  # processor name\n  convert-timestamp-processor:\n    # processor type\n    event-date-string:\n      # list of regex to be matched with the values names\n      value-names: \n        - \"timestamp\"\n      # received timestamp unit\n      precision: ms\n      # desired date string format, defaults to RFC3339\n      format: \"2006-01-02T15:04:05Z07:00\"\n      # timezone, defaults to the local timezone\n      location: Asia/Taipei\n```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_delete.md",
    "content": "The `event-delete` processor deletes all tags or values matching a set of regular expressions from the event message.\n\n### Examples\n\n```yaml\nprocessors:\n  # processor name\n  delete-processor:\n    # processor type\n    event-delete:\n      value-names:\n        - \".*multicast.*\"\n        - \".*broadcast.*\"\n```\n\n=== \"Event format before\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"3797\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"288033\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"65382630\",\n            \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n            \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"614\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"11\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n        }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": 
\"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"65382630\",\n            \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n        }\n    }\n    ```"
  },
  {
    "path": "docs/user_guide/event_processors/event_drop.md",
    "content": "The `event-drop` processor drops the whole message if it matches the configured `condition` or one of the regexes under`tags`, `tag-names`, `values` or `value-names`.\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-drop:\n      # jq expression, if evaluated to true, the message is dropped\n      condition: \n      # list of regular expressions to be matched against the tags names, if matched, the message is dropped\n      tag-names:\n      # list of regular expressions to be matched against the tags values, if matched, the message is dropped\n      tags:\n      # list of regular expressions to be matched against the values names, if matched, the message is dropped\n      value-names:\n      # list of regular expressions to be matched against the values, if matched, the message is dropped\n      values:\n```\n### Examples\n\n```yaml\nprocessors:\n  # processor name\n  drop-processor:\n    # processor type\n    event-drop:\n      tags:\n        - \"172.23.23.2*\"\n```\n\n=== \"Event format before\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"3797\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"288033\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"65382630\",\n            \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n   
         \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"614\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"11\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n        }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n    }\n    ```"
  },
  {
    "path": "docs/user_guide/event_processors/event_duration_convert.md",
    "content": "The `event-duration-convert` processor converts duration written as string to an integer with second precision.\n\nThe string format supported is a series of digits and a single letter indicating the unit, e.g. 1w3d (1 week 3 days)\nThe highest unit is `w` for week and the lowest is `s` for second.\nAny of the units may or may not be present.\n\n### Examples\n\n#### simple conversion\n\n```yaml\nprocessors:\n  # processor name\n  convert-uptime:\n    # processor type\n    event-duration-convert:\n      # list of regex to be matched with the values names\n      value-names: \n        - \".*_uptime$\"\n      # keep the original value, \n      # a new value name will be added with the converted value,\n      # the new value name will be the original name with _seconds as suffix \n      keep: false\n      # debug, enables this processor logging\n      debug: false\n```\n\n=== \"Event format before\"\n    ```json\n    {\n      \"name\": \"default\",\n      \"timestamp\": 1607290633806716620,\n      \"tags\": {\n        \"port_port-id\": \"A/1\",\n        \"source\": \"172.17.0.100:57400\",\n        \"subscription-name\": \"default\"\n      },\n      \"values\": {\n        \"connection_uptime\": \"1w5s\"\n      }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n      \"name\": \"default\",\n      \"timestamp\": 1607290633806716620,\n      \"tags\": {\n        \"port_port-id\": \"A/1\",\n        \"source\": \"172.17.0.100:57400\",\n        \"subscription-name\": \"default\"\n      },\n      \"values\": {\n        \"connection_uptime\": 604805\n      }\n    }\n    ```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_extract_tags.md",
    "content": "The `event-extract-tags` processor extracts tags from a value, a value name, a tag name or a tag value using regex named groups.\n\nIt is possible to overwrite a tag if its name already exists.\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-extract-tags:\n      # list of regular expressions to be used to extract strings to be added as a tag.\n      tag-names:\n      # list of regular expressions to be used to extract strings to be added as a tag.\n      tags:\n      # list of regular expressions to be used to extract strings to be added as a tag.\n      value-names:\n      # list of regular expressions to be used to extract strings to be added as a tag.\n      values:\n      # boolean, if true tags are over-written with the added ones if they already exist.\n      overwrite:\n      # boolean, enable extra logging\n      debug:\n```\n\n### Examples\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-extract-tags:\n      value-names:\n        - /([a-zA-Z0-9-_:]+)/(?P<group>[a-zA-Z0-9-_:]+)/([a-zA-Z0-9-_:]+)\n```\n\n=== \"Event format before\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"3797\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"288033\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"65382630\",\n   
         \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n            \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"614\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"11\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n        }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"group\": \"statistics\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"3797\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"288033\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"65382630\",\n            \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n            \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"614\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"11\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n    
    }\n    }\n    ```"
  },
  {
    "path": "docs/user_guide/event_processors/event_group_by.md",
    "content": "The `event-group-by` processor groups values under the same event message based on a list of tag names.\n\nThis processor is intended to be used together with an output with cached gNMI notifications, like `prometheus` output with `cache: {}`.\n\n### Configuration\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-group-by:\n      # list of strings defining the tags to group by the values under \n      # a single event \n      tags: []\n      # a boolean, if true only the values from events of the same name\n      # are grouped together according to the list of tags\n      by-name:\n      # boolean\n      debug: false\n```\n\n### Examples\n\n#### group by a single tag\n\n```yaml\nprocessors:\n  group-by-source:\n    event-group-by:\n      tags:\n        - source\n```\n\n=== \"Event format before\"\n    ```json\n    [\n        {\n            \"name\": \"sub2\",\n            \"timestamp\": 1615284691523204299,\n            \"tags\": {\n                \"neighbor_peer-address\": \"2002::1:1:1:1\",\n                \"network-instance_name\": \"default\",\n                \"source\": \"leaf1:57400\",\n                \"subscription-name\": \"sub2\"\n            },\n            \"values\": {\n                \"bgp_neighbor_sent_messages_queue_depth\": 0,\n                \"bgp_neighbor_sent_messages_total_messages\": \"423\",\n                \"bgp_neighbor_sent_messages_total_non_updates\": \"415\",\n                \"bgp_neighbor_sent_messages_total_updates\": \"8\"\n            }\n        },\n        {\n            \"name\": \"sub2\",\n            \"timestamp\": 1615284691523204299,\n            \"tags\": {\n                \"neighbor_peer-address\": \"2002::1:1:1:1\",\n                \"network-instance_name\": \"default\",\n                \"source\": \"leaf1:57400\",\n                \"subscription-name\": \"sub2\"\n            },\n            \"values\": {\n                
\"bgp_neighbor_received_messages_malformed_updates\": \"0\",\n                \"bgp_neighbor_received_messages_queue_depth\": 0,\n                \"bgp_neighbor_received_messages_total_messages\": \"424\",\n                \"bgp_neighbor_received_messages_total_non_updates\": \"418\",\n                \"bgp_neighbor_received_messages_total_updates\": \"6\"\n            }\n        }\n    ]\n    ```\n=== \"Event format after\"\n    ```json\n    [\n        {\n            \"name\": \"sub2\",\n            \"timestamp\": 1615284691523204299,\n            \"tags\": {\n                \"neighbor_peer-address\": \"2002::1:1:1:1\",\n                \"network-instance_name\": \"default\",\n                \"source\": \"leaf1:57400\",\n                \"subscription-name\": \"sub2\"\n            },\n            \"values\": {\n                \"bgp_neighbor_sent_messages_queue_depth\": 0,\n                \"bgp_neighbor_sent_messages_total_messages\": \"423\",\n                \"bgp_neighbor_sent_messages_total_non_updates\": \"415\",\n                \"bgp_neighbor_sent_messages_total_updates\": \"8\",\n                \"bgp_neighbor_received_messages_malformed_updates\": \"0\",\n                \"bgp_neighbor_received_messages_queue_depth\": 0,\n                \"bgp_neighbor_received_messages_total_messages\": \"424\",\n                \"bgp_neighbor_received_messages_total_non_updates\": \"418\",\n                \"bgp_neighbor_received_messages_total_updates\": \"6\"\n            }\n        }\n    ]\n    ```\n\n#### group by multiple tags\n\n```yaml\nprocessors:\n  group-by-queue-id:\n    event-group-by:\n      tags:\n        - source\n        - interface_name\n        - multicast-queue_queue-id\n```\n\n=== \"Event Format Before\"\n    ```json\n    [\n      {\n        \"name\": \"sub1\",\n        \"timestamp\": 1627997491187771616,\n        \"tags\": {\n          \"interface_name\": \"ethernet-1/1\",\n          \"multicast-queue_queue-id\": \"5\",\n          \"source\": 
\"clab-ndk-srl1:57400\",\n          \"subscription-name\": \"sub1\"\n        },\n        \"values\": {\n          \"/interface/qos/output/multicast-queue/queue-depth/maximum-burst-size\": \"0\"\n        }\n      },\n      {\n        \"name\": \"sub1\",\n        \"timestamp\": 1627997491187771616,\n        \"tags\": {\n          \"interface_name\": \"ethernet-1/1\",\n          \"multicast-queue_queue-id\": \"5\",\n          \"source\": \"clab-ndk-srl1:57400\",\n          \"subscription-name\": \"sub1\"\n        },\n        \"values\": {\n          \"/interface/qos/output/multicast-queue/scheduling/peak-rate-bps\": \"0\"\n        }\n      }\n    ]\n    ```\n=== \"Event Format After\"\n    ```json\n    [\n      {\n        \"name\": \"sub1\",\n        \"timestamp\": 1627997491187771616,\n        \"tags\": {\n          \"interface_name\": \"ethernet-1/1\",\n          \"multicast-queue_queue-id\": \"5\",\n          \"source\": \"clab-ndk-srl1:57400\",\n          \"subscription-name\": \"sub1\"\n        },\n        \"values\": {\n          \"/interface/qos/output/multicast-queue/queue-depth/maximum-burst-size\": \"0\",\n          \"/interface/qos/output/multicast-queue/scheduling/peak-rate-bps\": \"0\"\n        }\n      }\n    ]\n    ```"
  },
  {
    "path": "docs/user_guide/event_processors/event_ieeefloat32.md",
    "content": "The `event-ieeefloat32` processor allows converting binary data received from a router with the type IEEE 32-bit floating point number.\n\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-ieeefloat32:\n      # jq expression, if evaluated to true, the processor applies based on the field `value-names`\n      condition: \n      # list of regular expressions to be matched against the value names, if matched, the value is converted to a float32.\n      value-names: []\n```\n\n### Examples\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-ieeefloat32:\n      value-names:\n        - \"^components/component/power-supply/state/output-current$\"\n```\n\n=== \"Event format before\"\n    ```json\n    {\n      \"name\": \"sub1\",\n      \"timestamp\": 1607678293684962443,\n      \"tags\": {\n        \"source\": \"172.20.20.5:57400\"\n      },\n      \"values\": {\n        \"components/component/power-supply/state/output-current\": \"QEYAAA==\"\n      }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n      \"name\": \"sub1\",\n      \"timestamp\": 1607678293684962443,\n      \"tags\": {\n        \"source\": \"172.20.20.5:57400\"\n      },\n      \"values\": {\n        \"components/component/power-supply/state/output-current\": 3.09375\n      }\n    }\n    ```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_jq.md",
    "content": "The `event-jq` processor applies a [`jq`](https://stedolan.github.io/jq/) expression on the received event messages.\n\n`jq` expressions are a powerful tool that can be used to slice, filter, map, transform JSON object.\n\nThe `event-jq` processor uses two configuration fields, `condition` and `expression`, both support `jq` expressions.\n\n- `condition` (that needs to return a boolean value) determines if the processor is to be applied on the event message.\nif `false` the message is returned as is.\n\n- `expression` is used to transform, filter and/or enrich the messages. \nIt needs to return a JSON object that can be mapped to an array of event messages.\n\nThe event messages resulting from a single `gNMI` Notification are passed to the jq expression as a JSON array.\n\nSome `jq` expression examples:\n\n- Select messages with name \"sub1\" that include a value called \"counter1\" with value higher than 90\n```yaml\nexpression: .[] | select(.name==\"sub1\" and .values.counter1 > 90)\n```\n\n- Delete values with name \"counter1\"\n\n```yaml\nexpression: .[] | del(.values.counter1)\n```\n\n- Delete values with names \"counter1\" or \"counter2\"\n\n```yaml\nexpression: .[] | del(.values.[\"counter1\", \"counter2\"])\n```\n\n- Delete tags with names \"tag1\" or \"tag2\"\n```yaml\nexpression: .[] | del(.tags.[\"tag1\", \"tag2\"])\n```\n\n- Add a tag called \"my_new_tag\" with value \"tag1\"\n```yaml\nexpression: .[] |= (.tags.my_new_tag = \"tag1\")\n```\n\n- Move a value to tag under a custom key\n```yaml\nexpression: .[] |= (.tags.my_new_tag_name = .values.value_name)\n```\n\n### Configuration\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-jq:\n      # condition of application of the processor\n      condition:\n      # jq expression to transform/filter/enrich the message\n      expression:\n      # boolean enabling extra logging\n      debug:\n```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_merge.md",
    "content": "The `event-merge` processor merges multiple event messages together based on some criteria.\n\nEach [gNMI subscribe Response Update](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L95) in a [gNMI subscribe Response Notification](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L79) is transformed into an [Event Message](intro.md)\n\nThe `event-merge` processor is used to merge the updates into one event message if it's needed.\n\nThe default merge strategy is based on the timestamp, the updates with the same timestamp will be merged into the same event message.\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-merge:\n      # if always is set to true, \n      # the updates are merged regardless of the timestamp values\n      always: false\n      debug: false\n```\n\n\n=== \"Event format before\"\n    ```json\n    [\n        {\n            \"name\": \"sub2\",\n            \"timestamp\": 1615284691523204299,\n            \"tags\": {\n                \"neighbor_peer-address\": \"2002::1:1:1:1\",\n                \"network-instance_name\": \"default\",\n                \"source\": \"leaf1:57400\",\n                \"subscription-name\": \"sub2\"\n            },\n            \"values\": {\n                \"bgp_neighbor_sent_messages_queue_depth\": 0,\n                \"bgp_neighbor_sent_messages_total_messages\": \"423\",\n                \"bgp_neighbor_sent_messages_total_non_updates\": \"415\",\n                \"bgp_neighbor_sent_messages_total_updates\": \"8\"\n            }\n        },\n        {\n            \"name\": \"sub2\",\n            \"timestamp\": 1615284691523204299,\n            \"tags\": {\n                \"neighbor_peer-address\": \"2002::1:1:1:1\",\n                \"network-instance_name\": \"default\",\n                \"source\": \"leaf1:57400\",\n                \"subscription-name\": \"sub2\"\n            },\n            \"values\": {\n     
           \"bgp_neighbor_received_messages_malformed_updates\": \"0\",\n                \"bgp_neighbor_received_messages_queue_depth\": 0,\n                \"bgp_neighbor_received_messages_total_messages\": \"424\",\n                \"bgp_neighbor_received_messages_total_non_updates\": \"418\",\n                \"bgp_neighbor_received_messages_total_updates\": \"6\"\n            }\n        }\n    ]\n    ```\n=== \"Event format after\"\n    ```json\n    [\n        {\n            \"name\": \"sub2\",\n            \"timestamp\": 1615284691523204299,\n            \"tags\": {\n                \"neighbor_peer-address\": \"2002::1:1:1:1\",\n                \"network-instance_name\": \"default\",\n                \"source\": \"leaf1:57400\",\n                \"subscription-name\": \"sub2\"\n            },\n            \"values\": {\n                \"bgp_neighbor_sent_messages_queue_depth\": 0,\n                \"bgp_neighbor_sent_messages_total_messages\": \"423\",\n                \"bgp_neighbor_sent_messages_total_non_updates\": \"415\",\n                \"bgp_neighbor_sent_messages_total_updates\": \"8\",\n                \"bgp_neighbor_received_messages_malformed_updates\": \"0\",\n                \"bgp_neighbor_received_messages_queue_depth\": 0,\n                \"bgp_neighbor_received_messages_total_messages\": \"424\",\n                \"bgp_neighbor_received_messages_total_non_updates\": \"418\",\n                \"bgp_neighbor_received_messages_total_updates\": \"6\"\n            }\n        }\n    ]\n    ```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_override_ts.md",
    "content": "The `event-override-ts` processor overrides the message timestamp with `time.Now()`. The precision `s`, `ms`, `us` or `ns` (default) can be configured.\n\n### Examples\n\n```yaml\nprocessors:\n  # processor name\n  set-timestamp-processor:\n    # processor type\n    event-override-ts:\n      # timestamp precision, s, ms, us, ns (default)\n      precision: ms\n```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_plugin.md",
    "content": "The `event-plugin` processor initializes a processor that gNMIc loaded from the configured path under the `plugins:` section.\n\n```yaml\nplugins:\n  # path to load plugin binaries from.\n  path: /path/to/plugin/bin\n  # glob to match binaries against.\n  glob: \"*\"\n  # sets a start timeout for plugins.\n  start-timeout: 0s\n```\n\nThe specific configuration of an `event-plugin` processor varies from one plugin to another. But they are configured just like any other processor i.e. under the `processors:` section of the config file and linked to outputs by name reference.\n\nThe below configuration snippet initializes the `event-add-hostname` processor (a binary stored under `plugins.path`) and links it to output `out1`.\n\n```yaml\nprocessors:\n  proc1:\n    event-add-hostname:\n      debug: true\n      # the tag name to add with the host hostname as a tag value.\n      hostname-tag-name: \"collector-host\"\n      # read-interval controls how often the plugin runs the hostname cmd to get the host hostname\n      # by default it's at most every 1 minute\n      read-interval: 1m\n\noutputs:\n  out1:\n    type: file\n    format: event\n    event-processors:\n      - proc1\n```\n\n### Examples\n\nSee [here](https://github.com/openconfig/gnmic/tree/main/examples/plugins).\n\n### Writing a plugin processor\n\nCurrently plugin processors can only be written in Golang. It relies on Hashicorp's [go-plugin](https://github.com/hashicorp/go-plugin) package for discovery and communication with gNMIc's main process.\n\nTo write your own processor you can use the below skeleton code as a starting point. Can be found [here](https://github.com/openconfig/gnmic/tree/main/examples/plugins/minimal) as well.\n\n1. Choose a name for your processor\n2. Add struct fields to decode the processor's config into.\n3. Implement your processor's logic under the `Apply` method\n4. 
Optionally, store the `targets`,`actions` and `processors` config maps given to the processor under your processor's struct (`myProcessor`) if they are relevant to your processor's logic.\n\n```go\npackage main\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"github.com/hashicorp/go-hclog\"\n\t\"github.com/hashicorp/go-plugin\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/formatters/event_plugin\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\nconst (\n\t// TODO: Choose a name for your processor\n\tprocessorType = \"event-my-processor\"\n)\n\ntype myProcessor struct {\n\t// TODO: Add your config struct fields here\n}\n\nfunc (p *myProcessor) Init(cfg interface{}, opts ...formatters.Option) error {\n\t// decode the plugin config\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// apply options\n\tfor _, o := range opts {\n\t\to(p)\n\t}\n\t// TODO: Other initialization steps...\n\treturn nil\n}\n\nfunc (p *myProcessor) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg {\n\t// TODO: The processor's logic is applied here\n\treturn event\n}\n\nfunc (p *myProcessor) WithActions(act map[string]map[string]interface{}) {\n}\n\nfunc (p *myProcessor) WithTargets(tcs map[string]*types.TargetConfig) {\n}\n\nfunc (p *myProcessor) WithProcessors(procs map[string]map[string]any) {\n}\n\nfunc (p *myProcessor) WithLogger(l *log.Logger) {\n}\n\nfunc main() {\n\tlogger := hclog.New(&hclog.LoggerOptions{\n\t\tOutput:      os.Stderr,\n\t\tDisableTime: true,\n\t})\n\n\tlogger.Info(\"starting plugin processor\", \"name\", processorType)\n\n\t// TODO: Create and initialize your processor's struct\n\tplug := &myProcessor{}\n\t// start it\n\tplugin.Serve(&plugin.ServeConfig{\n\t\tHandshakeConfig: plugin.HandshakeConfig{\n\t\t\tProtocolVersion:  1,\n\t\t\tMagicCookieKey:   \"GNMIC_PLUGIN\",\n\t\t\tMagicCookieValue: \"gnmic\",\n\t\t},\n\t\tPlugins: map[string]plugin.Plugin{\n\t\t\tprocessorType: 
&event_plugin.EventProcessorPlugin{Impl: plug},\n\t\t},\n\t\tLogger: logger,\n\t})\n}\n```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_rate_limit.md",
    "content": "The `event-rate-limit` processor rate-limits each event with matching tags to the configured amount per second.\n\nAll the tags for each event are hashed, and if the hash matches a previously seen event, then the timestamp \nof the event itself is compared to assess if the configured limit has been exceeded.\nIf it has, then this new event is dropped from the pipeline.\n\nThe cache for comparing timestamps is an LRU cache, with a default size of 1000 that can be increased for bigger deployments.\n\nTo account for cases where the device will artificially split the event into multiple chunks (with the same timestamp), \nthe rate-limiter will ignore events with exactly the same timestamp.\n\n\n### Examples\n\n```yaml\nprocessors:\n  # processor name\n  rate-limit-100pps:\n    # processor type\n    event-rate-limit:\n      # rate of filtering, in events per second\n      per-second: 100\n      # set the cache size for doing the rate-limiting comparison\n      # default value is 1000\n      cache-size: 10000\n      # debug for additional logging of dropped events\n      debug: true\n```"
  },
  {
    "path": "docs/user_guide/event_processors/event_starlark.md",
    "content": "### Intro\n\nThe `event-starlark` processor applies a [`Starlark`](https://github.com/google/starlark-go/blob/master/doc/spec.md) function on a list of `event` messages before returning them to the processors pipeline and then to the output.\n\n`starlark` is a dialect of Python, developed initially for the [Bazel build tool](https://bazel.build/) but found multiple uses as a configuration language embedded in a larger application.\n\nThere are a few differences between Python and Starlark, programs written in Starlark are supposed to be short-lived and have no external side effects, their main result is structured data or side effects on the host application. As a result, Starlark has no need for classes, exceptions, reflection, concurrency, and other such features of Python.\n\n`gNMIc` uses the [Go implementation](https://github.com/google/starlark-go/blob/master/doc/spec.md) of Starlark.\n\nA Starlark program running as a `gNMIc` processor should define an `apply` function that takes an arbitrary number of arguments of type `Event` and returns zero or more `Event`s.\n\nAn [`Event`](intro.md#the-event-format) is the transformed gNMI update message as `gNMIc` processes it.\n\n```python\ndef apply(*events)\n  # events transformed/augmented/filtered here\n  return events\n```\n\n### Configuration\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-starlark:\n      # the source of the starlark program.\n      source: |\n        def apply(*events):\n          # processor logic here\n          return events\n      # path to a file containing the starlark program to run.\n      # Mutually exclusive with `source` parameter. 
\n      script:\n      # boolean enabling extra logging\n      debug: false\n```\n\n### Writing a Starlark processor\n\nTo write a starlark processor all that is needed is writing a function called `apply` that will read/modify/delete a list of `Event` messages.\n\nStarlark [specification](https://github.com/google/starlark-go/blob/d1966c6b9fcd/doc/spec.md) defines multiple builtin types and functions.\n\n`gNMIc` provides additional builtin functions like `Event(name)` which creates a new `Event` message and `copy_event(Event)` which duplicates a given `Event` message.\n\nThe `Event` message comprises a few fields:\n\n- `name`: string\n\n- `timestamp`: int64\n\n- `tags`: dictionary of string to string\n\n- `values`: dictionary of string to any\n\n- `deletes`: list of strings\n\nStarlark allows for the dynamic [loading of other modules](https://github.com/bazelbuild/starlark/blob/master/spec.md#load-statements). In the context of gNMIc, the following two modules are available for loading within a starlark program:\n\n- **time**: `load(\"time.star\", \"time\")` loads the time library which provides the following functions to work with the `Event` message timestamp field:\n    - `time.from_timestamp(sec, nsec)`:\n\n          Converts the given Unix time corresponding to the number of seconds\n          and (optionally) nanoseconds since January 1, 1970 UTC into an object\n          of type Time. 
\n          \n          For more details, refer to https://pkg.go.dev/time#Unix.\n\n    - `time.is_valid_timezone(loc)`:\n\n          Reports whether loc is a valid time zone name.\n\n    -  `time.now()`:\n\n          Returns the current local time.\n\n    -  `time.parse_duration(d)`:\n\n          Parses the given duration string.\n          \n          For more details, refer to https://pkg.go.dev/time#ParseDuration.\n\n    - `time.parse_time(x, format, location)`:\n\n          Parses the given time string using a specific time format and location.\n          The expected arguments are a time string (mandatory), a time format\n          (optional, set to RFC3339 by default, e.g. \"2021-03-22T23:20:50.52Z\")\n          and a name of location (optional, set to UTC by default). \n          \n          For more details, refer to https://pkg.go.dev/time#Parse and https://pkg.go.dev/time#ParseInLocation.\n\n    -  `time.time(year, month, day, hour, minute, second, nanosecond, location)`:\n\n          Returns the Time corresponding to `yyyy-mm-dd hh:mm:ss + nsec nanoseconds` in the appropriate zone for that time\n          in the given location. All the parameters are optional.\n\n- **math**: `load(\"math.star\", \"math\")` loads the math library which provides a set of constants and math-related functions:\n    - `ceil(x)`:\n\n          Returns the ceiling of x, the smallest integer greater than or equal to x.\n\n    - `copysign(x, y)`:\n\n         Returns a value with the magnitude of x and the sign of y.\n\n    - `fabs(x)`:\n\n         Returns the absolute value of x as float.\n\n    - `floor(x)`:\n\n         Returns the floor of x, the largest integer less than or equal to x.\n\n    - `mod(x, y)`:\n\n         Returns the floating-point remainder of x/y. 
The magnitude of the result is less than y and its sign agrees with that of x.\n\n    - `pow(x, y)`:\n\n         Returns x**y, the base-x exponential of y.\n\n    - `remainder(x, y)`:\n\n         Returns the IEEE 754 floating-point remainder of x/y.\n\n    - `round(x)`:\n\n         Returns the nearest integer, rounding half away from zero.\n\n    - `exp(x)`:\n\n         Returns e raised to the power x, where e = 2.718281… is the base of natural logarithms.\n\n    - `sqrt(x)`:\n\n         Returns the square root of x.\n\n    - `acos(x)`:\n\n         Returns the arc cosine of x, in radians.\n\n    - `asin(x)`:\n\n         Returns the arc sine of x, in radians.\n\n    - `atan(x)`:\n         \n         Returns the arc tangent of x, in radians.\n\n    - `atan2(y, x)`:\n\n        Returns atan(y / x), in radians.\n        The result is between -pi and pi.\n        The vector in the plane from the origin to point (x, y) makes this angle with the positive X axis.\n        The point of atan2() is that the signs of both inputs are known to it, so it can compute the correct\n        quadrant for the angle.\n        For example, atan(1) and atan2(1, 1) are both pi/4, but atan2(-1, -1) is -3*pi/4.\n\n    - `cos(x)`:\n\n        Returns the cosine of x, in radians.\n\n    - `hypot(x, y)`:\n\n        Returns the Euclidean norm, sqrt(x*x + y*y). 
This is the length of the vector from the origin to point (x, y).\n\n    - `sin(x)`:\n\n        Returns the sine of x, in radians.\n\n    - `tan(x)`:\n\n        Returns the tangent of x, in radians.\n\n    - `degrees(x)`:\n\n        Converts angle x from radians to degrees.\n\n    - `radians(x)`:\n\n        Converts angle x from degrees to radians.\n\n    - `acosh(x)`:\n\n        Returns the inverse hyperbolic cosine of x.\n\n    - `asinh(x)`:\n    \n        Returns the inverse hyperbolic sine of x.\n\n    - `atanh(x)`:\n    \n        Returns the inverse hyperbolic tangent of x.\n\n    - `cosh(x)`:\n\n        Returns the hyperbolic cosine of x.\n\n    - `sinh(x)`:\n\n        Returns the hyperbolic sine of x.\n\n    - `tanh(x)`:\n\n        Returns the hyperbolic tangent of x.\n\n    - `log(x, base)`:\n\n        Returns the logarithm of x in the given base, or natural logarithm by default.\n\n    - `gamma(x)`:\n    \n        Returns the Gamma function of x.\n\n### Examples\n\n#### Move a value to a tag\n\n```python\ndef apply(*events):\n  dels = []\n  for e in events:\n    for k, v in e.values.items():\n      if k == \"val1\":\n        e.tags[k] = str(v)\n        dels.append(k)\n    for d in dels:\n      e.values.pop(d)\n  return events\n```\n\n#### Rename values\n\n```python\nval_map = {\n  \"val1\": \"new_val\",\n}\n\ndef apply(*events):\n  for e in events:\n    for k, v in e.values.items():\n      if k in val_map:\n        e.values[val_map[k]] = v\n        e.values.pop(k)\n  return events\n```\n\n#### Convert strings to integers\n\n```python\ndef apply(*events):\n  for e in events:\n    for k, v in e.values.items():\n      if v.isdigit():\n        e.values[k] = int(v)\n  return events\n```\n\n#### Set an interface description as a tag\n\nThis script stores each interface description per target/interface in a cache and\nadds it to other values as a tag.\n\n```python\ncache = {}\n\ndef apply(*events):\n  evs = []\n  # check if on the event messages contains an 
interface description\n  # and store in th cache dict\n  for e in events:\n    if e.values.get(\"/interface/description\"):\n      target_if = e.tags[\"source\"] + \"_\" + e.tags[\"interface_name\"]\n      cache[target_if] = e.values[\"/interface/description\"]\n  # for each event get the 'source' and 'interface_name', check\n  # if a corresponding cache entry exists and set it as a \n  # 'description' tag\n  for e in events:\n    if e.tags.get(\"source\") and e.tags.get(\"interface_name\"):\n      target_if = e.tags[\"source\"] + \"_\" + e.tags[\"interface_name\"]\n      if cache.get(target_if):\n        e.tags[\"description\"] = cache[target_if]\n    evs.append(e)\n  return evs\n```\n\n#### Calculate new values based on the received ones\n\nThe below script calculates the avg, min, max of a list of values over their last N=10 values\n\n```python\ncache = {}\n\nvalues_names = [\n  '/interface/statistics/out-octets',\n  '/interface/statistics/in-octets'\n]\n\nN=10\n\ndef apply(*events):\n  for e in events:\n    for value_name in values_names:\n      v = e.values.get(value_name)\n      # check if v is not None and is a digit to proceed\n      if not v.isdigit():\n        continue\n      # update cache with the latest value\n      val_key = \"_\".join([e.tags[\"source\"], e.tags[\"interface_name\"], value_name])\n      if not cache.get(val_key):\n        # initialize the cache entry if empty\n        cache.update({val_key: []})\n      if len(cache[val_key]) >= N:\n        # remove the oldest entry if the number of entries reached N\n        cache[val_key] = cache[val_key][1:]\n      # update cache entry\n      cache[val_key].append(int(v))\n      # get the list of values\n      val_list = cache[val_key]\n      # calculate min, max and avg\n      e.values[value_name+\"_min\"] = min(val_list)\n      e.values[value_name+\"_max\"] = max(val_list)\n      e.values[value_name+\"_avg\"] = avg(val_list)\n  return events\n\ndef avg(vals):\n  sum = 0\n  for v in vals:\n    sum 
= sum + v\n  return sum/len(vals)\n```\n\nThe below script builds on top of the previous one by adding the rate calculation to the added values.\nNow the cache contains a timestamp as well as the value.\n\n```python\ncache = {}\n\nvalues_names=[\n  '/interface/statistics/out-octets',\n  '/interface/statistics/in-octets'\n]\n\nN=10\n\ndef apply(*events):\n  for e in events:\n    for value_name in values_names:\n      v = e.values.get(value_name)\n      # check if v is not None and is a digit to proceed\n      if not v.isdigit():\n        continue\n      # update cache with the latest value\n      val_key = \"_\".join([e.tags[\"source\"], e.tags[\"interface_name\"], value_name])\n      if not cache.get(val_key):\n        # initialize the cache entry if empty\n        cache.update({val_key: []})\n      if len(cache[val_key]) >= N:\n        # remove the oldest entry if the number of entries reached N\n        cache[val_key] = cache[val_key][1:]\n      # update cache entry\n      cache[val_key].append((e.timestamp, int(v)))\n      # get the list of values\n      val_list = cache[val_key]\n      # calculate min, max and avg\n      vals = [x[1] for x in val_list]\n      e.values[value_name+\"_min\"] = min(vals)\n      e.values[value_name+\"_max\"] = max(vals)\n      e.values[value_name+\"_avg\"] = avg(vals)\n      if len(val_list) > 1:\n        e.values[value_name+\"_rate\"] = rate(val_list[-2:])\n  return events\n\ndef avg(vals):\n  sum = 0\n  for v in vals:\n    sum = sum + v\n  return sum/len(vals)\n\ndef rate(vals):\n  period = (vals[1][0] - vals[0][0]) / 1000000000\n  change = vals[1][1] - vals[0][1]\n  return change / period\n```\n\n#### Ungroup values\n\nThe below script ungroups values part of the same event message producing an event message per value.\n\n```python\ndef apply(*events):\n  ungrouped_events = []\n  for e in events:\n    for k, v in e.values.items():\n      # create a new event without any value\n      new_event = Event(e.name, e.timestamp, 
e.tags)\n      # add a single value to the new event\n      new_event.values[k] = v\n      # add the new event to the array\n      ungrouped_events.append(new_event)\n  return ungrouped_events\n```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_strings.md",
    "content": "The `event-strings` processor exposes a few of Golang strings transformation functions, there functions can be applied to tags, tag names, values or value names. \n\nSupported functions:\n\n* `strings.Replace`\n* `strings.TrimPrefix`\n* `strings.TrimSuffix`\n* `strings.Title`\n* `strings.ToLower`\n* `strings.ToUpper`\n* `strings.Split`\n* `filepath.Base`\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-strings:\n      value-names: []\n      tag-names: []\n      values: []\n      tags: []\n      transforms:\n        # strings function name\n        - replace:\n            apply-on:  # apply the transformation on name or value\n            keep: # keep the old value or not if the name changed\n            old: # string to be replaced\n            new: #replacement string of old\n        - trim-prefix:\n            apply-on: # apply the transformation on name or value\n            prefix: # prefix to be trimmed\n        - trim_suffix:\n            apply-on: # apply the transformation on name or value\n            suffix: # suffix to be trimmed\n        - title:\n            apply-on: # apply the transformation on name or value\n        - to-upper:\n            apply-on: # apply the transformation on name or value\n        - to-lower:\n            apply-on: # apply the transformation on name or value\n        - split:\n            apply-on: # apply the transformation on name or value\n            split-on: # character to split on\n            join-with: # character to join with\n            ignore-first: # number of first items to ignore when joining\n            ignore-last: # number of last items to ignore when joining\n        - path-base:\n            apply-on: # apply the transformation on name or value \n```\n### Examples\n\n#### replace\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-strings:\n      value-names:\n        - \".*\"\n      
transforms:\n        # strings function name\n        - replace:\n            apply-on: \"name\"\n            old: \"-\"\n            new: \"_\"\n```\n\n=== \"Event format before\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"carrier-transitions\": \"1\",\n            \"in-error-packets\": \"0\",\n            \"in-fcs-error-packets\": \"0\",\n            \"in-octets\": \"65382630\",\n            \"in-unicast-packets\": \"107154\",\n            \"out-error-packets\": \"0\",\n            \"out-octets\": \"64721394\",\n            \"out-unicast-packets\": \"105876\"\n        }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"carrier_transitions\": \"1\",\n            \"in_error_packets\": \"0\",\n            \"in_fcs_error_packets\": \"0\",\n            \"in_octets\": \"65382630\",\n            \"in_unicast_packets\": \"107154\",\n            \"out_error_packets\": \"0\",\n            \"out_octets\": \"64721394\",\n            \"out_unicast_packets\": \"105876\"\n        }\n    }\n    ```\n\n#### trim-prefix\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-strings:\n      value-names:\n        - \".*\"\n      transforms:\n        # strings function name\n        - trim-prefix:\n            apply-on: \"name\"\n            prefix: \"/srl_nokia-interfaces:interface/statistics/\"\n\n```\n\n=== \"Event format before\"\n    ```json\n    {\n        \"name\": 
\"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"3797\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"288033\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"65382630\",\n            \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n            \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"614\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"11\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n        }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"carrier-transitions\": \"1\",\n            \"in-broadcast-packets\": \"3797\",\n            \"in-error-packets\": \"0\",\n            \"in-fcs-error-packets\": \"0\",\n            \"in-multicast-packets\": \"288033\",\n            \"in-octets\": \"65382630\",\n            \"in-unicast-packets\": \"107154\",\n 
           \"out-broadcast-packets\": \"614\",\n            \"out-error-packets\": \"0\",\n            \"out-multicast-packets\": \"11\",\n            \"out-octets\": \"64721394\",\n            \"out-unicast-packets\": \"105876\"\n        }\n    }\n    ```\n\n#### to-upper\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-strings:\n      tag-names:\n        - \"interface_name\"\n        - \"subscription-name\"\n      transforms:\n        # strings function name\n        - to-upper:\n            apply-on: \"value\"\n\n```\n\n=== \"Event format before\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"3797\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"288033\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"65382630\",\n            \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n            \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"614\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"11\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n        }\n    }\n    
```\n=== \"Event format after\"\n    ```json\n       {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"MGMT0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"DEFAULT\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"3797\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"288033\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"65382630\",\n            \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n            \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"614\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"11\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n        }\n    }\n    ```\n#### path-base\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-strings:\n      value-names:\n        - \".*\"\n      transforms:\n        # strings function name\n        - path-base:\n            apply-on: \"name\"\n```\n\n=== \"Event format before\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        
},\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"3797\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"288033\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"65382630\",\n            \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n            \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"614\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"11\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n        }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"carrier-transitions\": \"1\",\n            \"in-broadcast-packets\": \"3797\",\n            \"in-error-packets\": \"0\",\n            \"in-fcs-error-packets\": \"0\",\n            \"in-multicast-packets\": \"288033\",\n            \"in-octets\": \"65382630\",\n            \"in-unicast-packets\": \"107154\",\n            \"out-broadcast-packets\": \"614\",\n            \"out-error-packets\": \"0\",\n            \"out-multicast-packets\": \"11\",\n            \"out-octets\": \"64721394\",\n            \"out-unicast-packets\": 
\"105876\"\n        }\n    }\n    ```\n\n#### split\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-strings:\n      value-names:\n        - \".*\"\n      transforms:\n        # strings function name\n        - split:\n            on: \"name\"\n            split-on: \"/\"\n            join-with: \"_\"\n            ignore-first: 1\n\n```\n\n=== \"Event format before\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"3797\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"288033\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"65382630\",\n            \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n            \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"614\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"11\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n        }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": 
\"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"statistics_carrier-transitions\": \"1\",\n            \"statistics_in-broadcast-packets\": \"3797\",\n            \"statistics_in-error-packets\": \"0\",\n            \"statistics_in-fcs-error-packets\": \"0\",\n            \"statistics_in-multicast-packets\": \"288033\",\n            \"statistics_in-octets\": \"65382630\",\n            \"statistics_in-unicast-packets\": \"107154\",\n            \"statistics_out-broadcast-packets\": \"614\",\n            \"statistics_out-error-packets\": \"0\",\n            \"statistics_out-multicast-packets\": \"11\",\n            \"statistics_out-octets\": \"64721394\",\n            \"statistics_out-unicast-packets\": \"105876\"\n        }\n    }\n    ```\n#### multiple transforms\n\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-strings:\n      value-names:\n        - \".*\"\n      transforms:\n        # strings function name\n        - path-base:\n            apply-on: \"name\"\n        - title:\n            apply-on: \"name\"\n        - replace:\n            apply-on: \"name\"\n            old: \"-\"\n            new: \"_\"\n```\n\n=== \"Event format before\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n            \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"3797\",\n            \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n            
\"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"288033\",\n            \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"65382630\",\n            \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"107154\",\n            \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"614\",\n            \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n            \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"11\",\n            \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"64721394\",\n            \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"105876\"\n        }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607291271894072397,\n        \"tags\": {\n            \"interface_name\": \"mgmt0\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"Carrier_transitions\": \"1\",\n            \"In_broadcast_packets\": \"3797\",\n            \"In_error_packets\": \"0\",\n            \"In_fcs_error_packets\": \"0\",\n            \"In_multicast_packets\": \"288033\",\n            \"In_octets\": \"65382630\",\n            \"In_unicast_packets\": \"107154\",\n            \"Out_broadcast_packets\": \"614\",\n            \"Out_error_packets\": \"0\",\n            \"Out_multicast_packets\": \"11\",\n            \"Out_octets\": \"64721394\",\n            \"Out_unicast_packets\": \"105876\"\n        }\n    }\n    ```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_time_epoch.md",
    "content": "The event-time-epoch processor is a plugin for gNMIc that converts string-based time values in event messages into epoch timestamps.\nThis is particularly useful when input data includes timestamps in human-readable formats (like RFC3339) and you want to normalize them for downstream systems.\n\n# Configuration\n\n```yaml\nprocessors:\n  convert-timestamp:\n    event-time-epoch:\n      value-names:\n        - \".*timestamp\"\n        - \"lastSeen\"\n      format: \"2006-01-02T15:04:05Z07:00\"\n      precision: \"ms\"\n      debug: true\n```\n\n| Field         | Type       | Description                                                                                      |\n|---------------|------------|--------------------------------------------------------------------------------------------------|\n| `value-names` | `[]string` | List of regular expressions to match against the event `values` keys. Only matching keys will be processed. |\n| `format`      | `string`   | [Go time layout](https://golang.org/pkg/time/#Time.Format) used to parse the input strings. Defaults to RFC3339 format. |\n| `precision`   | `string`   | Desired epoch output precision: `s`, `ms`, `us`, or `ns`. Defaults to nanoseconds.              |\n| `debug`       | `bool`     | Enables verbose logging to stderr or the provided logger.                                        
|\n\n\n=== \"Event format before\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607290633806716620,\n        \"tags\": {\n            \"source\": \"172.17.0.100:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"system/timestamp\": \"2025-04-05T10:30:00Z\"\n        }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607290633806716620,\n        \"tags\": {\n            \"source\": \"172.17.0.100:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"system.timestamp\": 1743849000\n        }\n    }\n    ```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_to_tag.md",
    "content": "The `event-to-tag` processor moves a value matching one of the regular expressions from the values section to the tags section.\nIt's possible to keep the value under values section after moving it.\n\n### Examples\n\n```yaml\nprocessors:\n  # processor name\n  sample-processor:\n    # processor type\n    event-to-tag:\n      value-names:\n        - \".*-state$\"\n```\n\n=== \"Event format before\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607305284170936330,\n        \"tags\": {\n            \"interface_name\": \"ethernet-1/1\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/admin-state\": \"disable\",\n            \"/srl_nokia-interfaces:interface/ifindex\": 54,\n            \"/srl_nokia-interfaces:interface/last-change\": \"2020-11-20T05:52:21.459Z\",\n            \"/srl_nokia-interfaces:interface/oper-down-reason\": \"port-admin-disabled\",\n            \"/srl_nokia-interfaces:interface/oper-state\": \"down\"\n        }\n    }\n    ```\n=== \"Event format after\"\n    ```json\n    {\n        \"name\": \"default\",\n        \"timestamp\": 1607305284170936330,\n        \"tags\": {\n            \"interface_name\": \"ethernet-1/1\",\n            \"source\": \"172.23.23.2:57400\",\n            \"subscription-name\": \"default\",\n            \"/srl_nokia-interfaces:interface/admin-state\": \"disable\",\n            \"/srl_nokia-interfaces:interface/oper-state\": \"down\"\n        },\n        \"values\": {\n            \"/srl_nokia-interfaces:interface/ifindex\": 54,\n            \"/srl_nokia-interfaces:interface/last-change\": \"2020-11-20T05:52:21.459Z\",\n            \"/srl_nokia-interfaces:interface/oper-down-reason\": \"port-admin-disabled\"\n        }\n    }\n    ```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_trigger.md",
    "content": "\nThe `event-trigger` processor takes event messages as input and triggers a list of actions (sequentially) if a configured condition evaluates to `true`.\n\nThe condition is evaluated using the the Golang implementation of [jq](https://github.com/itchyny/gojq) with the event message as a `json` input.\n\n`jq` [tutorial](https://stedolan.github.io/jq/tutorial/)\n\n`jq` [manual](https://stedolan.github.io/jq/manual/)\n\n`jq` [playground](https://jqplay.org/)\n\nExamples of conditions:\n\n- The below expression checks if the value named `counter1` has a value higher than 90\n\n```bash\n.values[\"counter1\"] > 90\n```\n\n- This expression checks if the event name is `sub1`, that the tag `source` is equal to `r1:57400`\n\n```bash\n.name == \"sub1\" and .tags[\"source\"] == \"r1:57400\" \n```\n\nThe trigger can be monitored over a configurable window of time (default 1 minute), during which only a certain number of occurrences (default 1) trigger the actions execution.\n\nThe action types availabe can be found [here](../actions/actions.md)\n\n```yaml\nprocessors:\n  # processor name\n  my_trigger_proc: # \n    # processor type\n    event-trigger:\n      # trigger condition\n      condition: '.values[\"counter1\"] > 90'\n      # minimum number of condition occurrences within the configured window \n      # required to trigger the action\n      min-occurrences: 1\n      # max number of times the action is triggered within the configured window\n      max-occurrences: 1\n      # window of time during which max-occurrences need to \n      # be reached in order to trigger the action\n      window: 60s\n      # async, bool. default false.\n      # If true the trigger is executed in the background and the triggering\n      # message is passed to the next procesor. 
Otherwise it blocks until the trigger returns\n      async: false\n      # a dictionary of variables that is passed to the actions\n      # and can be accessed in the actions templates using `.Vars`\n      vars:\n      # path to a file containing variables passed to the actions\n      # the variable in the `vars` field override the ones read from the file.\n      vars-file: \n      # list of actions to be executed\n      actions:\n        - counter_alert\n```\n\n### Examples\n\n#### Alerting when a threshold is crossed\n\nThe below example triggers an HTTP GET to `http://remote-server:p8080/${router_name}` if the value of counter \"counter1\" crosses 90 twice within 2 minutes.\n\n```yaml\nprocessors:\n  my_trigger_proc:\n    event-trigger:\n      condition: '.values[\"counter1\"] > 90'\n      min-occurrences: 1\n      max-occurrences: 2\n      window: 120s\n      async: true\n      actions:\n        - alert\n\nactions:\n  alert:\n    name: alert\n    type: http\n    method: POST\n    url: http://remote-server:8080/{{ index .Tags \"source\" }}\n    headers: \n      content-type: application/text\n    timeout: 5s\n    body: '\"counter1\" crossed threshold, value={{ index .Values \"counter1\" }}'\n```\n\n#### Enabling backup interface\n\nThe below example shows a trigger that enables a router interface if another interface's operational status changes to \"down\".\n\n```yaml\nprocessors:\n  interface_watch: # \n    event-trigger:\n      debug: true\n      condition: '(.tags.interface_name == \"ethernet-1/1\" or .tags.interface_name == \"ethernet-1/2\") and .values[\"/srl_nokia-interfaces:interface/oper-state\"] == \"down\"'\n      actions:\n      - enable_interface\n\nactions:\n  enable_interface:\n    name: my_gnmi_action\n    type: gnmi\n    rpc: set\n    target: '{{ index .Event.Tags \"source\" }}'\n    paths:\n      - |\n        {{ $interfaceName := \"\" }}\n        {{ if eq ( index .Event.Tags \"interface_name\" ) \"ethernet-1/1\"}}\n        {{$interfaceName = 
\"ethernet-1/2\"}}\n        {{ else if eq ( index E.vent.Tags \"interface_name\" ) \"ethernet-1/2\"}}\n        {{$interfaceName = \"ethernet-1/1\"}}\n        {{end}}\n        /interface[name={{$interfaceName}}]/admin-state\n    values:\n      - \"enable\"\n    encoding: json_ietf\n    debug: true\n```\n\n#### Clone a network topology and deploy it using containerlab\n\nUsing lldp neighbor information it's possible to build a [containerlab](https://containerlab.srlinux.dev) topology using `gnmic` actions.\n\nIn the below configuration file, an event processor called `clone-topology` is defined.\n\nWhen triggered it runs a series of actions to gather information (chassis type, lldp neighbors, configuration,...) from the defined targets.\n\nIt then builds a containerlab topology from a defined template and the gathered info, writes it to a file and runs a `clab deploy` command.\n\n```yaml\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\n# log: true\n\ntargets:\n  srl1:\n  srl2:\n  srl3:\n\nprocessors:\n  clone-topology:\n    event-trigger:\n      # debug: true\n      actions:\n        - chassis  \n        - lldp  \n        - read_config  \n        - write_config \n        - clab_topo         \n        - deploy_topo\n\nactions:\n  chassis:\n    name: chassis\n    type: gnmi\n    target: all\n    rpc: sub\n    encoding: json_ietf\n    #debug: true\n    format: event\n    paths:\n      - /platform/chassis/type\n  \n  lldp:\n    name: lldp\n    type: gnmi\n    target: all\n    rpc: sub\n    encoding: json_ietf\n    #debug: true\n    format: event\n    paths:\n      - /system/lldp/interface[name=ethernet-*]\n  \n  read_config:\n    name: read_config\n    type: gnmi\n    target: all\n    rpc: get\n    data-type: config\n    encoding: json_ietf\n    #debug: true\n    paths:\n      - /\n  \n  write_config:\n    name: write_config\n    type: template\n    template: |\n      {{- range $n, $m := .Env.read_config }}\n      {{- $filename := print $n  
\".json\"}}\n          {{ file.Write $filename (index $m 0 \"updates\" 0 \"values\" \"\" | data.ToJSONPretty \"  \" ) }}\n          {{- end }}\n        #debug: true\n  \n  clab_topo:\n    name: clab_topo\n    type: template\n    # debug: true\n    output: gnmic.clab.yaml\n    template: |\n      name: gNMIc-action-generated\n  \n      topology:\n        defaults:\n          kind: srl\n        kinds:\n          srl:\n            image: ghcr.io/nokia/srlinux:latest\n  \n        nodes:\n      {{- range $n, $m := .Env.lldp }}\n        {{- $type := index $.Env.chassis $n 0 0 \"values\" \"/srl_nokia-platform:platform/srl_nokia-platform-chassis:chassis/type\" }}\n        {{- $type = $type | strings.ReplaceAll \"7220 IXR-D1\" \"ixrd1\" }}\n        {{- $type = $type | strings.ReplaceAll \"7220 IXR-D2\" \"ixrd2\" }}\n        {{- $type = $type | strings.ReplaceAll \"7220 IXR-D3\" \"ixrd3\" }}\n        {{- $type = $type | strings.ReplaceAll \"7250 IXR-6\" \"ixr6\" }}\n        {{- $type = $type | strings.ReplaceAll \"7250 IXR-10\" \"ixr10\" }}\n        {{- $type = $type | strings.ReplaceAll \"7220 IXR-H1\" \"ixrh1\" }}\n        {{- $type = $type | strings.ReplaceAll \"7220 IXR-H2\" \"ixrh2\" }}\n        {{- $type = $type | strings.ReplaceAll \"7220 IXR-H3\" \"ixrh3\" }}\n          {{ $n | strings.TrimPrefix \"clab-\" }}:\n            type: {{ $type }}\n            startup-config: {{ print $n \".json\"}}\n      {{- end }}\n      \n        links:\n      {{- range $n, $m := .Env.lldp }}\n        {{- range $rsp := $m }}\n          {{- range $ev := $rsp }}\n            {{- if index $ev.values \"/srl_nokia-system:system/srl_nokia-lldp:lldp/interface/neighbor/system-name\" }}\n            {{- $node1 := $ev.tags.source | strings.TrimPrefix \"clab-\" }}\n            {{- $iface1 := $ev.tags.interface_name | strings.ReplaceAll \"ethernet-\" \"e\" | strings.ReplaceAll \"/\" \"-\" }}\n            {{- $node2 := index $ev.values 
\"/srl_nokia-system:system/srl_nokia-lldp:lldp/interface/neighbor/system-name\" }}\n            {{- $iface2 := index $ev.values \"/srl_nokia-system:system/srl_nokia-lldp:lldp/interface/neighbor/port-id\" | strings.ReplaceAll \"ethernet-\" \"e\" | strings.ReplaceAll \"/\" \"-\" }}\n              {{- if lt $node1 $node2 }}\n          - endpoints: [\"{{ $node1 }}:{{ $iface1 }}\", \"{{ $node2 }}:{{ $iface2 }}\"]\n              {{- end }}\n            {{- end }}\n          {{- end }}\n        {{- end }}\n      {{- end }}\n    \n  deploy_topo:  \n    name: deploy_topo\n    type: script\n    command: sudo clab dep -t gnmic.clab.yaml --reconfigure\n    debug: true\n```\n\nThe above described processor can be triggered with the below command:\n\n```bash\ngnmic --config clone.yaml get --path /system/name --processor clone-topology\n```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_value_tag.md",
    "content": "The `event-value-tag` processor applies specific values from event messages to tags of other messages, if event tag names match.\n\nEach [gNMI subscribe Response Update](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L95) in a [gNMI subscribe Response Notification](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L79) is transformed into an [Event Message](intro.md)  Additionally, if you are using an output cache, all [gNMI subscribe Response Update](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L95) messages are converted to Events on flush.\n\nThe `event-value-tag` processor is used to extract Values as tags to apply to other Events that have the same K:V tag pairs from the original event message, without merging events with different timestamps.\n\n```yaml\nprocessors:\n  # processor name\n  intf-description:\n    # processor-type\n    event-value-tag:\n      # name of the value to match.  Usually a specific gNMI path\n      value-name: \"/interfaces/interface/state/description\"\n      # if set, use instead of the value name for tag\n      tag-name: \"description\"\n      # if true, remove value from original event when copying\n      consume: false\n      debug: false\n```\n\n=== \"Event format before\"\n    ```json\n    [\n        {\n            \"name\": \"sub1\",\n            \"timestamp\": 1,\n            \"tags\": {\n                \"source\": \"leaf1:6030\",\n                \"subscription-name\": \"sub1\",\n                \"interface_name\": \"Ethernet1\"\n            },\n            \"values\": {\n                \"/interfaces/interface/state/counters/in-octets\": 100\n            }\n        },\n        {\n            \"name\": \"sub1\",\n            \"timestamp\": 200,\n            \"tags\": {\n                \"source\": \"leaf1:6030\",\n                \"subscription-name\": \"sub1\",\n                \"interface_name\": \"Ethernet1\"\n            },\n            
\"values\": {\n                \"/interfaces/interface/state/counters/out-octets\": 100\n            }\n        },\n        {\n            \"name\": \"sub1\",\n            \"timestamp\": 200,\n            \"tags\": {\n                \"source\": \"leaf1:6030\",\n                \"subscription-name\": \"sub1\",\n                \"interface_name\": \"Ethernet1\"\n            },\n            \"values\": {\n                \"/interfaces/interface/state/description\": \"Uplink\"\n            }\n        }\n    ]\n    ```\n=== \"Event format after\"\n    ```json\n    [\n        {\n            \"name\": \"sub1\",\n            \"timestamp\": 1,\n            \"tags\": {\n                \"source\": \"leaf1:6030\",\n                \"subscription-name\": \"sub1\",\n                \"interface_name\": \"Ethernet1\",\n                \"description\": \"Uplink\"\n            },\n            \"values\": {\n                \"/interfaces/interface/state/counters/in-octets\": 100\n            }\n        },\n        {\n            \"name\": \"sub1\",\n            \"timestamp\": 200,\n            \"tags\": {\n                \"source\": \"leaf1:6030\",\n                \"subscription-name\": \"sub1\",\n                \"interface_name\": \"Ethernet1\",\n                \"description\": \"Uplink\"\n            },\n            \"values\": {\n                \"/interfaces/interface/state/counters/out-octets\": 100\n            }\n        },\n        {\n            \"name\": \"sub1\",\n            \"timestamp\": 200,\n            \"tags\": {\n                \"source\": \"leaf1:6030\",\n                \"subscription-name\": \"sub1\",\n                \"interface_name\": \"Ethernet1\"\n            },\n            \"values\": {\n                \"/interfaces/interface/state/description\": \"Uplink\"\n            }\n        }\n    ]\n    ```\n\n```yaml\n  bgp-description:\n    event-value-tag:\n      value-name: \"neighbor_description\"\n      consume: true\n```\n=== \"Event format 
before\"\n    ```json\n    [\n        {\n            \"name\": \"sub2\",\n            \"timestamp\": 1615284691523204299,\n            \"tags\": {\n                \"neighbor_peer-address\": \"2002::1:1:1:1\",\n                \"network-instance_name\": \"default\",\n                \"source\": \"leaf1:57400\",\n                \"subscription-name\": \"sub2\"\n            },\n            \"values\": {\n                \"bgp_neighbor_sent_messages_queue_depth\": 0,\n                \"bgp_neighbor_sent_messages_total_messages\": \"423\",\n                \"bgp_neighbor_sent_messages_total_non_updates\": \"415\",\n                \"bgp_neighbor_sent_messages_total_updates\": \"8\"\n            }\n        },\n        {\n            \"name\": \"sub2\",\n            \"timestamp\": 1615284691523204299,\n            \"tags\": {\n                \"neighbor_peer-address\": \"2002::1:1:1:1\",\n                \"network-instance_name\": \"default\",\n                \"source\": \"leaf1:57400\",\n                \"subscription-name\": \"sub2\"\n            },\n            \"values\": {\n                \"neighbor_description\": \"PeerRouter\"\n            }\n        }\n    ]\n    ```\n=== \"Event format after\"\n    ```json\n    [\n        {\n            \"name\": \"sub2\",\n            \"timestamp\": 1615284691523204299,\n            \"tags\": {\n                \"neighbor_peer-address\": \"2002::1:1:1:1\",\n                \"network-instance_name\": \"default\",\n                \"source\": \"leaf1:57400\",\n                \"subscription-name\": \"sub2\"\n                \"neighbor_description\": \"PeerRouter\"\n            },\n            \"values\": {\n                \"bgp_neighbor_sent_messages_queue_depth\": 0,\n                \"bgp_neighbor_sent_messages_total_messages\": \"423\",\n                \"bgp_neighbor_sent_messages_total_non_updates\": \"415\",\n                \"bgp_neighbor_sent_messages_total_updates\": \"8\",\n            }\n        },\n        {\n      
      \"name\": \"sub2\",\n            \"timestamp\": 1615284691523204299,\n            \"tags\": {\n                \"neighbor_peer-address\": \"2002::1:1:1:1\",\n                \"network-instance_name\": \"default\",\n                \"source\": \"leaf1:57400\",\n                \"subscription-name\": \"sub2\"\n            },\n            \"values\": {}\n        }\n    ]\n    ```\n"
  },
  {
    "path": "docs/user_guide/event_processors/event_write.md",
    "content": "The `event-write` processor writes a message that has a value or a tag matching one of the configured regular expressions to `stdout`, `stderr` or to a file. \nA custom separator (used between written messages) can be configured, it defaults to `\\n`\n\n```yaml\nprocessors:\n  # processor name\n  write-processor:\n    # processor type\n    event-write:\n      # jq expression, if evaluated to true, the message is written to dst\n      condition: \n      # list of regular expressions to be matched against the tags names, if matched, the message is written to dst\n      tag-names:\n      # list of regular expressions to be matched against the tags values, if matched, the message is written to dst\n      tags:\n      # list of regular expressions to be matched against the values names, if matched, the message is written to dst\n      value-names:\n      # list of regular expressions to be matched against the values, if matched, the message is written to dst\n      values:\n      # path to the destination file\n      dst:\n      # separator to be written between messages\n      separator: \n      # indent to use when marshaling the event message to json\n      indent:\n```\n\n### Examples\n```yaml\nprocessors:\n  # processor name\n  write-processor:\n    # processor type\n    event-write:\n      value-names:\n        - \".\"\n      dst: file.log\n      separator: \"\\n####\\n\"\n      indent: \"  \"\n```\n\n\n``` bash\n$ cat file.log\n{\n  \"name\": \"sub1\",\n  \"timestamp\": 1607582483868459381,\n  \"tags\": {\n    \"interface_name\": \"ethernet-1/1\",\n    \"source\": \"172.20.20.5:57400\",\n    \"subscription-name\": \"sub1\"\n  },\n  \"values\": {\n    \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n    \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"22\",\n    \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n    
\"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n    \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"8694\",\n    \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"1740350\",\n    \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"17\",\n    \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"22\",\n    \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n    \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"8696\",\n    \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"1723262\",\n    \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"17\"\n  }\n}\n####\n{\n  \"name\": \"sub1\",\n  \"timestamp\": 1607582483868459381,\n  \"tags\": {\n    \"interface_name\": \"ethernet-1/1\",\n    \"source\": \"172.20.20.5:57400\",\n    \"subscription-name\": \"sub1\"\n  },\n  \"values\": {\n    \"/srl_nokia-interfaces:interface/statistics/carrier-transitions\": \"1\",\n    \"/srl_nokia-interfaces:interface/statistics/in-broadcast-packets\": \"22\",\n    \"/srl_nokia-interfaces:interface/statistics/in-error-packets\": \"0\",\n    \"/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets\": \"0\",\n    \"/srl_nokia-interfaces:interface/statistics/in-multicast-packets\": \"8694\",\n    \"/srl_nokia-interfaces:interface/statistics/in-octets\": \"1740350\",\n    \"/srl_nokia-interfaces:interface/statistics/in-unicast-packets\": \"17\",\n    \"/srl_nokia-interfaces:interface/statistics/out-broadcast-packets\": \"22\",\n    \"/srl_nokia-interfaces:interface/statistics/out-error-packets\": \"0\",\n    \"/srl_nokia-interfaces:interface/statistics/out-multicast-packets\": \"8696\",\n    \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"1723262\",\n    \"/srl_nokia-interfaces:interface/statistics/out-unicast-packets\": \"17\"\n  }\n}\n####\n```"
  },
  {
    "path": "docs/user_guide/event_processors/intro.md",
    "content": "\nThe event processors provide an easy way to configure a set of functions in order to transform an event message that will be written to a specific output.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/processors.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fprocessors.drawio\" async></script>\n\n\nWhile the `event` format is the de facto format used by `gNMIc` in case the output is `influxdb` or `prometheus`,\nit can be used with any other output type.\n\nTransforming the received gNMI message is sometimes needed to accommodate the output system ( converting types, complying with name constraints,...), or simply filtering out values that you are not interested in.\n\n<!-- In certain cases, the gNMI updates received need to be changed or processed to make it easier to ingest by the target output.\n\nSome common use cases:\n\n* Customizing a value or a tag name.\n* Converting numbers received as a string to integer or float types. -->\n\n### The event format\n\nThe event format is produced by `gNMIc` from the [gNMI Notification messages](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#21-reusable-notification-message-format) received within a gNMI subscribe response update, it contains 5 fields:\n\n* `name`: A `string` field populated by the subscription name, it is used as a part of the metric name in case of prometheus output or it can be used as the measurement name in case of influxdb output. 
\n* `timestamp`: An `int64` field containing the timestamp received within the gnmi Update.\n* `tags`: A map of string keys and string values. \nThe keys and values are extracted from the [gNMI PathElement](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-path-conventions.md#constructing-paths) keys. `gNMIc` adds the subscription name and the target name/address.\n* `values`: A map of string keys and generic values. \nThe keys are built from an xpath representation of the gNMI path without the keys, while the values are extracted from the gNMI [Node values](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#223-node-values).\n* `deletes`: A `string list` built from the `delete` field of the [gNMI Notification message](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#21-reusable-notification-message-format).\n\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/event_msg.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fevent_msg.drawio\" async></script>\n\n### Defining an event processor\n\nEvent processors are defined under the section `processors` in `gNMIc` configuration file.\n\nEach processor is identified by a name, under which we specify the processor type as well as additional fields specific to each type.\n\n!!! 
note\n    Processors names are case insensitive\n\nAll processors support a `debug` field that enables extra debug log messages to help troubleshoot the processor transformation.\n\nBelow is an example of an `event-delete` processor, which deletes all values with a name containing `multicast` or `broadcast`\n\n```yaml\nprocessors:\n  # processor name\n  my-processor:\n    # processor type\n    event-delete:\n      value-names:\n        - \".*multicast.*\"\n        - \".*broadcast.*\"\n```\n\n### Linking an event processor to an output\n\nOnce the needed event processors are defined under section `processors`, they can be linked to the desired output(s) in the same file.\n\nEach output can be configured with different event processors allowing flexibility in the way the same data is written to different outputs.\n\nA list of event processors names can be added under an output configuration, the processors will apply in the order they are configured.\n\nIn the below example, 3 event processors are configured and linked to `output1` of type `influxdb`.\n\nThe first processor converts all values type to `integer` if possible.\n\nThe second deletes tags with name starting with `subscription-name`.\n\nFinally the third deletes values with name ending with `out-unicast-packets`.\n\n```yaml\noutputs:\n  output1:\n    type: influxdb\n    url: http://localhost:8086\n    bucket: telemetry\n    token: srl:srl\n    batch-size: 1000\n    flush-timer: 10s\n    event-processors:\n      - proc-convert-integer\n      - proc-delete-tag-name\n      - proc-delete-value-name\n\nprocessors:\n  proc-convert-integer:\n    event-convert:\n      value-names:\n        - \".*\"\n      type: int\n\n  proc-delete-tag-name:\n    event-delete:\n      tag-names:\n        - \"^subscription-name\"\n\n  proc-delete-value-name:\n    event-delete:\n      value-names:\n        - \".*out-unicast-packets\"\n```\n\n### Event processors with cache\n\nIn the scenario where processors are configured under an 
output with [caching](../outputs/output_intro.md#caching) enabled, the event messages retrieved from the cache are processed as a single set by each processor. This concurrent processing facilitates the application of a logic that merges or combines messages, enabling more complex and integrated processing strategies.\n\n### Event processors pipeline\n\nProcessors under an output are applied in a strict sequential order for each group of event messages received.\n\n### Event processors plugins\n\ngNMIc incorporates the capability to extend its functionality through the use of event processors as plugins. To integrate seamlessly with gNMIc, these plugins need to be written in Golang.\n\nThe communication between gNMIc and these plugins is facilitated by HashiCorp's go-plugin package, which employs `netrpc` as the underlying protocol for this interaction.\n\nSee some plugin examples [here](https://github.com/openconfig/gnmic/examples/plugins)\n"
  },
  {
    "path": "docs/user_guide/gnmi_server.md",
    "content": "# gNMI Server\n\n## Introduction\n\nOn top of acting as `gNMI` client `gNMIc` can run a `gNMI` server that supports `Get`, `Set` and `Subscribe` RPCs.\n\nThe goal is to act as a caching point for the collected gNMI notifications and make them available to other collectors via the `Subscribe` RPC.\n\nUsing this gNMI server feature it is possible to build `gNMI` based clusters and pipelines with `gNMIc`.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:0,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/gnmi_server.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fgnmi_server.drawio\" async></script>\n\nThe server keeps a cache of the gNMI notifications received from the defined subscriptions and utilizes it to build the `Subscribe` RPC responses.\n\nThe unary RPCs, Get and Set, are relayed to known targets based on the `Prefix.Target` field.\n\n## Supported features\n\n- Supports gNMI RPCs, Get, Set, Subscribe\n- Acts as a gNMI gateway for Get and Set RPCs.\n- Supports Service registration with Consul server.\n- Supports all types of gNMI subscriptions, `once`, `poll`, `stream`.\n- Supports all types of `stream` subscriptions, `on-change`, `target-defined` and `sample`.\n- Supports `updates-only` with `stream` and `once` subscriptions.\n- Supports `suppress-redundant`.\n- Supports `heartbeat-interval` with `on-change` and `sample` stream subscriptions.\n\n## Get RPC\n\nThe server supports the gNMI `Get` RPC, it allows a client to retrieve `gNMI` notifications from multiple targets into a single `GetResponse`.\n\nIt relies on 
the `GetRequest` `Prefix.Target` field to select the target(s) against which it will run the Get RPC.\n\nIf `Prefix.Target` is left empty or is equal to `*`, the Get RPC is performed against all known targets.\nThe received GetRequest is cloned, enriched with each target name and sent to the corresponding destination.\n\nComma separated target names are also supported and allow to select a list of specific targets to send the Get RPC to.\n\n```bash\ngnmic -a gnmic-server:57400 get --path /interfaces \\\n                                --target router1,router2,router3\n```\n\nOnce all GetResponses are received back successfully, the notifications contained in each GetResponse are combined into a single GetResponse with each notification's `Prefix.Target` populated, if empty.\n\nThe resulting GetResponse is then returned to the gNMI client.\nIf one of the RPCs fails, an error with status code `Internal(13)` is returned to the client.\n\nIf the GetRequest Path has the `Origin` field set to `gnmic`, the request is performed against the internal `gNMIc` server configuration.\nCurrently only the paths `targets` and `subscriptions` are supported.\n\n```bash\ngnmic -a gnmic-server:57400 get --path gnmic:/targets\ngnmic -a gnmic-server:57400 get --path gnmic:/subscriptions\n```\n\n## Set RPC\n\nThis `gNMI` server supports the gNMI `Set` RPC, it allows a client to run a single `Set` RPC against multiple targets.\n\nJust like in the case of `Get` RPC, the server relies on the `Prefix.Target` field to select the target(s) against which it will run the `Set` RPC.\n\nIf `Prefix.Target` is left empty or is equal to `*`, a Set RPC is performed against all known targets.\nThe received SetRequest is cloned, enriched with each target name and sent to the corresponding destination.\n\nComma separated target names are also supported and allow to select a list of specific targets to send the Set RPC to.\n\n```bash\ngnmic -a gnmic-server:57400 set \\\n        --update 
/system/ssh-server/admin-state:::json:::disable \\\n        --target router1,router2,router3\n```\n\nOnce all SetResponses are received back successfully, the `UpdateResult`s from each response are merged into a single SetResponse, with the addition of the target name set in `Path.Target`.\n\n!!! note\n    Adding a target value to a non prefix path is not compliant with the gNMI specification which stipulates that the `Target` field should only be present in [Prefix Paths](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target)\n\nThe resulting SetResponse is then returned to the gNMI client.\nIf one of the RPCs fails, an error with status code `Internal(13)` is returned to the client.\n\n## Subscribe RPC\n\nThe `gNMIc` server keeps a cache of gNMI notifications synched with the configured targets based on the configured subscriptions.\n\nThe Subscribe requests received from a client are run against the afore mentioned cache,\nthis means that a client cannot get updates about a leaf that `gNMIc` did not subscribe to as a client.\n\nClients can subscribe to specific target using the gNMI `Prefix.Target` field,\nwhile leaving the `Prefix.Target` field empty or setting it to `*` is equivalent to subscribing to all known targets.\n\n### Subscription Mode\n\n`gNMIc` gNMI Server supports the 3 gNMI specified subscription modes: `Once`, `Poll` and `Stream`.\n\nIt also supports some subscription behavior modifiers:\n\n- `updates-only` with `stream` and `once` subscriptions.\n- `suppress-redundant`.\n- `heartbeat-interval` with `on-change` and `sample` stream subscriptions.\n\n#### [Once](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35151-once-subscriptions)\n\nA subscription operating in the `ONCE` mode acts as a single request/response channel.\nThe target creates the relevant update messages, transmits them, and subsequently closes the RPC.\n\nIn this subscription mode, `gNMIc` server 
supports the `updates-only` knob.\n\n#### [Poll](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35153-poll-subscriptions)\n\nPolling subscriptions are used for on-demand retrieval of data items via long-lived RPCs. A poll subscription relates to a certain set of subscribed paths, and is initiated by sending a SubscribeRequest message with encapsulated SubscriptionList. Subscription messages contained within the SubscriptionList indicate the set of paths that are of interest to the polling client.\n\n#### [Stream](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35152-stream-subscriptions)\n\nStream subscriptions are long-lived subscriptions which continue to transmit updates relating to the set of paths that are covered within the subscription indefinitely.\n\nIn this subscription mode, `gNMIc` server supports the `updates-only` knob.\n\n##### On Change\n\nWhen a subscription is defined to be `on-change`, data updates are only sent to the client when the value of the data item changes.\n\nIn the case of `gNMIc` gNMI server, `on-change` subscriptions depend on the subscription writing data to the local cache,\nif it is a `sample` subscription, each update from a target will trigger an `on-change` update to the server client.\n\n`gNMIc` gNMI server supports `on-change` subscriptions with `heartbeat-interval`.\nIf the `heartbeat-interval` value is set to a non zero value, the value of the data item(s) MUST be re-sent once per heartbeat interval regardless of whether the value has changed or not.\n\n!!! note\n    The minimum heartbeat-interval is configurable using the field `min-heartbeat-interval`. 
It defaults to `1s`\n\n    If the received `heartbeat-interval` value is greater than zero but lower than `min-heartbeat-interval`, the `min-heartbeat-interval` value is used instead.\n\n##### Target Defined\n\nWhen a client creates a subscription specifying the target defined mode, the target MUST determine the best type of subscription to be created on a per-leaf basis.\n\nIn the case of `gNMIc` gNMI server, a `target-defined` stream subscription, is treated as an `on-change` subscription.\n\nNote that this does not mean that `gNMIc` will filter out the unchanged values received from a sample subscription to the actual targets.\n\n##### Sample\n\nA `sample` subscription is one where data items are sent to the client once per `sample-interval`.\n\nThe minimum supported `sample-interval` is configurable using the field `min-sample-interval`, defaults to `1ms`.\n\nIf within a `SubscribeRequest` the received `sample-interval` is zero, the `default-sample-interval` is used, defaults to `1s`.\n\n## Configuration\n\n```yaml\ngnmi-server:\n  # the address the gNMI server will listen to\n  address: :57400\n  # tls config\n  tls:\n    # string, path to the CA certificate file,\n    # this certificate is used to verify the clients certificates.\n    ca-file:\n    # string, server certificate file.\n    cert-file:\n    # string, server key file.\n    key-file:\n    # string, one of `\"\", \"request\", \"require\", \"verify-if-given\", or \"require-verify\" \n    #  - request:         The server requests a certificate from the client but does not \n    #                     require the client to send a certificate. 
\n    #                     If the client sends a certificate, it is not required to be valid.\n    #  - require:         The server requires the client to send a certificate and does not \n    #                     fail if the client certificate is not valid.\n    #  - verify-if-given: The server requests a certificate, \n    #                     does not fail if no certificate is sent. \n    #                     If a certificate is sent it is required to be valid.\n    #  - require-verify:  The server requires the client to send a valid certificate.\n    #\n    # if no ca-file is present, `client-auth` defaults to \"\"`\n    # if a ca-file is set, `client-auth` defaults to \"require-verify\"`\n    client-auth: \"\"\n  max-subscriptions: 64\n  # maximum number of active Get/Set RPCs\n  max-unary-rpc: 64\n  # Unary RPC request timeout\n  unary-rpc-timeout: 120s\n  # defines the maximum msg size (in bytes) the server can receive, \n  # defaults to 4MB\n  max-recv-msg-size:\n  # defines the maximum msg size (in bytes) the server can send,\n  # default to MaxInt32 (2147483647 bytes or 2.147483647 Gb)\n  max-send-msg-size:\n  # defines the maximum number of streams per streaming RPC.\n  max-concurrent-streams:\n  # defines the TCP keepalive time and interval for client connections, \n  # if unset it is enabled based on the OS. If negative it is disabled.\n  tcp-keepalive: \n  # set keepalive and max-age parameters on the server-side.\n  keepalive:\n    # MaxConnectionIdle is a duration for the amount of time after which an\n    # idle connection would be closed by sending a GoAway. Idleness duration is\n    # defined since the most recent time the number of outstanding RPCs became\n    # zero or the connection establishment.\n    # The current default value is infinity.\n    max-connection-idle:\n    # MaxConnectionAge is a duration for the maximum amount of time a\n    # connection may exist before it will be closed by sending a GoAway. 
A\n    # random jitter of +/-10% will be added to MaxConnectionAge to spread out\n    # connection storms.\n    # The current default value is infinity.\n    max-connection-age:\n    # MaxConnectionAgeGrace is an additive period after MaxConnectionAge after\n    # which the connection will be forcibly closed.\n    # The current default value is infinity.\n    max-connection-age-grace:\n    # After a duration of this time if the server doesn't see any activity it\n    # pings the client to see if the transport is still alive.\n    # If set below 1s, a minimum value of 1s will be used instead.\n    # The current default value is 2 hours.\n    time: 120m\n    # After having pinged for keepalive check, the server waits for a duration\n    # of Timeout and if no activity is seen even after that the connection is\n    # closed.\n    # The current default value is 20 seconds.\n    timeout: 20s\n  # defines the minimum allowed sample interval, this value is used when the received sample-interval \n  # is greater than zero but lower than this minimum value.\n  min-sample-interval: 1ms\n  # defines the default sample interval, \n  # this value is used when the received sample-interval is zero within a stream/sample subscription.\n  default-sample-interval: 1s\n  # defines the minimum heartbeat-interval\n  # this value is used when the received heartbeat-interval is greater than zero but\n  # lower than this minimum value\n  min-heartbeat-interval: 1s\n  # enables the collection of Prometheus gRPC server metrics\n  enable-metrics: false\n  # enable additional debug logs\n  debug: false\n  # Enables Consul service registration\n  service-registration:\n    # Consul server address, default to localhost:8500\n    address:\n    # Consul Data center, defaults to dc1\n    datacenter: \n    # Consul username, to be used as part of HTTP basicAuth\n    username:\n    # Consul password, to be used as part of HTTP basicAuth\n    password:\n    # Consul Token, is used to provide a 
per-request ACL token \n    # which overrides the agent's default token\n    token:\n    # gnmi server service check interval, only TTL Consul check is enabled\n    # defaults to 5s\n    check-interval:\n    # Maximum number of failed checks before the service is deleted by Consul\n    # defaults to 3\n    max-fail:\n    # Consul service name\n    name:\n    # List of tags to be added to the service registration, \n    # if available, the instance-name and cluster-name will be added as tags,\n    # in the format: gnmic-instance=$instance-name and gnmic-cluster=$cluster-name\n    tags:\n  # cache configuration\n  cache:\n    # cache type, defaults to `oc`\n    type: oc\n    # string, address of the remote cache server,\n    # irrelevant if type is `oc`\n    address:\n    # string, the remote server username.\n    username:\n    # string, the remote server password.\n    password:\n    # string, expiration period of received messages.\n    expiration: 60s\n    # enable extra logging\n    debug: false\n    # int64, default: 1073741824 (1 GiB). \n    # Max number of bytes stored in the cache per subscription.\n    max-bytes:\n    # int64, default: 1048576. \n    # Max number of messages stored per subscription.\n    max-msgs-per-subscription:\n    # int, default 100. \n    # Batch size used by the JetStream pull subscriber.\n    fetch-batch-size:\n    # duration, default 100ms. 
\n    # Wait time used by the JetStream pull subscriber.\n    fetch-wait-time:  \n```\n\n### Secure vs Insecure Server\n\n#### Insecure Mode\n\nBy default, the server runs in insecure mode, as long as `skip-verify` is false and none of `ca-file`, `cert-file` and `key-file` are set.\n\n#### Secure Mode\n\nTo run this gNMI server in secure mode, there are a few options:\n\n- **Using self signed certificates, without client certificate verification:**\n\n```yaml\ngnmi-server:\n  skip-verify: true\n```\n\n- **Using self signed certificates, with client certificate verification:**\n\n```yaml\ngnmi-server:\n# a valid CA certificate to verify the client provided certificates\n  ca-file: /path/to/caFile \n```\n  \n- **Using CA provided certificates, without client certificate verification:**\n\n```yaml\ngnmi-server:\n  skip-verify: true\n  # a valid server certificate\n  cert-file: /path/to/server-cert\n  # a valid server key\n  key-file:  /path/to/server-key\n```\n\n- **Using CA provided certificates, with client certificate verification:**\n\n```yaml\ngnmi-server:\n  # a valid CA certificate to verify the client provided certificates\n  ca-file: /path/to/caFile \n  # a valid server certificate\n  cert-file: /path/to/server-cert\n  # a valid server key\n  key-file:  /path/to/server-key\n```\n\n### Fields\n\n#### address\n\nDefines the address the gNMI server will listen to.\n\nThis can be a tcp socket in the format `<addr:port>` or a unix socket starting with `unix:///`\n\n#### skip-verify\n\nIf true, the server will not verify the client's certificates.\n\n#### ca-file\n\nDefines the path to the CA certificate file to be used, irrelevant if `skip-verify` is true\n\n#### cert-file\n\nDefines the path to the server certificate file to be used.\n\n#### key-file\n\nDefines the path to the server key file to be used.\n\n#### max-subscriptions\n\nDefines the maximum number of allowed subscriptions.\n\nDefaults to `64`.\n\n#### max-unary-rpc\n\nDefines the maximum number of 
active Get/Set RPCs.\n\nDefaults to `64`.\n\n#### min-sample-interval\n\nDefines the minimum allowed sample interval, this value is used when the received sample-interval\nis greater than zero but lower than this minimum value.\n\nDefaults to `1ms`\n  \n#### default-sample-interval\n\nDefines the default sample interval,\nthis value is used when the received sample-interval is zero within a stream/sample subscription.\n\nDefaults to `1s`\n\n#### min-heartbeat-interval\n\nDefines the minimum heartbeat-interval,\nthis value is used when the received heartbeat-interval is greater than zero but\nlower than this minimum value.\n\nDefaults to `1s`\n\n#### enable-metrics\n\nEnables the collection of Prometheus gRPC server metrics.\n\n#### debug\n\nEnables additional debug logging.\n\n## Caching\n\nBy default, the gNMI server uses Openconfig's gNMI cache as a backend.\n\nDistributed caching is supported using any of the other cache types specified [here](caching.md#cache-types).\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:0,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/gnmi_server_cache.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fgnmi_server_cache.drawio\" async></script>\n\nWhen a distributed cache is used together with the gNMI server feature, a gNMI client can subscribe to any of the gNMI servers to get gNMI updates collected from all the targets.\n\nOn the other hand, if the gNMI client sends a unary RPC (Get, Set), it will have to be directed to the gNMI server directly connected to the target.\n\n```yaml\ngnmi-server:\n  #\n  # 
other gnmi-server attributes\n  #\n  cache:\n    # cache type, defaults to `oc`\n    type: oc # redis, nats or jetstream\n    # string, address of the remote cache server,\n    # irrelevant if type is `oc`\n    address:\n    # string, the remote server username.\n    username:\n    # string, the remote server password.\n    password:\n    # string, expiration period of received messages.\n    expiration: 60s\n    # enable extra logging.\n    debug: false\n    # int64, default: 1073741824 (1 GiB). \n    # Max number of bytes stored in the cache per subscription.\n    max-bytes:\n    # int64, default: 1048576. \n    # Max number of messages stored per subscription.\n    max-msgs-per-subscription:\n    # int, default 100. \n    # Batch size used by the JetStream pull subscriber.\n    fetch-batch-size:\n    # duration, default 100ms. \n    # Wait time used by the JetStream pull subscriber.\n    fetch-wait-time:\n```\n"
  },
  {
    "path": "docs/user_guide/golang_package/examples/capabilities.md",
    "content": "\nThe below snippet shows how to create a target, send a Capabilities Request and print the response.\n\n```golang\npackage main\n\nimport (\n    \"context\"\n    \"fmt\"\n    \"log\"\n\n    \"github.com/openconfig/gnmic/pkg/api\"\n    \"google.golang.org/protobuf/encoding/prototext\"\n)\n\nfunc main() {\n    // create a target\n    tg, err := api.NewTarget(\n        api.Name(\"srl1\"),\n        api.Address(\"10.0.0.1:57400\"),\n        api.Username(\"admin\"),\n        api.Password(\"admin\"),\n        api.SkipVerify(true),\n    )\n    if err != nil {\n        log.Fatal(err)\n    }\n\n    ctx, cancel := context.WithCancel(context.Background())\n    defer cancel()\n    \n    // create a gNMI client\n    err = tg.CreateGNMIClient(ctx)\n    if err != nil {\n        log.Fatal(err)\n    }\n    defer tg.Close()\n\n    // send a gNMI capabilities request to the created target\n    capResp, err := tg.Capabilities(ctx)\n    if err != nil {\n        log.Fatal(err)\n    }\n    fmt.Println(prototext.Format(capResp))\n}\n```\n"
  },
  {
    "path": "docs/user_guide/golang_package/examples/get.md",
    "content": "\nThe below snippet shows how to create a target, send a Get Request and print the response.\n\n```golang\npackage main\n\nimport (\n    \"context\"\n    \"fmt\"\n    \"log\"\n\n    \"github.com/openconfig/gnmic/pkg/api\"\n    \"google.golang.org/protobuf/encoding/prototext\"\n)\n\nfunc main() {\n    // create a target\n    tg, err := api.NewTarget(\n        api.Name(\"srl1\"),\n        api.Address(\"10.0.0.1:57400\"),\n        api.Username(\"admin\"),\n        api.Password(\"admin\"),\n        api.SkipVerify(true),\n    )\n    if err != nil {\n        log.Fatal(err)\n    }\n\n    ctx, cancel := context.WithCancel(context.Background())\n    defer cancel()\n\n    // create a gNMI client\n    err = tg.CreateGNMIClient(ctx)\n    if err != nil {\n        log.Fatal(err)\n    }\n    defer tg.Close()\n\n    // create a GetRequest\n    getReq, err := api.NewGetRequest(\n        api.Path(\"/system/name\"),\n        api.Encoding(\"json_ietf\"))\n    if err != nil {\n        log.Fatal(err)\n    }\n    fmt.Println(prototext.Format(getReq))\n    \n    // send the created gNMI GetRequest to the created target\n    getResp, err := tg.Get(ctx, getReq)\n    if err != nil {\n        log.Fatal(err)\n    }\n    fmt.Println(prototext.Format(getResp))\n}\n```\n"
  },
  {
    "path": "docs/user_guide/golang_package/examples/set.md",
    "content": "\nThe below snippet shows how to create a target, send a Set Request and print the response.\n\n```golang\npackage main\n\nimport (\n    \"context\"\n    \"fmt\"\n    \"log\"\n\n    \"github.com/openconfig/gnmic/pkg/api\"\n    \"google.golang.org/protobuf/encoding/prototext\"\n)\n\nfunc main() {\n    // create a target\n    tg, err := api.NewTarget(\n        api.Name(\"srl1\"),\n        api.Address(\"10.0.0.1:57400\"),\n        api.Username(\"admin\"),\n        api.Password(\"admin\"),\n        api.SkipVerify(true),\n    )\n    if err != nil {\n        log.Fatal(err)\n    }\n\n    ctx, cancel := context.WithCancel(context.Background())\n    defer cancel()\n    \n    err = tg.CreateGNMIClient(ctx)\n    if err != nil {\n        log.Fatal(err)\n    }\n    defer tg.Close()\n\n    // create a gNMI SetRequest\n    setReq, err := api.NewSetRequest(\n        api.Update(\n            api.Path(\"/system/name/host-name\"),\n            api.Value(\"srl2\", \"json_ietf\")),\n    )\n    if err != nil {\n        log.Fatal(err)\n    }\n    fmt.Println(prototext.Format(setReq))\n\n    // send the created gNMI SetRequest to the created target\n    setResp, err := tg.Set(ctx, setReq)\n    if err != nil {\n        log.Fatal(err)\n    }\n    fmt.Println(prototext.Format(setResp))\n}\n```\n"
  },
  {
    "path": "docs/user_guide/golang_package/examples/subscribe.md",
    "content": "\nThe below snippet shows how to create a target and a Subscribe Request.\nIt then starts a Stream subscription with 10s interval and listens to Responses and errors.\n\n```golang\npackage main\n\nimport (\n    \"context\"\n    \"fmt\"\n    \"log\"\n    \"time\"\n\n    \"github.com/openconfig/gnmic/pkg/api\"\n    \"google.golang.org/protobuf/encoding/prototext\"\n)\n\nfunc main() {\n    // create a target\n    tg, err := api.NewTarget(\n        api.Name(\"srl1\"),\n        api.Address(\"srl1:57400\"),\n        api.Username(\"admin\"),\n        api.Password(\"admin\"),\n        api.SkipVerify(true),\n    )\n    if err != nil {\n        log.Fatal(err)\n    }\n\n    ctx, cancel := context.WithCancel(context.Background())\n    defer cancel()\n    err = tg.CreateGNMIClient(ctx)\n    if err != nil {\n        log.Fatal(err)\n    }\n    defer tg.Close()\n    // create a gNMI subscribeRequest\n    subReq, err := api.NewSubscribeRequest(\n        api.Encoding(\"json_ietf\"),\n        api.SubscriptionListMode(\"stream\"),\n        api.Subscription(\n            api.Path(\"system/name\"),\n            api.SubscriptionMode(\"sample\"),\n            api.SampleInterval(10*time.Second),\n        ))\n    if err != nil {\n        log.Fatal(err)\n    }\n    fmt.Println(prototext.Format(subReq))\n    // start the subscription\n    go tg.Subscribe(ctx, subReq, \"sub1\")\n    // start a goroutine that will stop the subscription after x seconds\n    go func() {\n        select {\n        case <-ctx.Done():\n            return\n        case <-time.After(42 * time.Second):\n            tg.StopSubscription(\"sub1\")\n        }\n    }()\n    subRspChan, subErrChan := tg.ReadSubscriptions()\n    for {\n        select {\n        case rsp := <-subRspChan:\n            fmt.Println(prototext.Format(rsp.Response))\n        case tgErr := <-subErrChan:\n            log.Fatalf(\"subscription %q stopped: %v\", tgErr.SubscriptionName, tgErr.Err)\n        }\n    }\n}\n```\n"
  },
  {
    "path": "docs/user_guide/golang_package/gnmi_options.md",
    "content": "\nThe package `github.com/openconfig/gnmic/pkg/api` exposes a set of `api.GNMIOption` that can be used with \n`api.NewGetRequest(...api.GNMIOption) GNMIOption`, `api.NewSetRequest(...api.GNMIOption) GNMIOption` or `api.NewSubscribeRequest(...api.GNMIOption) GNMIOption` to create a gNMI Request.\n\n```golang\n// Version sets the provided gNMI version string in a gnmi.CapabilityResponse message.\nfunc Version(v string) func(msg proto.Message) error\n```\n\n```golang\n// SupportedEncoding creates an GNMIOption that sets the provided encodings as supported encodings in a gnmi.CapabilitiesResponse\nfunc SupportedEncoding(encodings ...string) func(msg proto.Message) error\n```\n\n```golang\n// SupportedModel creates an GNMIOption that sets the provided name, org and version as a supported model in a gnmi.CapabilitiesResponse.\nfunc SupportedModel(name, org, version string) func(msg proto.Message) error\n```\n\n```golang\n// Extension creates a GNMIOption that applies the supplied gnmi_ext.Extension to the provided\n// proto.Message.\nfunc Extension(ext *gnmi_ext.Extension) func(msg proto.Message) error\n```\n\n```golang\n// Prefix creates a GNMIOption that creates a *gnmi.Path and adds it to the supplied\n// proto.Message (as a Path Prefix).\n// The proto.Message can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc Prefix(prefix string) func(msg proto.Message) error\n```\n\n```golang\n// Target creates a GNMIOption that set the gnmi Prefix target to the supplied string value.\n// The proto.Message can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc Target(target string) func(msg proto.Message) error\n```\n\n```golang\n// Path creates a GNMIOption that creates a *gnmi.Path and adds it to the supplied proto.Message\n// which can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.Subscription.\nfunc Path(path string) func(msg proto.Message) 
error\n```\n\n```golang\n// Encoding creates a GNMIOption that adds the encoding type to the supplied proto.Message\n// which can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc Encoding(encoding string) func(msg proto.Message) error\n```\n\n```golang\n// EncodingJSON creates a GNMIOption that sets the encoding type to JSON in a gnmi.GetRequest or\n// gnmi.SubscribeRequest.\nfunc EncodingJSON() func(msg proto.Message) error\n```\n\n```golang\n// EncodingBytes creates a GNMIOption that sets the encoding type to BYTES in a gnmi.GetRequest or\n// gnmi.SubscribeRequest.\nfunc EncodingBytes() func(msg proto.Message) error\n```\n\n```golang\n// EncodingPROTO creates a GNMIOption that sets the encoding type to PROTO in a gnmi.GetRequest or\n// gnmi.SubscribeRequest.\nfunc EncodingPROTO() func(msg proto.Message) error\n```\n\n```golang\n// EncodingASCII creates a GNMIOption that sets the encoding type to ASCII in a gnmi.GetRequest or\n// gnmi.SubscribeRequest.\nfunc EncodingASCII() func(msg proto.Message) error\n```\n\n```golang\n// EncodingJSON_IETF creates a GNMIOption that sets the encoding type to JSON_IETF in a gnmi.GetRequest or\n// gnmi.SubscribeRequest.\nfunc EncodingJSON_IETF() func(msg proto.Message) error\n```\n\n```golang\n// EncodingCustom creates a GNMIOption that adds the encoding type to the supplied proto.Message\n// which can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe.\n// Unlike Encoding, this GNMIOption does not validate if the provided encoding is defined by the gNMI spec.\nfunc EncodingCustom(enc int) func(msg proto.Message) error\n```\n\n```golang\n// DataType creates a GNMIOption that adds the data type to the supplied proto.Message\n// which must be a *gnmi.GetRequest.\nfunc DataType(datat string) func(msg proto.Message) error\n```\n\n```golang\n// DataTypeALL creates a GNMIOption that sets the gnmi.GetRequest data type to ALL\nfunc 
DataTypeALL() func(msg proto.Message) error\n```\n\n```golang\n// DataTypeCONFIG creates a GNMIOption that sets the gnmi.GetRequest data type to CONFIG\nfunc DataTypeCONFIG() func(msg proto.Message) error\n```\n\n```golang\n// DataTypeSTATE creates a GNMIOption that sets the gnmi.GetRequest data type to STATE\nfunc DataTypeSTATE() func(msg proto.Message) error\n```\n\n```golang\n// DataTypeOPERATIONAL creates a GNMIOption that sets the gnmi.GetRequest data type to OPERATIONAL\nfunc DataTypeOPERATIONAL() func(msg proto.Message) error\n```\n\n```golang\n// UseModel creates a GNMIOption that add a gnmi.DataModel to a gnmi.GetRequest or gnmi.SubscribeRequest\n// based on the name, org and version strings provided.\nfunc UseModel(name, org, version string) func(msg proto.Message) error\n```\n\n```golang\n// Update creates a GNMIOption that creates a *gnmi.Update message and adds it to the supplied proto.Message,\n// the supplied message must be a *gnmi.SetRequest.\nfunc Update(opts ...GNMIOption) func(msg proto.Message) error\n```\n\n```golang\n// Replace creates a GNMIOption that creates a *gnmi.Update message and adds it to the supplied proto.Message.\n// the supplied message must be a *gnmi.SetRequest.\nfunc Replace(opts ...GNMIOption) func(msg proto.Message) error\n```\n\n```golang\n// Value creates a GNMIOption that creates a *gnmi.TypedValue and adds it to the supplied proto.Message.\n// the supplied message must be a *gnmi.Update.\n// If a map is supplied as `data interface{}` it has to be a map[string]interface{}.\nfunc Value(data interface{}, encoding string) func(msg proto.Message) error\n```\n\n```golang\n// Delete creates a GNMIOption that creates a *gnmi.Path and adds it to the supplied proto.Message.\n// the supplied message must be a *gnmi.SetRequest. 
The *gnmi.Path is added to the .Delete list.\nfunc Delete(path string) func(msg proto.Message) error\n```\n\n```golang\n// SubscriptionListMode creates a GNMIOption that sets the SubscribeRequest Mode.\n// The variable mode must be one of \"once\", \"poll\" or \"stream\".\n// The supplied proto.Message must be a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc SubscriptionListMode(mode string) func(msg proto.Message) error\n```\n\n```golang\n// SubscriptionListModeSTREAM creates a GNMIOption that sets the Subscription List Mode to STREAM\nfunc SubscriptionListModeSTREAM() func(msg proto.Message) error\n```\n\n```golang\n// SubscriptionListModeONCE creates a GNMIOption that sets the Subscription List Mode to ONCE\nfunc SubscriptionListModeONCE() func(msg proto.Message) error\n```\n\n```golang\n// SubscriptionListModePOLL creates a GNMIOption that sets the Subscription List Mode to POLL\nfunc SubscriptionListModePOLL() func(msg proto.Message) error\n```\n\n```golang\n// Qos creates a GNMIOption that sets the QosMarking field in a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc Qos(qos uint32) func(msg proto.Message) error\n```\n\n```golang\n// UseAliases creates a GNMIOption that sets the UsesAliases field in a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc UseAliases(b bool) func(msg proto.Message) error\n```\n\n```golang\n// AllowAggregation creates a GNMIOption that sets the AllowAggregation field in a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc AllowAggregation(b bool) func(msg proto.Message) error\n```\n\n```golang\n// UpdatesOnly creates a GNMIOption that sets the UpdatesOnly field in a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc UpdatesOnly(b bool) func(msg proto.Message) error\n```\n\n```golang\n// Subscription creates a GNMIOption that creates a *gnmi.Subscription based on the supplied GNMIOption(s) and adds it to the\n// supplied proto.Message which must be of type *gnmi.SubscribeRequest with RequestType 
Subscribe.\nfunc Subscription(opts ...GNMIOption) func(msg proto.Message) error\n```\n\n```golang\n// SubscriptionMode creates a GNMIOption that sets the Subscription mode in a proto.Message of type *gnmi.Subscription.\nfunc SubscriptionMode(mode string) func(msg proto.Message) error\n```\n\n```golang\n// SubscriptionModeTARGET_DEFINED creates a GNMIOption that sets the subscription mode to TARGET_DEFINED\nfunc SubscriptionModeTARGET_DEFINED() func(msg proto.Message) error\n```\n\n```golang\n// SubscriptionModeON_CHANGE creates a GNMIOption that sets the subscription mode to ON_CHANGE\nfunc SubscriptionModeON_CHANGE() func(msg proto.Message) error\n```\n\n```golang\n// SubscriptionModeSAMPLE creates a GNMIOption that sets the subscription mode to SAMPLE\nfunc SubscriptionModeSAMPLE() func(msg proto.Message) error\n```\n\n```golang\n// SampleInterval creates a GNMIOption that sets the SampleInterval in a proto.Message of type *gnmi.Subscription.\nfunc SampleInterval(d time.Duration) func(msg proto.Message) error\n```\n\n```golang\n// HeartbeatInterval creates a GNMIOption that sets the HeartbeatInterval in a proto.Message of type *gnmi.Subscription.\nfunc HeartbeatInterval(d time.Duration) func(msg proto.Message) error\n```\n\n```golang\n// SuppressRedundant creates a GNMIOption that sets the SuppressRedundant in a proto.Message of type *gnmi.Subscription.\nfunc SuppressRedundant(s bool) func(msg proto.Message) error\n```\n\n```golang\n// Notification creates a GNMIOption that builds a gnmi.Notification from the supplied GNMIOptions and adds it\n// to the supplied proto.Message\nfunc Notification(opts ...GNMIOption) func(msg proto.Message) error\n```\n\n```golang\n// Timestamp sets the supplied timestamp in a gnmi.Notification message\nfunc Timestamp(t int64) func(msg proto.Message) error\n```\n\n```golang\n// TimestampNow is the same as Timestamp(time.Now().UnixNano())\nfunc TimestampNow() func(msg proto.Message) error\n```\n\n```golang\n// Alias sets the supplied 
alias value in a gnmi.Notification message\nfunc Alias(alias string) func(msg proto.Message) error\n```\n\n```golang\n// Atomic sets the .Atomic field in a gnmi.Notification message\nfunc Atomic(b bool) func(msg proto.Message) error\n```\n\n```golang\n// UpdateResult creates a GNMIOption that creates a gnmi.UpdateResult and adds it to\n// a proto.Message of type gnmi.SetResponse.\nfunc UpdateResult(opts ...GNMIOption) func(msg proto.Message) error\n```\n\n```golang\n// Operation creates a GNMIOption that sets the gnmi.UpdateResult_Operation\n// value in a gnmi.UpdateResult.\nfunc Operation(oper string) func(msg proto.Message) error\n```\n\n```golang\n// OperationINVALID creates a GNMIOption that sets the gnmi.SetResponse Operation to INVALID\nfunc OperationINVALID() func(msg proto.Message) error\n```\n\n```golang\n// OperationDELETE creates a GNMIOption that sets the gnmi.SetResponse Operation to DELETE\nfunc OperationDELETE() func(msg proto.Message) error\n```\n\n```golang\n// OperationREPLACE creates a GNMIOption that sets the gnmi.SetResponse Operation to REPLACE\nfunc OperationREPLACE() func(msg proto.Message) error\n```\n\n```golang\n// OperationUPDATE creates a GNMIOption that sets the gnmi.SetResponse Operation to UPDATE\nfunc OperationUPDATE() func(msg proto.Message) error\n```\n"
  },
  {
    "path": "docs/user_guide/golang_package/intro.md",
    "content": "`gnmic` (`github.com/openconfig/gnmic/pkg/api`) can be imported as a dependency in your Golang programs.\n\nIt acts as a wrapper around the `openconfig/gnmi` package providing a user friendly API to create a target and easily craft gNMI requests.\n\n## Creating gNMI requests\n\n### Get Request\n\n```golang\nfunc NewGetRequest(opts ...GNMIOption) (*gnmi.GetRequest, error)\n```\n\nThe below 2 snippets create a Get Request with 2 paths, `json_ietf` encoding and data type `STATE`\n\nUsing `github.com/openconfig/gnmic/pkg/api`\n\n```golang\ngetReq, err := api.NewGetRequest(\n    api.Encoding(\"json_ietf\"),\n    api.DataType(\"state\"),    \n    api.Path(\"interface/statistics\"),    \n    api.Path(\"interface/subinterface/statistics\"),\n)\n// check error\n```\n\nUsing `github.com/openconfig/gnmi`\n\n```golang\ngetReq := &gnmi.GetRequest{\n        Path: []*gnmi.Path{\n            {\n                Elem: []*gnmi.PathElem{\n                    {Name: \"interface\"},\n                    {Name: \"statistics\"},\n                },\n            },\n            {\n                Elem: []*gnmi.PathElem{\n                    {Name: \"interface\"},\n                    {Name: \"subinterface\"},\n                    {Name: \"statistics\"},\n                },\n            },\n        },\n        Type:     gnmi.GetRequest_STATE,\n        Encoding: gnmi.Encoding_JSON_IETF,\n    }\n```\n\n### Set Request\n\n```golang\nfunc NewSetRequest(opts ...GNMIOption) (*gnmi.SetRequest, error)\n```\n\nThe below 2 snippets create a Set Request with one two updates, one replace and one delete messages:\n\nUsing `github.com/openconfig/gnmic/pkg/api`\n\n```golang\nsetReq, err := api.NewSetRequest(\n    api.Update(\n        api.Path(\"/system/name/host-name\"),\n        api.Value(\"srl2\", \"json_ietf\"),\n    ),\n    api.Update(\n        api.Path(\"/system/gnmi-server/unix-socket/admin-state\"),\n        api.Value(\"enable\", \"json_ietf\"),\n    ),\n    api.Replace(\n        
api.Path(\"/network-instance[name=default]/admin-state\"),\n        api.Value(\"enable\", \"json_ietf\"),\n    ),\n    api.Delete(\"/interface[name=ethernet-1/1]/admin-state\"),\n)\n// check error\n```\n\nUsing `github.com/openconfig/gnmi`\n\n```golang\nsetReq := &gnmi.SetRequest{\n    Update: []*gnmi.Update{\n        {\n            Path: &gnmi.Path{\n                Elem: []*gnmi.PathElem{\n                    {Name: \"system\"},\n                    {Name: \"name\"},\n                    {Name: \"host-name\"},\n                },\n            },\n            Val: &gnmi.TypedValue{\n                Value: &gnmi.TypedValue_JsonIetfVal{\n                    JsonIetfVal: []byte(\"\\\"srl2\\\"\"),\n                },\n            },\n        },\n        {\n            Path: &gnmi.Path{\n                Elem: []*gnmi.PathElem{\n                    {Name: \"system\"},\n                    {Name: \"gnmi-server\"},\n                    {Name: \"unix-socket\"},\n                    {Name: \"admin-state\"},\n                },\n            },\n            Val: &gnmi.TypedValue{\n                Value: &gnmi.TypedValue_JsonIetfVal{\n                    JsonIetfVal: []byte(\"\\\"enable\\\"\"),\n                },\n            },\n        },\n    },\n    Replace: []*gnmi.Update{\n        {\n            Path: &gnmi.Path{\n                Elem: []*gnmi.PathElem{\n                    {\n                        Name: \"network-instance\",\n                        Key: map[string]string{\n                            \"name\": \"default\",\n                        },\n                    },\n                    {\n                        Name: \"admin-state\",\n                    },\n                },\n            },\n            Val: &gnmi.TypedValue{\n                Value: &gnmi.TypedValue_JsonIetfVal{\n                    JsonIetfVal: []byte(\"\\\"enable\\\"\"),\n                },\n            },\n        },\n    },\n    Delete: []*gnmi.Path{\n        {\n            Elem: 
[]*gnmi.PathElem{\n                {\n                    Name: \"interface\",\n                    Key: map[string]string{\n                        \"name\": \"ethernet-1/1\",\n                    },\n                },\n                {\n                    Name: \"admin-state\",\n                },\n            },\n        },\n    },\n}\n```\n\n### Subscribe Request\n\nCreate a Subscribe Request\n\n```golang\nfunc NewSubscribeRequest(opts ...GNMIOption) (*gnmi.SubscribeRequest, error)\n```\n\nCreate a Subscribe Poll Request\n\n```golang\nfunc NewSubscribePollRequest(opts ...GNMIOption) *gnmi.SubscribeRequest\n```\n\nThe below 2 snippets create a `stream` subscribe request with 2 paths, `json_ietf` encoding and a sample interval of 10 seconds:\n\nUsing `github.com/openconfig/gnmic/pkg/api`\n\n```golang\nsubReq, err := api.NewSubscribeRequest(\n    api.Encoding(\"json_ietf\"),\n    api.SubscriptionListMode(\"stream\"),\n    api.Subscription(\n        api.Path(\"interface/statistics\"),\n        api.SubscriptionMode(\"sample\"),\n        api.SampleInterval(\"10s\"),\n    ),\n    api.Subscription(\n        api.Path(\"interface/subinterface/statistics\"),\n        api.SubscriptionMode(\"sample\"),\n        api.SampleInterval(\"10s\"),\n    ),\n)\n// check error\n```\n\nUsing `github.com/openconfig/gnmi`\n\n```golang\nsubReq := &gnmi.SubscribeRequest_Subscribe{\n    Subscribe: &gnmi.SubscriptionList{\n        Subscription: []*gnmi.Subscription{\n            {\n                Path: &gnmi.Path{\n                    Elem: []*gnmi.PathElem{\n                        {Name: \"interface\"},\n                        {Name: \"statistics\"},\n                    },\n                },\n                Mode:           gnmi.SubscriptionMode_SAMPLE,\n                SampleInterval: uint64(10 * time.Second),\n            },\n            {\n                Path: &gnmi.Path{\n                    Elem: []*gnmi.PathElem{\n                        {Name: \"interface\"},\n               
         {Name: \"subinterface\"},\n                        {Name: \"statistics\"},\n                    },\n                },\n                Mode:           gnmi.SubscriptionMode_SAMPLE,\n                SampleInterval: uint64(10 * time.Second),\n            },\n        },\n        Mode:     gnmi.SubscriptionList_STREAM,\n        Encoding: gnmi.Encoding_JSON_IETF,\n    },\n}\n```\n\n## Creating Targets\n\nA target can be created using `func NewTarget(opts ...TargetOption) (*target.Target, error)`.\n\nThe full list of `api.TargetOption` can be found [here](target_options.md)\n\n```golang\ntg, err := api.NewTarget(\n    api.Name(\"srl1\"),\n    api.Address(\"10.0.0.1:57400\"),\n    api.Username(\"admin\"),\n    api.Password(\"admin\"),\n    api.SkipVerify(true),\n)\n// check error\n```\n\nOnce a Target is created, Multiple functions are available to run the desired RPCs, check the examples [here](examples/capabilities.md)\n"
  },
  {
    "path": "docs/user_guide/golang_package/target_options.md",
    "content": "\nThe package `github.com/openconfig/gnmic/pkg/api` exposes a set of `api.TargetOption` that can be used with \n`api.NewTarget(...api.TargetOption) TargetOption` to create `target.Target`.\n\n```golang\n// Name sets the target name.\nfunc Name(name string) TargetOption \n\n// Address sets the target address.\n// This Option can be set multiple times.\nfunc Address(addr string) TargetOption\n\n// Username sets the target Username.\nfunc Username(username string) TargetOption \n\n// Password sets the target Password.\nfunc Password(password string) TargetOption \n\n// Timeout sets the gNMI client creation timeout.\nfunc Timeout(timeout time.Duration) TargetOption\n\n// Insecure sets the option to create a gNMI client with an\n// insecure gRPC connection\nfunc Insecure(i bool) TargetOption \n\n// SkipVerify sets the option to create a gNMI client with a\n// secure gRPC connection without verifying the target's certificates.\nfunc SkipVerify(i bool) TargetOption \n\n// TLSCA sets that path towards the TLS certificate authority file.\nfunc TLSCA(tlsca string) TargetOption \n\n// TLSCert sets that path towards the TLS certificate file.\nfunc TLSCert(cert string) TargetOption \n\n// TLSKey sets that path towards the TLS key file.\nfunc TLSKey(key string) TargetOption \n\n// TLSMinVersion sets the TLS minimum version used during the TLS handshake.\nfunc TLSMinVersion(v string) TargetOption \n\n// TLSMaxVersion sets the TLS maximum version used during the TLS handshake.\nfunc TLSMaxVersion(v string) TargetOption\n\n// TLSVersion sets the desired TLS version used during the TLS handshake.\nfunc TLSVersion(v string) TargetOption \n\n// LogTLSSecret, if set to true,\n// enables logging of the TLS master key.\nfunc LogTLSSecret(b bool) TargetOption \n\n// Gzip, if set to true,\n// adds gzip compression to the gRPC connection.\nfunc Gzip(b bool) TargetOption \n\n// Token sets the per RPC credentials for all RPC calls. 
\nfunc Token(token string) TargetOption\n```\n"
  },
  {
    "path": "docs/user_guide/inputs/input_intro.md",
    "content": "`gnmic` supports various Inputs to consume gnmi data, transform it and ultimately export it to one or multiple Outputs.\n\nThe purpose of `gnmic`'s Inputs is to build a gnmi data pipeline by enabling the ingestion and export of gnmi data that was exported by `gnmic`'s outputs upstream.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/gnmic_inputs_intro&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fgnmic_inputs_intro\" async></script>\n\n\nCurrently supported input types:\n\n* [NATS messaging system](nats_input.md)\n* [NATS Streaming messaging bus (STAN)](stan_input.md)\n* [Kafka messaging bus](kafka_input.md)\n\n### Defining Inputs and matching Outputs\n\nTo define an Input a user needs to fill in the `inputs` section in the configuration file.\n\nEach Input is defined by its name (`input1` in the example below), a `type` field which determines the type of input to be created (`nats`, `stan`, `kafka`) and various other configuration fields which depend on the Input type.\n\n!!! 
note\n    Input names are case insensitive\n\nAll Input types have an `outputs` field, under which the user can define the downstream destination(s) of the consumed data.\nThis way, data consumed once, can be exported multiple times.\n\n!!!info\n    The same `gnmic` instance can act as gNMI collector, input and output simultaneously.\n\nExample:\n\n```yaml\n# part of gnmic config file\ninputs:\n  input1:\n    type: nats # input type\n    #\n    # other config fields depending on the input type\n    #\n    outputs:\n      - output1\n      - output2\n```\n\n### Inputs use cases\n\n#### Clustering\nUsing `gnmic` Inputs, the user can aggregate all the collected data into one instance of `gnmic` that can make it available to a downstream off the shelf tool, typically Prometheus.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/gnmic_inputs_clustering&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fgnmic_inputs_clustering\" async></script>\n\n\n#### Data reuse\nCollect data once and use it multiple times. 
By chaining multiple instances of `gnmic` the user can process the same stream of data in different ways.\n\nA different set of event processors can be applied on the data stream before being exported to its intended outputs.\n\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/gnmic_input_data_reuse&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fgnmic_input_data_reuse\" async></script>\n\n\n"
  },
  {
    "path": "docs/user_guide/inputs/jetstream_input.md",
    "content": "When using jetstream as an input, `gnmic` consumes data from a specified NATS JetStream stream using a durable consumer. Messages are fetched in batches and delivered to `gnmic` in either `event` or `proto` format.\n\nEach gNMIc instance creates one durable consumer using the configured `subjects` field. Multiple workers (subscribers) can be spawned using the `num-workers` option to increase processing throughput. All workers within a single gNMIc instance share the same durable consumer to ensure coordinated message processing.\n\nFor scaling across multiple consumers, deploy multiple gNMIc instances with different consumer names. Each instance will create its own durable consumer on the stream.\n\nThe `jetstream` input will export received messages to the configured `outputs`. Optionally, `event-processors` can be applied when using event format.\n\n```yaml\ninputs:\n  js-input:\n    # required string, type of input\n    type: jetstream\n\n    # optional string, input instance name\n    # defaults to a generated name if empty\n    name: js-consumer\n\n    # string, NATS server address\n    # default: \"localhost:4222\"\n    address: nats.example.com:4222\n\n    # string, name of the JetStream stream to consume from\n    stream: telemetry-stream\n\n    # list of subject filters within the stream to consume from\n    # the consumer will receive messages matching any of these subjects\n    subjects:\n      - telemetry.device.*\n\n    # enum string, format of consumed message: \"event\" or \"proto\"\n    # default: \"event\"\n    format: event\n\n    # enum string, delivery policy for JetStream:\n    # one of: all, last, new, last-per-subject\n    # default: all\n    deliver-policy: last\n\n    # optional string, subject format used to extract metadata\n    # one of: static, subscription.target, target.subscription\n    # affects proto messages only\n    subject-format: target.subscription\n\n    # optional string, NATS username\n    username: 
nats-user\n\n    # optional string, NATS password\n    password: secret\n\n    # optional duration, reconnect wait time\n    # default: 2s\n    connect-time-wait: 3s\n\n    # optional bool, enables debug logging\n    debug: true\n\n    # integer, number of workers to start (parallel consumers)\n    # default: 1\n    num-workers: 2\n\n    # integer, internal per-worker buffer size\n    # default: 500\n    buffer-size: 1000\n\n    # integer, batch size when fetching messages from JetStream\n    # default: 500\n    fetch-batch-size: 200\n\n    # integer, maximum number of allowed pending ack on the stream\n    # default: 1000\n    max-ack-pending: 5000\n\n    # optional list of output names this input writes to\n    # outputs must be configured at the root `outputs:` section\n    outputs:\n      - file\n      - kafka\n\n    # optional list of event processors\n    # only applies when format is \"event\"\n    event-processors:\n      - add-tags\n\n    # optional TLS configuration for secure NATS connection\n    tls:\n      ca-file: /etc/ssl/certs/ca.pem\n      cert-file: /etc/ssl/certs/cert.pem\n      key-file: /etc/ssl/certs/key.pem\n      skip-verify: false\n```\n\n## Message Formats\n\n\n- `event`: Expects JSON-encoded array of `EventMsg`. Supports processing pipelines and exporting to multiple outputs.\n\n- `proto`: Expects binary-encoded `gnmi.SubscribeResponse` messages. 
Metadata such as `source` and `subscription-name` is extracted from the subject based on subject-format.\n\n## Delivery Policies\n\n- `all`: Delivers all messages from the stream history.\n\n- `last`: Delivers only the most recent message.\n\n- `new`: Starts delivery from new messages only.\n\n- `last-per-subject`: Delivers the latest message for each subject.\n\n\n## Subject Format Behavior\n\nWhen using proto format, gnmic uses the subject name to extract metadata:\n\n- `subscription.target` → subscription-name = first, source = second\n\n- `target.subscription` → subscription-name = second, source = first\n\n- `static` → no parsing; no additional metadata is extracted\n\n## Scaling with Multiple Consumers\n\nEach gNMIc instance creates a single durable consumer on the stream. To scale message processing across multiple consumers:\n\n1. Deploy multiple gNMIc instances\n2. Give each instance a different consumer `name` in its configuration\n3. Configure each instance with appropriate `subjects` filters to partition work\n\n**Example - Two instances consuming different subjects:**\n\nInstance 1:\n```yaml\ninputs:\n  js-consumer-1:\n    type: jetstream\n    name: consumer-router-metrics\n    address: localhost:4222\n    stream: telemetry-stream\n    subjects:\n      - telemetry.router.*\n    num-workers: 2\n    format: event\n```\n\nInstance 2:\n```yaml\ninputs:\n  js-consumer-2:\n    type: jetstream\n    name: consumer-switch-metrics\n    address: localhost:4222\n    stream: telemetry-stream\n    subjects:\n      - telemetry.switch.*\n    num-workers: 2\n    format: event\n```\n\nEach instance creates its own durable consumer with its configured subject filters. 
Within each instance, multiple workers share the same consumer for parallel processing.\n\n## Usage Notes\n\n- A durable consumer is created on the stream using the provided name as the durable name.\n\n- All workers use the same durable name to share state and resume progress across reconnects.\n\n- TLS can be configured if the NATS server uses secure connections.\n\n\n"
  },
  {
    "path": "docs/user_guide/inputs/kafka_input.md",
    "content": "When using Kafka as input, `gnmic` consumes data from a specific Kafka topic in `event` or `proto` format.\n\nMultiple consumers can be created per `gnmic` instance (`num-workers`).\nAll the workers join the same [Kafka consumer group](https://docs.confluent.io/platform/current/clients/consumer.html#consumer-groups) (`group-id`) in order to load share the messages between the workers.\n\nMultiple instances of `gnmic` with the same Kafka input can be used to effectively consume the exported messages in parallel\n\nThe Kafka input will export the received messages to the list of outputs configured under its `outputs` section.\n\n```yaml\ninputs:\n  input1:\n    # string, required, specifies the type of input\n    type: kafka \n    # Kafka subscriber name\n    # If left empty, it will be populated with the string from flag --instance-name appended with `--kafka-cons`.\n    # If --instance-name is also empty, a random name is generated in the format `gnmic-$uuid`\n    # note that each kafka worker (consumer) will get name=$name-$index\n    name: \"\"\n    # Kafka SASL configuration\n    sasl:\n      # SASL user name\n      user:\n      # SASL password\n      password:\n      # SASL mechanism: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512 and OAUTHBEARER are supported\n      mechanism:\n      # token url for OAUTHBEARER SASL mechanism\n      token-url:\n    # string, comma separated Kafka servers addresses\n    address: localhost:9092\n    # string, comma separated topics the Kafka consumer group consumes messages from.\n    topics: telemetry \n    # consumer group all gnmic Kafka input workers join, \n    # so that Kafka server can load share the messages between them. 
Defaults to `gnmic-consumers`\n    group-id: gnmic-consumers\n    # duration, the timeout used to detect consumer failures when using Kafka's group management facility.\n    # If no heartbeats are received by the broker before the expiration of this session timeout,\n    # then the broker will remove this consumer from the group and initiate a rebalance.\n    session-timeout: 10s\n    # duration, the expected time between heartbeats to the consumer coordinator when using Kafka's group\n    # management facilities.\n    heartbeat-interval: 3s\n    # duration, wait time before reconnection attempts after any error\n    recovery-wait-time: 2s \n    # string, kafka version, defaults to 2.5.0\n    version: \n    # string, consumed message expected format, one of: proto, event\n    format: event \n    # bool, enables extra logging\n    debug: false\n    # integer, number of kafka consumers to be created\n    num-workers: 1\n    # list of processors to apply on the message when received, \n    # only applies if format is 'event'\n    event-processors: \n    # []string, list of named outputs to export data to. \n    # Must be configured under root level `outputs` section\n    outputs: \n```\n\n"
  },
  {
    "path": "docs/user_guide/inputs/nats_input.md",
    "content": "When using NATS as input, `gnmic` consumes data from a specific NATS subject in `event` or `proto` format.\n\nMultiple consumers can be created per `gnmic` instance (`num-workers`).\nAll the workers join the same [NATS queue group](https://docs.nats.io/nats-concepts/queue) (`queue`) in order to load share the messages between the workers.\n\nMultiple instances of `gnmic` with the same NATS input can be used to effectively consume the exported messages in parallel\n\nThe NATS input will export the received messages to the list of outputs configured under its `outputs` section.\n\n```yaml\ninputs:\n  input1:\n    # string, required, specifies the type of input\n    type: nats \n    # NATS subscriber name\n    # If left empty, it will be populated with the string from flag --instance-name appended with `--nats-sub`.\n    # If --instance-name is also empty, a random name is generated in the format `gnmic-$uuid`\n    # note that each nats worker (subscriber) will get name=$name-$index\n    name: \"\"\n    # string, comma separated NATS servers addresses\n    address: localhost:4222\n    # The subject name gnmic NATS consumers subscribe to.\n    subject: telemetry \n    # subscribe queue group all gnmic NATS input workers join, \n    # so that NATS server can load share the messages between them.\n    queue: \n    # string, NATS username\n    username: \n    # string, NATS password  \n    password: \n    # duration, wait time before reconnection attempts\n    connect-time-wait: 2s \n    # string, consumed message expected format, one of: proto, event\n    format: event \n    # bool, enables extra logging\n    debug: false\n    # integer, number of nats consumers to be created\n    num-workers: 1\n    # integer, sets the size of the local buffer where received \n    # NATS messages are stored before being sent to outputs.\n    # This value is set per worker. 
Defaults to 100 messages\n    buffer-size: 100\n    # list of processors to apply on the message when received, \n    # only applies if format is 'event'\n    event-processors: \n    # []string, list of named outputs to export data to. \n    # Must be configured under root level `outputs` section\n    outputs: \n```\n\n"
  },
  {
    "path": "docs/user_guide/inputs/stan_input.md",
    "content": "When using STAN as input, `gnmic` consumes data from a specific STAN subject in `event` or `proto` format.\n\nMultiple consumers can be created per `gnmic` instance (`num-workers`).\nAll the workers join the same [STAN queue group](https://docs.nats.io/nats-concepts/queue) (`queue`) in order to load share the messages between the workers.\n\nMultiple instances of `gnmic` with the same STAN input can be used to effectively consume the exported messages in parallel\n\nThe STAN input will export the received messages to the list of outputs configured under its `outputs` section.\n\n```yaml\ninputs:\n  input1:\n    # string, required, specifies the type of input\n    type: stan \n    # STAN subscriber name\n    # If left empty, it will be populated with the string from flag --instance-name appended with `--stan-sub`.\n    # If --instance-name is also empty, a random name is generated in the format `gnmic-$uuid`\n    # note that each stan worker (subscriber) will get name=$name-$index\n    name: \"\"\n    # string, comma separated STAN servers addresses\n    address: localhost:4222\n    # The subject name gnmic STAN consumers subscribe to.\n    subject: telemetry \n    # subscribe queue group all gnmic STAN input workers join, \n    # so that STAN server can load share the messages between them.\n    queue: \n    # string, STAN username\n    username: \n    # string, STAN password  \n    password: \n    # duration, wait time before reconnection attempts\n    connect-time-wait: 2s\n    # string, the STAN cluster name. defaults to test-cluster\n    cluster-name: \n    # integer, interval (in seconds) at which \n    # a connection sends a PING to the server. min=1\n    ping-interval:\n    # integer, number of PINGs without a response \n    # before the connection is considered lost. 
min=2\n    ping-retry:\n    # string, consumed message expected format, one of: proto, event\n    format: event \n    # bool, enables extra logging\n    debug: false\n    # integer, number of stan consumers to be created\n    num-workers: 1\n    # list of processors to apply on the message when received, \n    # only applies if format is 'event'\n    event-processors: \n    # []string, list of named outputs to export data to. \n    # Must be configured under root level `outputs` section\n    outputs: \n```\n"
  },
  {
    "path": "docs/user_guide/outputs/asciigraph_output.md",
    "content": "`gnmic` supports displaying collected metrics as an ASCII graph on the terminal.\nThe graph is generated using the [asciigraph](https://github.com/guptarohit/asciigraph) package.\n\n### Configuration sample\n\n```yaml\n\noutputs:\n  output1:\n    # required\n    type: asciigraph\n    # string, the graph caption\n    caption: \n    # integer, the graph height. If unset, defaults to the terminal height\n    height:\n    # integer, the graph width. If unset, defaults to the terminal width\n    width:\n    # float, the graph minimum value for the vertical axis.\n    lower-bound:\n    # float, the graph maximum value for the vertical axis.\n    upper-bound:\n    # integer, the graph left offset.\n    offset:\n    # integer, the decimal point precision of the label values.\n    precision:\n    # string, the caption color. one of ANSI colors.\n    caption-color:\n    # string, the axis color. one of ANSI colors.\n    axis-color:\n    # string, the label color. one of ANSI colors.\n    label-color:\n    # duration, the graph refresh timer.\n    refresh-timer: 1s\n    # string, one of `overwrite`, `if-not-present`, ``\n    # This field allows populating/changing the value of Prefix.Target in the received message.\n    # if set to ``, nothing changes \n    # if set to `overwrite`, the target value is overwritten using the template configured under `target-template`\n    # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template`\n    add-target: \n    # string, a GoTemplate that allows for the customization of the target field in Prefix.Target.\n    # it applies only if the previous field `add-target` is not empty.\n    # if left empty, it defaults to:\n    # {{- if index . \"subscription-target\" -}}\n    # {{ index . \"subscription-target\" }}\n    # {{- else -}}\n    # {{ index . 
\"source\" | host }}\n    # {{- end -}}`\n    # which will set the target to the value configured under `subscription.$subscription-name.target` if any,\n    # otherwise it will set it to the target name stripped of the port number (if present)\n    target-template:\n    # list of processors to apply on the message before writing\n    event-processors: \n    # bool enable debug\n    debug: false \n```\n\n### Example\n\nThis example shows how to use the `asciigraph` output.\n\ngNMIc config\n\n```shell\ncat gnmic_asciiout.yaml\n```\n\n```yaml\ntargets:\n  clab-nfd33-spine1-1:\n    username: admin\n    password: NokiaSrl1!\n    skip-verify: true\n\nsubscriptions:\n  sub1:\n    paths:\n      - /interface[name=ethernet-1/3]/statistics/out-octets\n      - /interface[name=ethernet-1/3]/statistics/in-octets\n    stream-mode: sample\n    sample-interval: 1s\n    encoding: ascii\n\noutputs:\n  out1:\n    type: asciigraph\n    caption: in/out octets per second\n    event-processors:\n      - rate\n\nprocessors:\n  rate:\n    event-starlark:\n      script: rate.star\n```\n\nStarlark processor\n\n```shell\ncat rate.star\n```\n\n```python\ncache = {}\n\nvalues_names = [\n  '/interface/statistics/out-octets',\n  '/interface/statistics/in-octets'\n]\n\nN=2\n\ndef apply(*events):\n  for e in events:\n    for value_name in values_names:\n      v = e.values.get(value_name)\n      # check if v is not None and is a digit to proceed\n      if not v:\n        continue\n      if not v.isdigit():\n        continue\n      # update cache with the latest value\n      val_key = \"_\".join([e.tags[\"source\"], e.tags[\"interface_name\"], value_name])\n      if not cache.get(val_key):\n        # initialize the cache entry if empty\n        cache.update({val_key: []})\n      if len(cache[val_key]) > N:\n        # remove the oldest entry if the number of entries reached N\n        cache[val_key] = cache[val_key][1:]\n      # update cache entry\n      cache[val_key].append((int(v), e.timestamp))\n  
    # get the list of values\n      val_list = cache[val_key]\n      # calculate rate\n      e.values[value_name+\"_rate\"] = rate(val_list)\n      e.values.pop(value_name)\n    \n  return events\n\ndef rate(vals):\n  previous_value, previous_timestamp = None, None\n  for value, timestamp in vals:\n    if previous_value != None and previous_timestamp != None:\n      time_diff = (timestamp - previous_timestamp) / 1000000000 # 1 000 000 000\n      if time_diff > 0:\n        value_diff = value - previous_value\n        rate = value_diff / time_diff\n        return rate\n\n    previous_value = value\n    previous_timestamp = timestamp\n\n  return 0\n```\n\n<script async id=\"asciicast-617477\" src=\"https://asciinema.org/a/617477.js\"></script>\n"
  },
  {
    "path": "docs/user_guide/outputs/file_output.md",
    "content": "`gnmic` supports exporting subscription updates to multiple local files\n\nA file output can be defined using the below format in `gnmic` config file under `outputs` section:\n\n```yaml\noutputs:\n  output1:\n    # required\n    type: file \n    # filename to write telemetry data to.\n    # will be ignored if `file-type` is set\n    filename: /path/to/filename\n    # file-type, stdout or stderr.\n    # overwrites `filename`\n    file-type: # stdout or stderr\n    # string, message formatting, json, protojson, prototext, event\n    format: \n    # string, one of `overwrite`, `if-not-present`, ``\n    # This field allows populating/changing the value of Prefix.Target in the received message.\n    # if set to ``, nothing changes \n    # if set to `overwrite`, the target value is overwritten using the template configured under `target-template`\n    # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template`\n    add-target: \n    # string, a GoTemplate that allow for the customization of the target field in Prefix.Target.\n    # it applies only if the previous field `add-target` is not empty.\n    # if left empty, it defaults to:\n    # {{- if index . \"subscription-target\" -}}\n    # {{ index . \"subscription-target\" }}\n    # {{- else -}}\n    # {{ index . 
\"source\" | host }}\n    # {{- end -}}`\n    # which will set the target to the value configured under `subscription.$subscription-name.target` if any,\n    # otherwise it will set it to the target name stripped of the port number (if present)\n    target-template:\n    # boolean, valid only if format is `event`.\n    # if true, arrays of events are split and marshaled as JSON objects instead of an array of dicts.\n    split-events: false\n    # string, a GoTemplate that is executed using the received gNMI message as input.\n    # the template execution is the last step before the data is written to the file,\n    # First the received message is formatted according to the `format` field above, then the `event-processors` are applied if any\n    # then finally the msg-template is executed.\n    msg-template:\n    # boolean, if true the message timestamp is changed to current time\n    override-timestamps: \n    # boolean, format the output in indented form with every element on a new line.\n    multiline: \n    # string, indent specifies the set of indentation characters to use in a multiline formatted output\n    indent: \n    # string, separator is the set of characters to write between messages, defaults to new line\n    separator: \n    # integer, specifies the maximum number of allowed concurrent file writes\n    concurrency-limit: 1000 \n     # boolean, enables the collection and export (via prometheus) of output specific metrics\n    enable-metrics: false\n     # list of processors to apply on the message before writing\n    event-processors:\n    # file rotation configuration\n    rotation:\n      max-size: 100 # size in megabytes\n      max-age: 30 # max age in days\n      max-backups: 3 # maximum number of old files to store, not counting the current file\n      compress: false # whether or not to enable compression\n      \n```\n\nThe file output can be used to write to file on the disk, to stdout or to stderr. 
Also includes support for rotating files to control disk utilization and maximum age using the `rotation` configuration section.\n\nFor a disk file, a file name is required.\n\nFor stdout or stderr, only file-type is required.\n"
  },
  {
    "path": "docs/user_guide/outputs/gnmi_output.md",
    "content": "`gnmic` supports acting as a `gNMI Server` to expose the subscribed telemetry data to a `gNMI Client` using the `Subscribe` RPC, or to act as a gateway for `Get` and `Set` RPCs.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:0,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/gnmi_server.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fgnmi_server.drawio\" async></script>\n\n### Configuration\n\n```yaml\noutputs:\n  output1:\n    # required\n    type: gnmi \n    # gNMI server address, either a TCP socket or UNIX socket. \n    # In the latter case, the prefix `unix:///` should be present.\n    address: \":57400\"\n    # maximum number of active subscriptions.\n    max-subscriptions: 64\n    # maximum number of ongoing Get/Set RPCs.\n    max-unary-rpc: 64\n    # tls config\n    tls:\n      # string, path to the CA certificate file,\n      # this certificate is used to verify the clients certificates.\n      ca-file:\n      # string, server certificate file.\n      cert-file:\n      # string, server key file.\n      key-file:\n      # string, one of `\"\", \"request\", \"require\", \"verify-if-given\", or \"require-verify\" \n      #  - request:         The server requests a certificate from the client but does not \n      #                     require the client to send a certificate. 
\n      #                     If the client sends a certificate, it is not required to be valid.\n      #  - require:         The server requires the client to send a certificate and does not \n      #                     fail if the client certificate is not valid.\n      #  - verify-if-given: The server requests a certificate, \n      #                     does not fail if no certificate is sent. \n      #                     If a certificate is sent it is required to be valid.\n      #  - require-verify:  The server requires the client to send a valid certificate.\n      #\n      # if no ca-file is present, `client-auth` defaults to \"\"`\n      # if a ca-file is set, `client-auth` defaults to \"require-verify\"`\n      client-auth: \"\"\n    # string, a GoTemplate that allows for the customization of the target field in Prefix.Target.\n    # it applies only if the returned Prefix.Target is empty.\n    # if left empty, it defaults to:\n    # `{{- if index . \"subscription-target\" -}}\n    # {{ index . \"subscription-target\" }}\n    # {{- else -}}\n    # {{ index . 
\"source\" | host }}\n    # {{- end -}}`\n    # which will set the target to the value configured under `subscription.$subscription-name.target` if any,\n    # otherwise it will set it to the target name stripped of the port number (if present).\n    target-template:\n    # boolean, enables extra logging for the gNMI Server\n    debug: false\n    # boolean, enables the collection and export (via prometheus) of output specific metrics\n    enable-metrics: false \n```\n\n#### Insecure Mode\n\nBy default, the server runs in insecure mode, as long as `skip-verify` is false and none of `ca-file`, `cert-file` and `key-file` are set.\n\n#### Secure Mode\n\nTo run this gNMI server in secure mode, there are a few options:\n\n- **Using self signed certificates, without client certificate verification:**\n\n```yaml\nskip-verify: true\n```\n\n- **Using self signed certificates, with client certificate verification:**\n\n```yaml\n# a valid CA certificate to verify the client provided certificates\nca-file: /path/to/caFile \n```\n  \n- **Using CA provided certificates, without client certificate verification:**\n\n```yaml\nskip-verify: true\n# a valid server certificate\ncert-file: /path/to/server-cert\n# a valid server key\nkey-file:  /path/to/server-key\n```\n\n- **Using CA provided certificates, with client certificate verification:**\n\n```yaml\n# a valid CA certificate to verify the client provided certificates\nca-file: /path/to/caFile \n# a valid server certificate\ncert-file: /path/to/server-cert\n# a valid server key\nkey-file:  /path/to/server-key\n```\n\n### Supported RPCs\n\nThis `gNMI Server` supports `Get`, `Set` and `Subscribe` RPCs.\n\n#### gNMI Subscribe RPC\n\nThe server keeps a cache of gNMI notifications synched with the configured targets based on the configured subscriptions.\nThis means that a client cannot get updates about a leaf that `gNMIc` did not subscribe to upstream.\n\nAs soon as there is an update to the cache, the added gNMI notification is sent 
to all the client which subscription matches the new notification.\n\nClients can subscribe to specific target using the gNMI Prefix Target field, leaving the Target field empty or setting it to `*` is equivalent to subscribing to all known targets.\n\n#### gNMI Get RPC\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:1,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/gnmi_server.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fgnmi_server.drawio\" async></script>\n\nThe server supports the gNMI `Get` RPC.\nIt relies on the Prefix.Target field to select the target(s) to relay the received GetRequest to.\n\nIf Prefix.Target is empty or is equal to `*`, a Get RPC is performed for all known targets.\nThe received GetRequest is cloned, enriched with each target name and sent to the corresponding destination.\n\nComma separated target names are also supported and allow to select a list of specific targets to send the Get RPC to.\n\nOnce all GetResponses are received back successfully, the notifications contained in each GetResponse are combined into a single GetResponse with their Prefix.Target populated, if empty.\n\nThe resulting GetResponse is then returned to the gNMI client.\nIf one of the RPCs fails, an error with status code `Internal(13)` is returned to the client.\n\nIf the Get Request has the origin field set to `gnmic`, the request is performed against the internal server configuration.\nCurrently only the path `targets` is supported.\n\n```bash\ngnmic -a localhost:57400 --skip-verify get --path gnmic:/targets\n```\n\n```json\n[\n  
{\n    \"timestamp\": 1626759382486891218,\n    \"time\": \"2021-07-20T13:36:22.486891218+08:00\",\n    \"prefix\": \"gnmic:targets[name=clab-gw-srl1:57400]\",\n    \"updates\": [\n      {\n        \"Path\": \"address\",\n        \"values\": {\n          \"address\": \"clab-gw-srl1:57400\"\n        }\n      },\n      {\n        \"Path\": \"username\",\n        \"values\": {\n          \"username\": \"admin\"\n        }\n      },\n      {\n        \"Path\": \"insecure\",\n        \"values\": {\n          \"insecure\": \"false\"\n        }\n      },\n      {\n        \"Path\": \"skip-verify\",\n        \"values\": {\n          \"skip-verify\": \"true\"\n        }\n      },\n      {\n        \"Path\": \"timeout\",\n        \"values\": {\n          \"timeout\": \"10s\"\n        }\n      }\n    ]\n  },\n  {\n    \"timestamp\": 1626759382486900697,\n    \"time\": \"2021-07-20T13:36:22.486900697+08:00\",\n    \"prefix\": \"gnmic:targets[name=clab-gw-srl2:57400]\",\n    \"updates\": [\n      {\n        \"Path\": \"address\",\n        \"values\": {\n          \"address\": \"clab-gw-srl2:57400\"\n        }\n      },\n      {\n        \"Path\": \"username\",\n        \"values\": {\n          \"username\": \"admin\"\n        }\n      },\n      {\n        \"Path\": \"insecure\",\n        \"values\": {\n          \"insecure\": \"false\"\n        }\n      },\n      {\n        \"Path\": \"skip-verify\",\n        \"values\": {\n          \"skip-verify\": \"true\"\n        }\n      },\n      {\n        \"Path\": \"timeout\",\n        \"values\": {\n          \"timeout\": \"10s\"\n        }\n      }\n    ]\n  }\n]\n```\n\n#### gNMI Set RPC\n\nThe gNMI server supports the gNMI `Set` RPC.\nJust like in the case of `Get` RPC, the server relies on the `Prefix.Target` field to select the target(s) to relay the received SetRequest to.\n\nIf Prefix.Target is empty or is equal to `*`, a Set RPC is performed for all known targets.\nThe received SetRequest is cloned, enriched with each target 
name and sent to the corresponding destination.\n\nComma separated target names are also supported and allow to select a list of specific targets to send the Set RPC to.\n\nOnce all SetResponses are received back successfully, the `UpdateResult`s from each response are merged into a single SetResponse, with the addition of the target name set in `Path.Target`.\nThis is not compliant with the gNMI specification which stipulates that the `Target` field should only be present in [Prefix Paths](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target)\n\nThe resulting SetResponse is then returned to the gNMI client.\nIf one of the RPCs fails, an error with status code `Internal(13)` is returned to the client.\n"
  },
  {
    "path": "docs/user_guide/outputs/influxdb_output.md",
    "content": "`gnmic` supports exporting subscription updates to [influxDB](https://www.influxdata.com/products/influxdb-overview/) time series database\n\n## Configuration\n\nAn influxdb output can be defined using the below format in `gnmic` config file under `outputs` section:\n\n```yaml\noutputs:\n  output1:\n    # required\n    type: influxdb \n    # influxDB server address\n    url: http://localhost:8086 \n    # empty if using influxdb1.8.x\n    org: myOrg \n    # string in the form database/retention-policy. Skip retention policy for the default on\n    bucket: telemetry\n    # influxdb 1.8.x use a string in the form: \"username:password\"\n    token: \n    # number of points to buffer before writing to the server\n    batch-size: 1000 \n    # flush period after which the buffer is written to the server whether the batch_size is reached or not\n    flush-timer: 10s\n    # if true, the influxdb client will use gzip compression in write requests.\n    use-gzip: false\n    # (deprecated, use tls.skip-verify: true) \n    #if true, the influxdb client will use a secure connection to the server.\n    enable-tls: false\n    # tls config\n    tls:\n      # string, path to the CA certificate file,\n      # this will be used to verify the clients certificates when `skip-verify` is false\n      ca-file:\n      # string, client certificate file.\n      cert-file:\n      # string, client key file.\n      key-file:\n      # boolean, if true, the client will not verify the server\n      # certificate against the available certificate chain.\n      skip-verify: false\n    # boolean, if true the message timestamp is changed to current time\n    override-timestamps: false \n    # server health check period, used to recover from server connectivity failure.\n    # health check is disabled by default, can be enabled by setting the below field to any value other that zero.\n    # with a minimum allowed period of 30s.\n    health-check-period: 0s \n    # defines the write 
timestamp precision, \n    # one of `s` for second, `ms` for millisecond, `us` for microsecond and `ns` for nanoseconds\n    # any other value defaults to `ns`.\n    timestamp-precision: ns\n    # enable debug\n    debug: false \n    # string, one of `overwrite`, `if-not-present`, ``\n    # This field allows populating/changing the value of Prefix.Target in the received message.\n    # if set to ``, nothing changes \n    # if set to `overwrite`, the target value is overwritten using the template configured under `target-template`\n    # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template`\n    add-target: \n    # string, a GoTemplate that allow for the customization of the target field in Prefix.Target.\n    # it applies only if the previous field `add-target` is not empty.\n    # if left empty, it defaults to:\n    # {{- if index . \"subscription-target\" -}}\n    # {{ index . \"subscription-target\" }}\n    # {{- else -}}\n    # {{ index . 
\"source\" | host }}\n    # {{- end -}}`\n    # which will set the target to the value configured under `subscription.$subscription-name.target` if any,\n    # otherwise it will set it to the target name stripped of the port number (if present)\n    target-template:\n    # NOT IMPLEMENTED boolean, enables the collection and export (via prometheus) of output specific metrics\n    enable-metrics: false \n    # list of processors to apply on the message before writing\n    event-processors: []\n    # cache, if present enables the influxdb output to cache received updates and write them all together \n    # at `cache-flush-timer` expiry.\n    cache:\n      # duration, if > 0, enables the expiry of values written to the cache.\n      expiration: 0s\n      # debug, if true enable extra logging\n      debug: false\n    # cache-flush-timer\n    cache-flush-timer: 5s\n```\n\n`gnmic` uses the [`event`](../event_processors/intro.md#the-event-format) format to generate the measurements written to InfluxDB. When an event has been processed through `gnmic` processors, the final value of the `subscription-name` tag will be used as an InfluxDB measurement name and the tag will be removed. 
If the `subscription-name` tag does not exist in the event, the event's `Name` will be used as InfluxDB measurement.\n\n## Caching\n\nWhen caching is enabled, the received messages are not written directly to InfluxDB, they are first cached as gNMI updates and written in batch when the `cache-flush-timer` is reached.\n\nThe below diagram shows how an InfluxDB output works with and without cache enabled:\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:10,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/influxdb_output_with_without_cache.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2F/influxdb_output_with_without_cache.drawio\" async></script>\n\nWhen caching is enabled, the cached gNMI updates are periodically retrieved in batch, converted to [events](../event_processors/intro.md#the-event-format).\n\nIf [processors](../event_processors/intro.md) are defined under the output, they are applied to the whole list of events at once. This allows augmenting some messages with values from other messages even if they where collected from a different target/subscription.\n"
  },
  {
    "path": "docs/user_guide/outputs/jetstream_output.md",
    "content": "`gnmic` supports exporting subscription updates to [NATS Jetstream](https://docs.nats.io/nats-concepts/jetstream) servers.\n\nA [Jetstream](https://docs.nats.io/nats-concepts/jetstream) output can be defined using the below format in `gnmic` config file under `outputs` section:\n\n### configuration\n\n```yaml\noutputs:\n  output1:\n    # required\n    type: jetstream \n    # NATS publisher name\n    # if left empty, this field is populated with the output name used as output ID (output1 in this example).\n    # If the flag --instance-name is not empty, the full name will be '$(instance-name)-$(name).\n    # note that each jetstream worker (publisher) will get a client name=$name-$index\n    name: \"\"\n    # Comma separated NATS servers\n    address: localhost:4222\n    # string, stream name to write update to,\n    # if `create-stream` is set, it will be created\n    # if `create-stream` is omitted, an existing stream with this name must be available\n    # may not contain spaces, tabs, period (.), greater than (>) or asterisk (*)\n    stream:\n    # defines stream parameters that gNMIc will create on the target jetstream server(s)\n    # if omitted, gnmic will use an existing stream and will not attempt to create or modify it\n    create-stream:\n      # string, stream description\n      description: created by gNMIc\n      # string list, list of subjects allowed on the stream\n      # defaults to `.create-stream.$name.>`\n      subjects:\n      # string, one of `memory`, `file`.\n      # defines the storage type to use for the stream.\n      # defaults to `memory`\n      storage:\n      # int64, max number of messages in the stream.\n      max-msgs:\n      # int64, max bytes the stream may contain.\n      max-bytes:\n      # duration, max age of any message in the stream.\n      max-age:\n      # int32, maximum message size\n      max-msg-size:\n      # string, retention policy for the stream: `limits` or `workqueue`\n      # `limits`: messages are 
retained based on size, count, or age limits\n      # `workqueue`: messages are removed after being acknowledged by all consumers\n      # defaults to `limits`\n      retention-policy: limits\n    # string, one of `static`, `subscription.target`, `subscription.target.path` \n    # or `subscription.target.pathKeys`.\n    # Defines the subject format.\n    # `static`: \n    #       all updates will be written to the subject name set under `outputs.$output_name.subject`\n    # `subscription.target`: \n    #       updates from each subscription, target will be written \n    #       to subject $subscription_name.$target_name\n    # `subscription.target.path`: \n    #       updates from a certain subscription, target and path \n    #       will be written to subject $subscription_name.$target_name.$path.\n    #       The path is built by joining the gNMI path pathElements with a dot (.).\n    #       e.g: /interface[name=ethernet-1/1]/statistics/in-octets\n    #       -->  interface.statistics.in-octets \n    # `subscription.target.pathKeys`: \n    #       updates from a certain subscription, a certain target and a certain path \n    #       will be written to subject $subscription_name.$target_name.$path.\n    #       The path is built by joining the gNMI path pathElements and Keys with a dot (.).\n    #       e.g: /interface[name=ethernet-1/1]/statistics/in-octets\n    #       -->  interface.{name=ethernet-1/1}.statistics.in-octets \n    # `target.subscription`:\n    #       updates from each subscription, target will be written with a prefix of the `subject`\n    #       to subject $subject.$target_name.$subscription_name if `subject` is present. If not,\n    #       it will write to $target_name.$subscription_name.\n    subject-format: static \n    # If a subject-format is `static`, gnmic will publish all subscriptions updates \n    # to a single subject configured under this field. 
Defaults to 'telemetry'\n    # If a subject-format is `target.subscription`, gnmic will publish subscription\n    # updates prefixed with this subject.\n    subject: telemetry\n    # tls config\n    tls:\n      # string, path to the CA certificate file,\n      # this will be used to verify the clients certificates when `skip-verify` is false\n      ca-file:\n      # string, client certificate file.\n      cert-file:\n      # string, client key file.\n      key-file:\n      # boolean, if true, the client will not verify the server\n      # certificate against the available certificate chain.\n      skip-verify: false\n    # NATS username\n    username: \n    # NATS password  \n    password: \n    # wait time before reconnection attempts\n    connect-time-wait: 2s \n    # Exported message format, one of: proto, prototext, protojson, json, event\n    format: event \n    # string, one of `overwrite`, `if-not-present`, ``\n    # This field allows populating/changing the value of Prefix.Target in the received message.\n    # if set to ``, nothing changes \n    # if set to `overwrite`, the target value is overwritten using the template configured under `target-template`\n    # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template`\n    add-target: \n    # string, a GoTemplate that allows for the customization of the target field in Prefix.Target.\n    # it applies only if the previous field `add-target` is not empty.\n    # if left empty, it defaults to:\n    # {{- if index . \"subscription-target\" -}}\n    # {{ index . \"subscription-target\" }}\n    # {{- else -}}\n    # {{ index . 
\"source\" | host }}\n    # {{- end -}}`\n    # which will set the target to the value configured under `subscription.$subscription-name.target` if any,\n    # otherwise it will set it to the target name stripped of the port number (if present)\n    target-template:\n    # boolean, valid only if format is `event`.\n    # if true, arrays of events are split and marshaled as JSON objects instead of an array of dicts.\n    split-events: false\n    # string, a GoTemplate that is executed using the received gNMI message as input.\n    # the template execution is the last step before the data is written to the file.\n    # First the received message is formatted according to the `format` field above, then the `event-processors` are applied if any\n    # then finally the msg-template is executed.\n    msg-template:\n    # boolean, if true the message timestamp is changed to current time\n    override-timestamps: false\n    # integer, number of nats publishers to be created\n    num-workers: 1 \n    # duration after which a message waiting to be handled by a worker gets discarded\n    write-timeout: 5s \n    # boolean, enables extra logging for the nats output\n    debug: false\n    # integer, sets the size of the local buffer where received\n    # NATS messages are stored before being sent to outputs.\n    # This value is set per worker. 
Defaults to 0 messages\n    buffer-size: 0\n    # boolean, enables the collection and export (via prometheus) of output specific metrics\n    enable-metrics: false \n    # list of processors to apply to the message before writing\n    event-processors: \n```\n\n### subject-format\n\nThe `subject-format` field is used to control how the received gNMI notifications are written into the configured stream.\n\n#### static\n\nAll notifications will be written to the subject name set under `outputs.$output_name.subject`\n\n#### subscription.target\n\nNotifications from each subscription and target pair will be written to subject `$subscription_name.$target_name`\n\n#### subscription.target.path\n\nNotifications from a subscription, target and path tuple\nwill be written to subject $subscription_name.$target_name.$path.\nThe path is built by joining the gNMI path pathElements with a period `(.)`.\n\nNotifications containing more than one update, will be expanded into multiple notifications with one update each.\n\nE.g:\n\nAn update from target `target1` and subscription `sub1` containing path `/interface[name=ethernet-1/1]/statistics/in-octets`,\nwill be written to subject:\n\n```text\n$stream_name.sub1.target1.interface.statistics.in-octets\n```\n\n#### subscription.target.pathKeys\n\nUpdates from a certain subscription, a certain target and a certain path will be written to subject `$subscription_name.$target_name.$path`.\nThe path is built by joining the gNMI path pathElements and Keys with a period `(.)`.\n\nNotifications containing more than one update, will be expanded into multiple notifications with one update each.\n\nE.g:\n\nAn update from target `target1` and subscription `sub1` containing path `/interface[name=ethernet-1/1]/statistics/in-octets`,\nwill be written to subject:\n\n```text\n$stream_name.sub1.target1.interface.{name=ethernet-1/1}.statistics.in-octets\n```\n\n### JetStream Queue Patterns\n\nJetStream streams support three retention policies that 
enable different message processing patterns: Limits, Workqueue and Interest-Based. Gnmic supports Limits by default, and optionally supports workqueue based. It does not currently support Interest-based. \n\n#### Limits Retention (Default)\n\nMessages are retained based on configured limits (max-msgs, max-bytes, max-age). When these limits are exceeded, older messages are automatically removed, regardless of whether they have been consumed.\n\n**Use limits retention when:**\n- You don't have any better ideas :)\n- You want to have multiple consumers fetch the same metric asynchronously within the configured retention time.\n- You can accept losing messages that overfill the retention time, even when they were not acknowledged yet.\n\n**Example configuration:**\n\n```yaml\noutputs:\n  telemetry-output:\n    type: jetstream\n    address: localhost:4222\n    stream: telemetry-stream\n    create-stream:\n      retention-policy: limits  # default\n      storage: memory\n      max-msgs: 100000\n      max-bytes: 10737418240  # 10GB\n      max-age: 24h\n    subject-format: subscription.target\n```\n\n#### Workqueue Retention\n\nMessages are automatically removed from the stream after being acknowledged by the consumer of that subject. This enables exactly-once message processing, where each message is processed by only one consumer and then deleted.\n\n**Use workqueue retention when:**\n- You have 1:1 producer/consumer configuration\n- You want automatic cleanup of messages after successful processing\n- Message loss is unacceptable (e.g. 
using a file persistence)\n\n**Example configuration:**\n\n```yaml\noutputs:\n  task-output:\n    type: jetstream\n    address: localhost:4222\n    stream: telemetry-stream\n    create-stream:\n      retention-policy: workqueue\n      storage: file\n      max-msgs: 100000\n      max-bytes: 10737418240  # 10GB\n    subject-format: subscription.target\n```\n\n#### Using Existing Streams\n\nIf a stream has already been created (e.g., by administrators or other applications), you can configure gnmic to use it by omitting the `create-stream` configuration:\n\n```yaml\noutputs:\n  existing-output:\n    type: jetstream\n    address: localhost:4222\n    stream: existing-stream\n    subject-format: static\n    subject: telemetry\n```\n\nWhen `create-stream` is omitted, gnmic will not attempt to create or modify the stream configuration. The stream with the specified name must already exist on the JetStream server. This is useful when:\n\n- Stream configuration is managed centrally\n- You don't have permissions to create streams\n- You want to ensure stream settings remain unchanged\n\n**Important:** When using an existing stream, ensure the stream's subjects configuration is compatible with your chosen `subject-format` and `subject` settings, otherwise messages may fail to publish.\n"
  },
  {
    "path": "docs/user_guide/outputs/kafka_output.md",
    "content": "`gnmic` supports exporting subscription updates to multiple Apache Kafka brokers/clusters simultaneously\n\n### Configuration sample\n\nA Kafka output can be defined using the below format in `gnmic` config file under `outputs` section:\n\n```yaml\noutputs:\n  output1:\n    # required\n    type: kafka \n    # kafka client name. \n    # if left empty, this field is populated with the output name used as output ID (output1 in this example).\n    # the full name will be '$(name)-kafka-prod'.\n    # If the flag --instance-name is not empty, the full name will be '$(instance-name)-$(name)-kafka-prod.\n    # note that each kafka worker (producer) will get client name=$name-$index\n    name: \"\"\n    # Comma separated brokers addresses\n    address: localhost:9092 \n    # Kafka topic name\n    topic: telemetry \n    # Kafka topic prefix\n    # If supplied, overrides the `topic` key and outputs to a separate topic per source\n    # named like `$topic_$subscriptionName_$targetName`. If `source` contains a port number separated with a colon,\n    # the colon will be replaced with an underscore due to restrictions on the naming of kafka topics.\n    # ex: telemetry_bgp_neighbor_state_device1_6030\n    topic-prefix: telemetry\n    # starts a sync-producer if set to true.\n    sync-producer: false\n    # required-acks is used in Produce Requests to tell the broker how many replica acknowledgements\n    # it must see before responding. 
One of `no-response`, `wait-for-local`, `wait-for-all`.\n    required-acks: wait-for-local\n    # Kafka SASL configuration\n    sasl:\n      # SASL user name\n      user:\n      # SASL password\n      password:\n      # SASL mechanism: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512 and OAUTHBEARER are supported\n      mechanism:\n      # token url for OAUTHBEARER SASL mechanism\n      token-url:\n    # tls config\n    tls:\n      # string, path to the CA certificate file,\n      # this will be used to verify the clients certificates when `skip-verify` is false\n      ca-file:\n      # string, client certificate file.\n      cert-file:\n      # string, client key file.\n      key-file:\n      # boolean, if true, the client will not verify the server\n      # certificate against the available certificate chain.\n      skip-verify: false\n    # The total number of times to retry sending a message\n    max-retry: 2 \n    # Kafka connection timeout\n    timeout: 5s \n    # Wait time to reestablish the kafka producer connection after a failure\n    recovery-wait-time: 10s \n    # Exported msg format, json, protojson, prototext, proto, event\n    format: event \n    # boolean, if true the kafka producer will add a key to \n    # the message written to the broker. 
The key value is ${source}_${subscription-name}.\n    # this is useful for Kafka topics with multiple partitions, it allows to keep messages from the same source and subscription in sequence.\n    insert-key: false\n    # string, one of `overwrite`, `if-not-present`, ``\n    # This field allows populating/changing the value of Prefix.Target in the received message.\n    # if set to ``, nothing changes \n    # if set to `overwrite`, the target value is overwritten using the template configured under `target-template`\n    # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template`\n    add-target: \n    # string, a GoTemplate that allows for the customization of the target field in Prefix.Target.\n    # it applies only if the previous field `add-target` is not empty.\n    # if left empty, it defaults to:\n    # {{- if index . \"subscription-target\" -}}\n    # {{ index . \"subscription-target\" }}\n    # {{- else -}}\n    # {{ index . 
\"source\" | host }}\n    # {{- end -}}`\n    # which will set the target to the value configured under `subscription.$subscription-name.target` if any,\n    # otherwise it will set it to the target name stripped of the port number (if present)\n    target-template:\n    # boolean, valid only if format is `event`.\n    # if true, arrays of events are split and marshaled as JSON objects instead of an array of dicts.\n    split-events: false\n    # string, a GoTemplate that is executed using the received gNMI message as input.\n    # the template execution is the last step before the data is written to the file,\n    # First the received message is formatted according to the `format` field above, then the `event-processors` are applied if any\n    # then finally the msg-template is executed.\n    msg-template:\n    # boolean, if true the message timestamp is changed to current time\n    override-timestamps: false\n    # Number of kafka producers to be created \n    num-workers: 1 \n    # (bool) enable debug\n    debug: false \n    # (int) number of messages to buffer before being picked up by the workers\n    buffer-size: 0\n    # (string) enables compression of produced message. 
One of gzip, snappy, zstd, lz4\n    compression-codec: gzip\n    # (bool) enables the collection and export (via prometheus) of output specific metrics\n    enable-metrics: false \n    # list of processors to apply on the message before writing\n    event-processors: \n```\n\nCurrently all subscriptions updates (all targets and all subscriptions) are published to the defined topic name unless the `topic-prefix` configuration option is set.\n\n### Kafka Security protocol\n\nKafka clients can operate with 4 [security protocols](https://kafka.apache.org/24/javadoc/org/apache/kafka/common/security/auth/SecurityProtocol.html), \ntheir configuration is controlled via both `.tls` and `.sasl` fields under the output config.\n\n**Security Protocol**  | **Description**                           | **Configuration**                       |\n-----------------------|-------------------------------------------|-----------------------------------------|\n`PLAINTEXT`            | Un-authenticated, non-encrypted channel   | `.tls` and `.sasl` are **NOT** present  |\n`SASL_PLAINTEXT`       | SASL authenticated, non-encrypted channel | only `.sasl` is present                 |\n`SASL_SSL`             | SASL authenticated, SSL channel           | both `.tls` and `.sasl` are present     |\n`SSL`                  | SSL channel                               | only `.tls` is present                  |\n\n#### Security Configuration Examples\n\n=== \"PLAINTEXT\"\n    ```yaml\n    outputs:\n      output1:\n        type: kafka\n        topic: my_kafka_topic\n        # other fields\n        # no tls and no sasl fields\n    ```\n=== \"SASL_PLAINTEXT\"\n    ```yaml\n    outputs:\n      output1:\n        type: kafka\n        topic: my_kafka_topic\n        sasl:\n          user: admin\n          password: secret\n        # other fields\n        # no tls field\n    ```\n=== \"SASL_SSL\"\n    Example1: Without server certificate verification\n    ```yaml\n    outputs:\n      output1:\n        type: 
kafka\n        topic: my_kafka_topic\n        sasl:\n          user: admin\n          password: secret\n        tls:\n          skip-verify: true\n        # other fields\n        # ...\n    ```\n    Example2: With server certificate verification\n    ```yaml\n    outputs:\n      output1:\n        type: kafka\n        topic: my_kafka_topic\n        sasl:\n          user: admin\n          password: secret\n        tls:\n          ca-file: /path/to/ca-file\n        # other fields\n        # ...\n    ```\n    Example3: With client certificates\n    ```yaml\n    outputs:\n      output1:\n        type: kafka\n        topic: my_kafka_topic\n        sasl:\n          user: admin\n          password: secret\n        tls:\n          cert-file: /path/to/cert-file\n          key-file: /path/to/cert-file\n        # other fields\n        # ...\n    ```\n    Example4: With both server certificate verification and client certificates\n    ```yaml\n    outputs:\n      output1:\n        type: kafka\n        topic: my_kafka_topic\n        sasl:\n          user: admin\n          password: secret\n        tls:\n          cert-file: /path/to/cert-file\n          key-file: /path/to/cert-file\n          ca-file: /path/to/ca-file\n        # other fields\n        # ...\n    ```\n=== \"SSL\"\n    Example1: Without server certificate verification\n    ```yaml\n    outputs:\n      output1:\n        type: kafka\n        topic: my_kafka_topic\n        tls:\n          skip-verify: true\n        # other fields\n        # no sasl field\n    ```\n    Example2: With server certificate verification\n    ```yaml\n    outputs:\n      output1:\n        type: kafka\n        topic: my_kafka_topic\n        tls:\n          ca-file: /path/to/ca-file\n        # other fields\n        # no sasl field\n    ```\n    Example3: With client certificates\n    ```yaml\n    outputs:\n      output1:\n        type: kafka\n        topic: my_kafka_topic\n        tls:\n          cert-file: /path/to/cert-file\n          
key-file: /path/to/cert-file\n        # other fields\n        # no sasl field\n    ```\n    Example4: With both server certificate verification and client certificates\n    ```yaml\n    outputs:\n      output1:\n        type: kafka\n        topic: my_kafka_topic\n        tls:\n          cert-file: /path/to/cert-file\n          key-file: /path/to/cert-file\n          ca-file: /path/to/ca-file\n        # other fields\n        # no sasl field\n    ```\n\n### Kafka Output Metrics\n\nWhen a Prometheus server is enabled, `gnmic` kafka output exposes 4 prometheus metrics, 3 Counters and 1 Gauge:\n\n* `number_of_kafka_msgs_sent_success_total`: Number of msgs successfully sent by gnmic kafka output. This Counter is labeled with the kafka producerID\n* `number_of_written_kafka_bytes_total`: Number of bytes written by gnmic kafka output. This Counter is labeled with the kafka producerID\n* `number_of_kafka_msgs_sent_fail_total`: Number of failed msgs sent by gnmic kafka output. This Counter is labeled with the kafka producerID as well as the failure reason\n* `msg_send_duration_ns`: gnmic kafka output send duration in nanoseconds. This Gauge is labeled with the kafka producerID\n"
  },
  {
    "path": "docs/user_guide/outputs/nats_output.md",
    "content": "`gnmic` supports exporting subscription updates to multiple NATS servers/clusters simultaneously\n\nA [NATS](https://docs.nats.io/) output can be defined using the below format in `gnmic` config file under `outputs` section:\n\n```yaml\noutputs:\n  output1:\n    # required\n    type: nats \n    # NATS publisher name\n    # if left empty, this field is populated with the output name used as output ID (output1 in this example).\n    # the full name will be '$(name)-nats-pub'.\n    # If the flag --instance-name is not empty, the full name will be '$(instance-name)-$(name)-nats-pub.\n    # note that each nats worker (publisher) will get client name=$name-$index\n    name: \"\"\n    # Comma separated NATS servers\n    address: localhost:4222 \n    # This prefix is used to build the subject name for each target/subscription\n    subject-prefix: telemetry \n    # If a subject-prefix is not specified, gnmic will publish all subscriptions updates to a single subject configured under this field. 
Defaults to 'telemetry'\n    subject: telemetry \n    # NATS username\n    username: \n    # NATS password  \n    password: \n    # wait time before reconnection attempts\n    connect-time-wait: 2s \n    # tls config\n    tls:\n      # string, path to the CA certificate file,\n      # this will be used to verify the clients certificates when `skip-verify` is false\n      ca-file:\n      # string, client certificate file.\n      cert-file:\n      # string, client key file.\n      key-file:\n      # boolean, if true, the client will not verify the server\n      # certificate against the available certificate chain.\n      skip-verify: false\n    # Exported message format, one of: proto, prototext, protojson, json, event\n    format: json \n    # string, one of `overwrite`, `if-not-present`, ``\n    # This field allows populating/changing the value of Prefix.Target in the received message.\n    # if set to ``, nothing changes \n    # if set to `overwrite`, the target value is overwritten using the template configured under `target-template`\n    # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template`\n    add-target: \n    # string, a GoTemplate that allow for the customization of the target field in Prefix.Target.\n    # it applies only if the previous field `add-target` is not empty.\n    # if left empty, it defaults to:\n    # {{- if index . \"subscription-target\" -}}\n    # {{ index . \"subscription-target\" }}\n    # {{- else -}}\n    # {{ index . 
\"source\" | host }}\n    # {{- end -}}`\n    # which will set the target to the value configured under `subscription.$subscription-name.target` if any,\n    # otherwise it will set it to the target name stripped of the port number (if present)\n    target-template:\n    # boolean, valid only if format is `event`.\n    # if true, arrays of events are split and marshaled as JSON objects instead of an array of dicts.\n    split-events: false\n    # string, a GoTemplate that is executed using the received gNMI message as input.\n    # the template execution is the last step before the data is written to the file,\n    # First the received message is formatted according to the `format` field above, then the `event-processors` are applied if any\n    # then finally the msg-template is executed.\n    msg-template:\n    # boolean, if true the message timestamp is changed to current time\n    override-timestamps: false\n    # integer, number of nats publishers to be created\n    num-workers: 1 \n    # duration after which a message waiting to be handled by a worker gets discarded\n    write-timeout: 5s \n    # boolean, enables extra logging for the nats output\n    debug: false\n    # integer, sets the size of the local buffer where received\n    # NATS messages are stored before being sent to outputs.\n    # This value is set per worker. Defaults to 0 messages\n    buffer-size: 0\n    # boolean, enables the collection and export (via prometheus) of output specific metrics\n    enable-metrics: false \n    # list of processors to apply on the message before writing\n    event-processors: \n```\n\nUsing `subject` config value, a user can specify the NATS subject to which to send all subscriptions updates for all targets\n\nIf a user wants to separate updates by targets and by subscriptions, `subject-prefix` can be used. 
if `subject-prefix` is specified `subject` is ignored.\n\n`gnmic` takes advantage of NATS [subject hierarchy](https://docs.nats.io/nats-concepts/subjects#subject-hierarchies) by publishing gNMI subscription updates to a separate subject per target per subscription.\n\nThe NATS subject name is built out of the `subject-prefix`, `name` under the target definition and `subscription-name` resulting in the following format: `subject-prefix.name.subscription-name`\n\ne.g: for a target `router1`, a subscription name `port-stats` and subject-prefix `telemetry` the subject name will be `telemetry.router1.port-stats`\n\nIf the target name is an IP address, or a hostname (meaning potentially contains `.`), the `.` characters are replaced with a `-`\n\ne.g: for a target `172.17.0.100:57400`, the previous subject name becomes `telemetry.172-17-0-100:57400.port-stats`\n\nThis way a user can subscribe to different subsets of updates by tweaking the subject name:\n\n* `\"telemetry.>\"` gets all updates sent to NATS by all targets, all subscriptions\n* `\"telemetry.router1.>\"` gets all NATS updates for target router1\n* `\"telemetry.*.port-stats\"` gets all updates from subscription port-stats, for all targets\n"
  },
  {
    "path": "docs/user_guide/outputs/otlp_output.md",
    "content": "`gnmic` supports exporting subscription updates as [OpenTelemetry](https://opentelemetry.io/) metrics using the [OTLP](https://opentelemetry.io/docs/specs/otlp/) protocol.\n\nThis output can be used to push metrics to any OTLP-compatible backend such as [Grafana Alloy](https://grafana.com/docs/alloy/latest/), [Grafana Mimir](https://grafana.com/docs/mimir/latest/), [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/), [Datadog](https://www.datadoghq.com/), [Dynatrace](https://www.dynatrace.com/), or any system that accepts OTLP metrics over gRPC.\n\n## Configuration\n\nAn OTLP output can be defined using the below format in `gnmic` config file under `outputs` section:\n\n```yaml\noutputs:\n  output1:\n    # required\n    type: otlp\n    # required, address of the OTLP collector\n    endpoint: localhost:4317\n    # string, transport protocol. Only \"grpc\" is supported.\n    # defaults to \"grpc\"\n    protocol: grpc\n    # duration, defaults to 10s.\n    # RPC timeout for each export request.\n    timeout: 10s\n    # tls config\n    tls:\n      # string, path to the CA certificate file,\n      # this will be used to verify the server certificate when `skip-verify` is false\n      ca-file:\n      # string, client certificate file.\n      cert-file:\n      # string, client key file.\n      key-file:\n      # boolean, if true, the client will not verify the server\n      # certificate against the available certificate chain.\n      skip-verify: false\n    # integer, defaults to 1000.\n    # number of events to buffer before sending a batch to the collector.\n    # events are sent every `interval` or when the batch is full, whichever comes first.\n    batch-size: 1000\n    # duration, defaults to 5s.\n    # time interval between export requests.\n    interval: 5s\n    # integer, defaults to 2x batch-size.\n    # size of the internal event buffer.\n    buffer-size: 2000\n    # integer, defaults to 3.\n    # number of retries per export 
request on failure.\n    max-retries: 3\n    # string, to be used as the metric namespace\n    metric-prefix: \"\"\n    # boolean, if true the subscription name will be prepended to the metric name after the prefix.\n    append-subscription-name: false\n    # boolean, if true, string type values are exported as gauge metrics with value=1\n    # and the string stored as an attribute named \"value\".\n    # if false, string values are dropped.\n    strings-as-attributes: false\n    # list of tag keys to place as OTLP Resource attributes.\n    # these tags are excluded from data point attributes.\n    # defaults to empty (all tags become data point attributes).\n    resource-tag-keys:\n      # - device\n      # - vendor\n      # - model\n      # - site\n      # - source\n    # list of regex patterns matched against the value key (metric path).\n    # if any pattern matches, the metric is exported as a monotonic cumulative Sum (counter).\n    # unmatched metrics are exported as Gauges.\n    # defaults to empty (all metrics are Gauges).\n    counter-patterns:\n      # - \"counter\"\n      # - \"octets|packets|bytes\"\n      # - \"errors|discards|drops\"\n    # map of string:string, additional static attributes to add to the OTLP Resource.\n    resource-attributes:\n      # key: value\n    # map of string:string, HTTP headers (or gRPC metadata) to include with every export request.\n    # Use this to set tenant/org identifiers required by multi-tenant backends such as\n    # Grafana Mimir, Loki, or Tempo.\n    headers:\n      # X-Scope-OrgID: my-tenant\n    # integer, defaults to 1.\n    # number of workers processing events.\n    num-workers: 1\n    # boolean, defaults to false.\n    # enables debug logging.\n    debug: false\n    # boolean, defaults to false.\n    # enables the collection and export (via prometheus) of output specific metrics.\n    enable-metrics: false\n    # list of processors to apply on the message before writing\n    event-processors:\n```\n\n## 
Metric Naming\n\nThe metric name is built from up to three parts joined by underscores:\n\n1. The value of `metric-prefix`, if configured.\n2. The subscription name, if `append-subscription-name` is `true`.\n3. The gNMI path (value key), with `/` and `-` replaced by `_`.\n\nFor example, a gNMI update from subscription `port-stats` with path:\n\n```\n/interfaces/interface[name=1/1/1]/state/counters/in-octets\n```\n\nwith `metric-prefix: gnmic` and `append-subscription-name: true`, produces a metric named:\n\n```\ngnmic_port_stats_interfaces_interface_state_counters_in_octets\n```\n\n## Metric Type Detection\n\nMetrics are classified based on the `counter-patterns` configuration:\n\n- **Sum (monotonic counter)**: if the value key matches any regex in `counter-patterns`.\n- **Gauge**: all other numeric values.\n\nBy default `counter-patterns` is empty, so all metrics are exported as Gauges. To classify counter-like metrics, configure the patterns explicitly:\n\n```yaml\ncounter-patterns:\n  - \"counter|octets|packets|bytes\"\n  - \"errors|discards|drops\"\n```\n\nEach pattern is a Go [regexp](https://pkg.go.dev/regexp/syntax) matched against the value key (the gNMI path portion of the metric, **before name transformation**).\n\n## Resource and Data Point Attributes\n\nEvent tags are split between the OTLP Resource and data point attributes based on the `resource-tag-keys` configuration:\n\n- Tags whose keys appear in `resource-tag-keys` are placed as **Resource attributes** and excluded from data point attributes.\n- All remaining tags become **data point attributes** (equivalent to Prometheus labels).\n\nBy default `resource-tag-keys` is empty, so all tags become data point attributes. 
To move device-level metadata to the OTLP Resource (keeping it out of Prometheus labels), configure it explicitly:\n\n```yaml\nresource-tag-keys:\n  - device\n  - vendor\n  - model\n  - site\n  - source\n```\n\nAdditional static attributes can be added to every Resource using `resource-attributes`:\n\n```yaml\nresource-attributes:\n  service.name: gnmic-collector\n  deployment.environment: production\n```\n\n## OTLP Resource Grouping\n\nEvents are grouped by their `source` tag (the target device address). Each unique source becomes a separate OTLP `ResourceMetrics` entry with its own set of resource attributes.\n\n## Custom Headers\n\nThe `headers` field attaches key/value pairs to every export request — as gRPC metadata when using the `grpc` protocol, or as HTTP headers when using `http`.\n\nThis is required by multi-tenant Grafana backends (Mimir, Loki, Tempo) which use the `X-Scope-OrgID` header to route data to the correct tenant:\n\n```yaml\noutputs:\n  mimir-output:\n    type: otlp\n    endpoint: mimir.example.com:4317\n    headers:\n      X-Scope-OrgID: my-tenant-id\n```\n\nMultiple headers can be set simultaneously:\n\n```yaml\nheaders:\n  X-Scope-OrgID: my-tenant-id\n  X-Custom-Header: some-value\n```\n\n## OTLP Output Metrics\n\nWhen `enable-metrics` is set to `true`, the OTLP output exposes the following Prometheus metrics:\n\n| Metric Name | Type | Description |\n|---|---|---|\n| `gnmic_otlp_output_number_of_sent_events_total` | Counter | Number of events successfully sent to the OTLP collector |\n| `gnmic_otlp_output_number_of_failed_events_total` | Counter | Number of events that failed to send |\n| `gnmic_otlp_output_send_duration_seconds` | Histogram | Duration of sending batches to the OTLP collector |\n| `gnmic_otlp_output_rejected_data_points_total` | Counter | Number of data points rejected by the collector (PartialSuccess) |\n"
  },
  {
    "path": "docs/user_guide/outputs/output_intro.md",
    "content": "In the context of gnmi subscriptions (on top of terminal output) `gnmic` supports multiple output options:\n\n* [Local file](file_output.md)\n* [NATS messaging system](nats_output.md)\n* [NATS Streaming messaging bus (STAN)](stan_output.md)\n* [NATS JetStream](jetstream_output.md)\n* [Kafka messaging bus](kafka_output.md)\n* [InfluxDB Time Series Database](influxdb_output.md)\n* [Prometheus Server](prometheus_output.md)\n* [Prometheus Remote Write](prometheus_write_output.md)\n* [UDP Server](udp_output.md)\n* [TCP Server](tcp_output.md)\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/outputs.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2F/outputs.drawio\" async></script>\n\nThese outputs can be mixed and matched at will with the different gnmi subscribe targets.\n\nWith multiple outputs defined in the [configuration file](../configuration_file.md) you can collect once\nand export the subscriptions updates to multiple locations formatted differently.\n\n### Defining outputs\n\nTo define an output a user needs to create the `outputs` section in the configuration file:\n\n```yaml\n# part of ~/gnmic.yml config file\noutputs:\n  output1:\n    type: file # output type\n    file-type: stdout # or stderr\n    format: json\n  output2:\n    type: file\n    filename: /path/to/localFile.log  \n    format: protojson\n  output3:\n    type: nats # output type\n    address: 127.0.0.1:4222 # comma separated nats servers addresses\n    subject-prefix: telemetry #\n    format: event\n  
output4:\n    type: file\n    filename: /path/to/localFile.log  \n    format: json\n  output5:\n    type: stan # output type\n    address: 127.0.0.1:4223 # comma separated nats streaming servers addresses\n    subject: telemetry #\n    cluster-name: test-cluster #\n    format: proto\n  output6:\n    type: kafka # output type\n    address: localhost:9092 # comma separated kafka brokers addresses\n    topic: telemetry # kafka topic\n    format: proto\n  output7:\n    type: stan # output type\n    address: 127.0.0.1:4223 # comma separated nats streaming servers addresses\n    subject: telemetry\n    cluster-name: test-cluster\n```\n\n!!! note\n    Outputs names are case insensitive\n\n#### Output formats\n\nDifferent formats are supported for all outputs\n\n**Format/output** | **proto**                          | **protojson**                   |  **prototext**                      | **json**                       | **event**\n----------------- | ---------------------------------- | --------------------------------| ------------------------------------|--------------------------------|--------------------------------:\n**File**          | <span style=\"color:red\">:x:</span> | <span>:heavy_check_mark:</span> | <span>:heavy_check_mark:</span>     |<span>:heavy_check_mark:</span> |<span>:heavy_check_mark:</span>\n**NATS / STAN**   | <span>:heavy_check_mark:</span>    | <span>:heavy_check_mark:</span> | <span style=\"color:red\">:x: </span> |<span>:heavy_check_mark:</span> |<span>:heavy_check_mark:</span>\n**Kafka**         | <span>:heavy_check_mark:</span>    | <span>:heavy_check_mark:</span> | <span style=\"color:red\">:x: </span> |<span>:heavy_check_mark:</span> |<span>:heavy_check_mark:</span>\n**UDP / TCP**     | <span>:heavy_check_mark:</span>    | <span>:heavy_check_mark:</span> | <span>:heavy_check_mark:</span>     |<span>:heavy_check_mark:</span> |<span>:heavy_check_mark:</span>\n**InfluxDB**      | <span>NA</span>                    | <span>NA</span>            
     | <span>NA</span>                     |<span>NA</span>                 |<span>NA</span>                    \n**Prometheus**    | <span>NA</span>                    | <span>NA</span>                 | <span>NA</span>                     |<span>NA</span>                 |<span>NA</span>                    \n\n#### Formats examples\n\n=== \"protojson\"\n    ```json\n    {\n      \"update\": {\n      \"timestamp\": \"1595491618677407414\",\n      \"prefix\": {\n        \"elem\": [\n          {\n            \"name\": \"configure\"\n          },\n          {\n            \"name\": \"system\"\n          }\n        ]\n      },\n      \"update\": [\n        {\n          \"path\": {\n            \"elem\": [\n              {\n                \"name\": \"name\"\n              }\n            ]\n            },\n            \"val\": {\n              \"stringVal\": \"sr123\"\n            }\n          }\n        ]\n      }\n    }\n    ```\n=== \"prototext\"\n    ```yaml\n    update: {\n      timestamp: 1595491704850352047\n      prefix: {\n        elem: {\n          name: \"configure\"\n        }\n        elem: {\n          name: \"system\"\n        }\n      }\n      update: {\n        path: {\n          elem: {\n            name: \"name\"\n          }\n        }\n        val: {\n          string_val: \"sr123\"\n        }\n      }\n    }\n    ```\n=== \"json\"\n    ```json\n    {\n      \"source\": \"172.17.0.100:57400\",\n      \"subscription-name\": \"sub1\",\n      \"timestamp\": 1595491557144228652,\n      \"time\": \"2020-07-23T16:05:57.144228652+08:00\",\n      \"prefix\": \"configure/system\",\n      \"updates\": [\n        {\n          \"Path\": \"name\",\n          \"values\": {\n            \"name\": \"sr123\"\n          }\n        }\n      ]\n    }\n    ```\n=== \"event\"\n    ```json\n    [\n      {\n        \"name\": \"sub1\",\n        \"timestamp\": 1595491586073072000,\n        \"tags\": {\n          \"source\": \"172.17.0.100:57400\",\n          
\"subscription-name\": \"sub1\"\n      },\n        \"values\": {\n          \"/configure/system/name\": \"sr123\"\n        }\n      }\n    ]\n    ```\n\n### Binding outputs\n\nOnce the outputs are defined, they can be flexibly associated with the targets.\n\n```yaml\n# part of ~/gnmic.yml config file\ntargets:\n  router1.lab.com:\n    username: admin\n    password: secret\n    outputs:\n      - output1\n      - output3\n  router2.lab.com:\n    username: gnmi\n    password: telemetry\n    outputs:\n      - output2\n      - output3\n      - output4\n```\n\n### Caching\n\nBy default, `gNMIc` outputs write the received gNMI updates as they arrive (i.e without caching).\n\nCaching messages before writing them to a remote location can yield a few benefits like **rate limiting**, **batch processing**, **data replication**, etc.\n\nBoth `influxdb` and `prometheus` outputs support caching messages before exporting.\nCaching support for other outputs is planned.\n\nSee more details about caching [here](../caching.md)\n"
  },
  {
    "path": "docs/user_guide/outputs/prometheus_output.md",
    "content": "## Introduction\n\ngNMIc offers the capability to present gNMI updates on a Prometheus server, allowing a Prometheus system to perform scrapes.\n\nThe Prometheus metric name and its labels are generated according to the subscription name, gNMI path, and the value name.\n\nTo define a gNMIc Prometheus output, use the following format in the gnmic configuration file under the outputs section:\n\n```yaml\noutputs:\n  sample-prom-output:\n    type: prometheus # required\n    # address to listen on for incoming scrape requests\n    listen: :9804 \n    # path to query to get the metrics\n    path: /metrics \n    # maximum lifetime of metrics in the local cache, #\n    # a zero value defaults to 60s, a negative duration (e.g: -1s) disables the expiration\n    expiration: 60s \n    # a string to be used as the metric namespace\n    metric-prefix: \"\" \n    # a boolean, if true the subscription name will be appended to the metric name after the prefix\n    append-subscription-name: false \n    # boolean, if true the message timestamp is changed to current time\n    override-timestamps: false\n    # a boolean, enables exporting timestamps received from the gNMI target as part of the metrics\n    export-timestamps: false \n    # a boolean, enables setting string type values as prometheus metric labels.\n    strings-as-labels: false\n    # tls config\n    tls:\n      # string, path to the CA certificate file,\n      # this certificate is used to verify the clients certificates.\n      ca-file:\n      # string, server certificate file.\n      cert-file:\n      # string, server key file.\n      key-file:\n      # string, one of `\"\", \"request\", \"require\", \"verify-if-given\", or \"require-verify\" \n      #  - request:         The server requests a certificate from the client but does not \n      #                     require the client to send a certificate. 
\n      #                     If the client sends a certificate, it is not required to be valid.\n      #  - require:         The server requires the client to send a certificate and does not \n      #                     fail if the client certificate is not valid.\n      #  - verify-if-given: The server requests a certificate, \n      #                     does not fail if no certificate is sent. \n      #                     If a certificate is sent it is required to be valid.\n      #  - require-verify:  The server requires the client to send a valid certificate.\n      #\n      # if no ca-file is present, `client-auth` defaults to \"\"`\n      # if a ca-file is set, `client-auth` defaults to \"require-verify\"`\n      client-auth: \"\"\n    # see https://gnmic.openconfig.net/user_guide/caching/, \n    # if enabled, the received gNMI notifications are stored in a cache.\n    # the prometheus metrics are generated at the time a prometheus server sends scrape request.\n    # this behavior allows the processors (if defined) to be run on all the generated events at once.\n    # this mode uses more resource compared to the default one, but offers more flexibility when it comes \n    # to manipulating the data to customize the returned metrics using event-processors.\n    cache:\n    # duration, scrape request timeout.\n    # this timer is started when a scrape request is received, \n    # if it is reached, the metrics generation/collection is stopped.\n    timeout: 10s\n    # enable debug for prometheus output\n    debug: false \n    # string, one of `overwrite`, `if-not-present`, ``\n    # This field allows populating/changing the value of Prefix.Target in the received message.\n    # if set to ``, nothing changes \n    # if set to `overwrite`, the target value is overwritten using the template configured under `target-template`\n    # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template`\n    add-target: 
\n    # string, a GoTemplate that allows for the customization of the target field in Prefix.Target.\n    # it applies only if the previous field `add-target` is not empty.\n    # if left empty, it defaults to:\n    # {{- if index . \"subscription-target\" -}}\n    # {{ index . \"subscription-target\" }}\n    # {{- else -}}\n    # {{ index . \"source\" | host }}\n    # {{- end -}}`\n    # which will set the target to the value configured under `subscription.$subscription-name.target` if any,\n    # otherwise it will set it to the target name stripped of the port number (if present)\n    target-template:\n    # list of processors to apply on the message before writing\n    event-processors: \n    # an integer, sets the number of workers handling messages to be converted into Prometheus metrics\n    num-workers: 1\n    # Enables Consul service registration\n    service-registration:\n      # Consul server address, default to localhost:8500\n      address:\n      # Consul Data center, defaults to dc1\n      datacenter: \n      # Consul username, to be used as part of HTTP basicAuth\n      username:\n      # Consul password, to be used as part of HTTP basicAuth\n      password:\n      # Consul Token, is used to provide a per-request ACL token which overrides the agent's default token\n      token:\n      # address and port number to be registered as a service address in Consul.\n      # if the field is empty the address is derived from the listen field.\n      # if the address does not contain a port number, the port number from the listen field is used.\n      service-address: \n      # Prometheus service check interval, for both http and TTL Consul checks,\n      # defaults to 5s\n      check-interval:\n      # Maximum number of failed checks before the service is deleted by Consul\n      # defaults to 3\n      max-fail:\n      # Consul service name\n      name:\n      # List of tags to be added to the service registration, \n      # if available, the instance-name and 
cluster-name will be added as tags,\n      # in the format: gnmic-instance=$instance-name and gnmic-cluster=$cluster-name\n      tags:\n      # bool, enables http service check on top of the TTL check\n      enable-http-check:\n      # string, if enable-http-check is true, this field can be used to specify the http endpoint to be used for the check\n      # if provided, this field will be prepended with 'http://' (if not already present), \n      # and appended with the value in 'path' field.\n      # if not specified, it will be derived from the fields 'listen' and 'path'\n      http-check-address:\n      # if set to true, the gnmic instance will try to acquire a lock before registering the prometheus output in consul.\n      # this allows to register a single instance of the cluster in consul.\n      # if the instance which acquired the lock fails, one of the remaining ones will take over.\n      use-lock: false\n```\n\n## Fields definition\n\n### **type**\n\n  The output type, `prometheus` in this case.\n\n### **listen**\n\n  Address to listen on for incoming scrape requests, defaults to `:9804`\n\n### **path**\n\n  URL Path to query in order to retrieve the metrics, defaults to `/metrics`\n\n### **expiration**\n\n  Maximum lifetime of metrics in the local cache,\n  A zero value defaults to 60s, a negative duration (e.g: -1s) disables the expiration\n\n### **metric-prefix**\n\n  A string to be used as the metric namespace\n\n### **append-subscription-name**\n\n  A boolean, if true the subscription name will be appended to the metric name after the prefix\n\n### **override-timestamps**\n\n  A boolean, if true the message timestamp is changed to current time\n  \n### **export-timestamps**\n\n  A boolean, enables exporting timestamps received from the gNMI target as part of the metrics\n  \n### **strings-as-labels**\n\n  A boolean, enables setting string type values as prometheus metric labels.\n\n### **tls**\n\n#### **ca-file**\n\n  A string, path to the CA 
certificate file.\n  This certificate is used to verify the clients certificates.\n\n#### **cert-file**\n\n  A string, path to server certificate file.\n\n#### **key-file**\n\n  A string, server key file.\n\n#### **client-auth**\n\n  A string, use to control whether the server requests a client certificate or not and how it validates it.\n  \n  One of:\n\n   - \"\": \n\n      The server does not request a certificate from the client.\n\n   - \"request\":\n      \n      The server requests a certificate from the client but does not require the client to send a certificate.\n      If the client sends a certificate, it is not required to be valid.\n\n   - \"require\":\n\n      The server requires the client to send a certificate and does not fail if the client certificate is not valid.\n\n   - \"verify-if-given\":\n\n      The server requests a certificate, does not fail if no certificate is sent. If a certificate is sent it is required to be valid.\n\n   - \"require-verify\":\n\n      The server requires the client to send a valid certificate.\n  \n  If the ca-file is not provided, the default value for client-auth is an empty string (\"\").\n\n  However, if a ca-file is specified, the default value for client-auth becomes \"require-verify\".\n\n### **cache**\n\n  Refer to the [cache docs](https://gnmic.openconfig.net/user_guide/caching) for more information.\n\n  When enabled, gNMI notifications are stored in a cache upon receipt.\n  Prometheus metrics are subsequently generated when a Prometheus system sends a scrape request.\n  This approach allows processors (if defined) to operate on all generated events simultaneously.\n  While this mode consumes more resources compared to the default, it provides increased flexibility for data manipulation and metric customization through the use of event-processors.\n\n### **timeout**\n\n  A Duration such as 10s, 1m or 1m30s, defines the scrape request timeout.\n  This timer is started when a scrape request is received from a 
Prometheus system.\n  If the timer is reached, the metrics generation/collection is stopped.\n\n### **debug**\n\n  A boolean. Enables debug for prometheus output\n\n### **add-target**\n\n  A string, one of `overwrite`, `if-not-present` or ``.\n  This field allows populating/changing the value of Prefix.Target in the received message.\n\n  If left empty (\"\"), no changes will be made.\n\n  If set to \"overwrite\", the target value will be replaced with the configuration specified under target-template.\n\n  If set to \"if-not-present\", the target value will be populated only if it is empty, utilizing the target-template.\n\n### **target-template**\n\n  A string, a GoTemplate that allows for the customization of the target field in `Prefix.Target`.\n  It applies only if the previous field `add-target` is not empty.\n  If `target-template` is left empty, it defaults to:\n  \n  ```\n  {{- if index . \"subscription-target\" -}}\n  {{ index . \"subscription-target\" }}\n  {{- else -}}\n  {{ index . \"source\" | host }}\n  {{- end -}}\n  ```\n\n  The above template sets the target to the value configured under `subscription.$subscription-name.target` if any,\n  otherwise it will set it to the target name stripped of the port number (if present)\n\n### **event-processors**\n\n  A string list. 
List of processors to apply on the message before writing\n\n### **service-registration**\n  \n  Enables Consul service registration\n\n#### address\n\n  Consul server address, default to localhost:8500\n\n#### datacenter\n\n  Consul Data center, defaults to dc1\n\n#### username\n\n  Consul username, to be used as part of HTTP basicAuth\n\n#### password\n\n  Consul password, to be used as part of HTTP basicAuth\n\n#### token\n\n  Consul Token, is used to provide a per-request ACL token which overrides the agent's default token\n\n#### service-address\n\n  Address and port number to be registered as a service address in Consul.\n  if the field is empty the address is derived from the listen field.\n  if the address does not contain a port number, the port number from the listen field is used.\n\n#### check-interval\n\n  Prometheus service check interval, for both http and TTL Consul checks, defaults to 5s\n\n#### max-fail\n\n  Maximum number of failed checks before the service is deleted by Consul\n  defaults to 3\n\n#### name\n\n  Consul service name\n\n#### tags\n\n  List of tags to be added to the service registration, \n  if available, the instance-name and cluster-name will be added as tags,\n  in the format: gnmic-instance=$instance-name and gnmic-cluster=$cluster-name\n  \n#### enable-http-check\n\n  A boolean, enables http service check on top of the TTL check\n\n#### http-check-address\n\n  A string, if enable-http-check is true, this field can be used to specify the http endpoint to be used for the check\n  if provided, this field will be prepended with 'http://' (if not already present),\n  and appended with the value in 'path' field.\n  if not specified, it will be derived from the fields 'listen' and 'path'\n\n#### use-lock\n\n  A boolean, if set to true, the gnmic instance will try to acquire a lock before registering the prometheus output.\n  This knob allows to register a single instance of the cluster in Consul.\n  if the instance which acquired the 
lock fails, one of the remaining ones takes over by acquiring the lost lock.\n\n## Metric Generation\n\nThe below diagram shows an example of a prometheus metric generation from a gnmi update\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/prometheus_transformation.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fprometheus_transformation.drawio\" async></script>\n\n### **Metric Naming**\n\nThe metric name starts with the string configured under __metric-prefix__. \n\nThen if __append-subscription-name__ is `true`, the __subscription-name__ as specified in `gnmic` configuration file is appended.\n\nThe resulting string is followed by the gNMI __path__ stripped of its keys if there are any.\n\nAll non-alphanumeric characters are replaced with an underscore \"`_`\"\n\nThe 3 strings are then joined with an underscore \"`_`\"\n\nIf further customization of the metric name is required, the [processors](../event_processors/intro.md) can be used to transform the metric name.\n\nFor example, a gNMI update from subscription `port-stats` with path:\n\n```bash\n/interfaces/interface[name=1/1/1]/subinterfaces/subinterface[index=0]/state/counters/in-octets\n```\n\nis exposed as a metric named:\n\n```bash\ngnmic_port_stats_interfaces_interface_subinterfaces_subinterface_state_counters_in_octets\n```\n\n### **Metric Labels**\n\nThe metrics labels are generated from the subscription metadata (e.g: `subscription-name` and `source`) and the keys present in the gNMI path elements.\n\nFor the previous 
example the labels would be:\n\n```bash\n{interface_name=\"1/1/1\",subinterface_index=0,source=\"$routerIP:Port\",subscription_name=\"port-stats\"}\n```\n\n## Service Registration\n\n`gnmic` supports `prometheus_output` service registration via `Consul`.\n\nIt allows `prometheus` to dynamically discover new instances of `gnmic` exposing a prometheus server ready for scraping via its [service discovery feature](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config).\n\nIf the configuration section `service-registration` is present under the output definition, `gnmic` will register the `prometheus_output` service in `Consul`.\n\n### **Configuration Example**\n\nThe below configuration, registers a service name `gnmic-prom-srv` with `IP=10.1.1.1` and `port=9804`\n\n```yaml\n# gnmic.yaml\noutputs:\n  output1:\n    type: prometheus\n    listen: 10.1.1.1:9804\n    path: /metrics \n    service-registration:\n      address: consul-agent.local:8500\n      name: gnmic-prom-srv\n```\n\nThis allows running multiple instances of `gnmic` with minimal configuration changes by using `prometheus` [service discovery feature](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config).\n\nSimplified scrape configuration in the prometheus client:\n\n```yaml\n# prometheus.yaml\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s\n    consul_sd_configs:\n      - server: $CONSUL_ADDRESS\n        services:\n          - $service_name\n```\n\n### **Service Name and ID**\n\nThe `$service_name` to be discovered by `prometheus` is configured under `outputs.$output_name.service-registration.name`.\n\nIf the service registration name field is not present, the name `prometheus-${output_name}` will be used.\n\nIn both cases the service ID will be `prometheus-${output_name}-${instance_name}`.\n\n### **Service Checks**\n\n`gnmic` registers the service in `Consul` with a `ttl` check enabled by default:\n\n* `ttl`: 
`gnmic` periodically updates the service definition in `Consul`. The goal is to allow `Consul` to detect a same instance restarting with a different service name.\n\nIf `service-registration.enable-http-check` is `true`, an `http` check is added:\n\n* `http`: `Consul` periodically scrapes the prometheus server endpoint to check its availability.\n\n```yaml\n# gnmic.yaml\noutputs:\n  output1:\n    type: prometheus\n    listen: 10.1.1.1:9804\n    path: /metrics \n    service-registration:\n      address: consul-agent.local:8500\n      name: gnmic-prom-srv\n      enable-http-check: true\n```\n\nNote that for the `http` check to work properly, a reachable address ( IP or name ) should be specified under `listen`.\n\nOtherwise, a reachable address should be added under `service-registration.http-check-address`.\n\n## Caching\n\nWhen caching is enabled, the received messages are not immediately converted into metrics, they are cached as gNMI updates.\nThe conversion from gNMI update to Prometheus metrics happens only when a scrape request is received.\n\nThe below diagram shows how a `prometheus` output works with and without cache enabled:\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:10,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/prometheus_output_with_without_cache.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2F/prometheus_output_with_without_cache.drawio\" async></script>\n\nWhen caching is enabled, the received gNMI updates are not processed and converted into metrics immediately, they are rather stored as is in the configured 
gNMI cache.\n\nOnce a scrape request is received from `Prometheus`, all the cached gNMI updates are retrieved from the cache, converted to [events](../event_processors/intro.md#the-event-format), the configured processors, if any, are then applied to the whole list of events. Finally, the resulting events are converted into metrics and written back to `Prometheus` within the scrape response.\n\n## Prometheus Output Metrics\n\nWhen a Prometheus server (gNMI API) is enabled, `gnmic` prometheus output exposes 2 prometheus Gauges:\n\n* `number_of_prometheus_metrics_total`: Number of metrics stored by the prometheus output.\n* `number_of_prometheus_cached_metrics_total`: Number of metrics cached by the prometheus output.\n\n## Examples\n\n### **A simple Prometheus output**\n\nA basic Prometheus output utilizing all default values converts each received gNMI update into a Prometheus metric, retaining it in the cache until a scrape request is received from a Prometheus system.\n\n```yaml\noutputs:\n  simple-prom:\n    type: prometheus\n```\n\n### **Promote string values to labels**\n\nA straightforward Prometheus output, utilizing default values for the most part, transforms each incoming gNMI update into a Prometheus metric. 
In this process, if a value is a string, it is incorporated as a label in the final metric.\n\nThese metrics are retained in the cache, awaiting a scrape request from a Prometheus system.\n\n```yaml\noutputs:\n  simple-prom:\n    type: prometheus\n    strings-as-labels: true\n```\n\n### **Use a gNMI cache**\n\nA Prometheus output leveraging a gNMI cache stores incoming gNMI updates in their original form, only converting them into Prometheus metrics upon receiving a scrape request from a Prometheus system.\n\nThis mode enables batch processing of all updates simultaneously during their conversion into Prometheus metrics.\n\n```yaml\noutputs:\n  simple-prom:\n    type: prometheus\n    cache: {}\n```\n\n### **Register as a Consul service**\n\nA Prometheus output that dynamically registers its endpoint within Consul, enabling the Prometheus system to seamlessly discover the associated address and port number.\n\n```yaml\noutputs:\n  simple-prom:\n    type: prometheus\n    service-registration:\n      address: consul-server-address:8500\n```\n"
  },
  {
    "path": "docs/user_guide/outputs/prometheus_write_output.md",
    "content": "`gnmic` supports writing metrics to Prometheus using its [remote write API](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/).\n\n`gNMIc`'s prometheus remote write can be used to push metrics to a variety of monitoring systems like [Prometheus](https://prometheus.io), [Mimir](https://grafana.com/oss/mimir/), [CortexMetrics](https://cortexmetrics.io/), [VictoriaMetrics](https://victoriametrics.com/), [Thanos](https://thanos.io/)...\n\nA Prometheus write output can be defined using the below format in `gnmic` config file under `outputs` section:\n\n```yaml\noutputs:\n  output1:\n    # required\n    type: prometheus_write\n    # url to push metrics towards, scheme is required\n    url: http://<grafana-mimir-addr>:9009/api/v1/push\n    # a map of string:string, \n    # custom HTTP headers to be sent along with each remote write request.\n    headers:\n      # header: value\n    # sets the `Authorization` header on every remote write request with the\n    # configured username and password.\n    authentication:\n      username:\n      password:\n    # sets the `Authorization` header with type `.authorization.type` and the token value.\n    authorization:\n      type: Bearer\n      credentials: <token string>\n    # tls config\n    tls:\n      # string, path to the CA certificate file,\n      # this will be used to verify the clients certificates when `skip-verify` is false\n      ca-file:\n      # string, client certificate file.\n      cert-file:\n      # string, client key file.\n      key-file:\n      # boolean, if true, the client will not verify the server\n      # certificate against the available certificate chain.\n      skip-verify: false\n    # duration, defaults to 10s, time interval between write requests\n    interval: 10s\n    # integer, defaults to 1000.\n    # Buffer size for time series to be sent to the remote system.\n    # metrics are sent to the remote system every `.interval` or when the buffer 
is full. Whichever one is reached first.\n    buffer-size: 1000\n    # integer, defaults to 500, sets the maximum number of timeSeries per write request to remote.\n    max-time-series-per-write: 500\n    # integer, defaults to 0\n    # number of retries per write, retries will have a back off of 100ms.\n    max-retries: 0\n    # metadata configuration\n    metadata:\n      # boolean, \n      # if true, metrics metadata is sent.\n      include: false\n      # duration, defaults to 60s.\n      # Applies if `metadata.include` is set to true\n      # Interval after which all metadata entries are sent to the remote write address\n      interval: 60s\n      # integer, defaults to 500\n      # applies if `metadata.include` is set to true\n      # Max number of metadata entries per write request.\n      max-entries-per-write: 500\n    # string, to be used as the metric namespace\n    metric-prefix: \"\" \n    # boolean, if true the subscription name will be appended to the metric name after the prefix\n    append-subscription-name: false \n    # boolean, enables setting string type values as prometheus metric labels.\n    strings-as-labels: false\n    # duration, defaults to 10s\n    # Push request timeout.\n    timeout: 10s\n    # boolean, defaults to false\n    # Enables debug for prometheus write output.\n    debug: false \n    # string, one of `overwrite`, `if-not-present`, ``\n    # This field allows populating/changing the value of Prefix.Target in the received message.\n    # if set to ``, nothing changes \n    # if set to `overwrite`, the target value is overwritten using the template configured under `target-template`\n    # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template`\n    add-target: \n    # string, a GoTemplate that allow for the customization of the target field in Prefix.Target.\n    # it applies only if the previous field `add-target` is not empty.\n    # if left empty, it defaults to:\n    
# {{- if index . \"subscription-target\" -}}\n    # {{ index . \"subscription-target\" }}\n    # {{- else -}}\n    # {{ index . \"source\" | host }}\n    # {{- end -}}`\n    # which will set the target to the value configured under `subscription.$subscription-name.target` if any,\n    # otherwise it will set it to the target name stripped of the port number (if present)\n    target-template:\n    # list of processors to apply on the message before writing\n    event-processors: \n    # an integer, sets the number of worker handling messages to be converted into Prometheus metrics\n    num-workers: 1\n    # an integer, sets the number of writers draining the buffer and writing to Prometheus\n    num-writers: 1\n```\n\n`gnmic` creates the prometheus metric name and its labels from the subscription name, the gnmic path and the value name.\n\n## Metric Generation\n\nThe below diagram shows an example of a prometheus metric generation from a gnmi update\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:12,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/prometheus_transformation.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Fprometheus_transformation.drawio\" async></script>\n\n### Metric Naming\n\nThe metric name starts with the string configured under __metric-prefix__. 
\n\nThen if __append-subscription-name__ is `true`, the __subscription-name__ as specified in `gnmic` configuration file is appended.\n\nThe resulting string is followed by the gNMI __path__ stripped of its keys if there are any.\n\nAll non-alphanumeric characters are replaced with an underscore \"`_`\"\n\nThe 3 strings are then joined with an underscore \"`_`\"\n\nIf further customization of the metric name is required, the [processors](../event_processors/intro.md) can be used to transform the metric name.\n\nFor example, a gNMI update from subscription `port-stats` with path:\n\n```bash\n/interfaces/interface[name=1/1/1]/subinterfaces/subinterface[index=0]/state/counters/in-octets\n```\n\nis exposed as a metric named:\n\n```bash\ngnmic_port_stats_interfaces_interface_subinterfaces_subinterface_state_counters_in_octets\n```\n\n### Metric Labels\n\nThe metrics labels are generated from the subscription metadata (e.g: `subscription-name` and `source`) and the keys present in the gNMI path elements.\n\nFor the previous example the labels would be:\n\n```bash\n{interface_name=\"1/1/1\",subinterface_index=0,source=\"$routerIP:Port\",subscription_name=\"port-stats\"}\n```\n\n## Prometheus Write Metrics\n\nWhen a Prometheus server (gNMI API) is enabled, `gnmic` prometheus write output exposes 4 prometheus counters and 2 prometheus Gauges:\n\n* `number_of_prometheus_write_msgs_sent_success_total`: Number of msgs successfully sent by gnmic prometheus_write output.\n* `number_of_prometheus_write_msgs_sent_fail_total`: Number of failed msgs sent by gnmic prometheus_write output.\n* `msg_send_duration_ns`: gnmic prometheus_write output send duration in ns.\n\n* `number_of_prometheus_write_metadata_msgs_sent_success_total`: Number of metadata msgs successfully sent by gnmic prometheus_write output.\n* `number_of_prometheus_write_metadata_msgs_sent_fail_total`: Number of failed metadata msgs sent by gnmic prometheus_write output.\n* `metadata_msg_send_duration_ns`: gnmic 
prometheus_write output metadata send duration in ns.\n"
  },
  {
    "path": "docs/user_guide/outputs/snmp_output.md",
    "content": "`gnmic` supports generating SNMP traps based on received gNMI updates.\n\nThis output type is useful when trying to integrate legacy systems that ingest SNMP traps with more modern telemetry/alarms stacks.\n\nOnly SNMPv2c is supported.\n\n## Configuration\n\nThe SNMP output can be defined using the below format in `gnmic` config file under `outputs` section:\n\n```yaml\noutputs:\n  # the output name\n  snmp_trap: \n    # the output type\n    type: snmp\n    # the traps destination address\n    address:\n    # the trap destination port, defaults to 162\n    port: 162\n    # the SNMP trap community\n    community: public\n    # duration, wait time before the first trap evaluation.\n    # defaults to 5s and minimum allowed value is 5s.\n    start-delay: 5s\n    # traps definition\n    traps:\n        # if true, the SNMP message generated is an inform request, not a trap.\n      - inform: false\n        # trap trigger definition,\n        # the trigger section of the trap defines which received path trigger the trap\n        # as well as the variable binding to append to it.\n        trigger:\n          # xpath, if present in the received event message, the trap is triggered\n          path:\n          # a jq script that is executed with the trigger event message as input.\n          # must return a valid OID.\n          oid:\n          # a static string, defining the type of the OID value,\n          # one of: bool, int, bitString, octetString, null, objectID, objectDescription,\n          # ipAddress, counter32, gauge32, timeTicks, opaque, nsapAddress, counter64, \n          # uint32, opaqueFloat, opaqueDouble\n          type:\n          # a jq script that is executed with the trigger event message as input.\n          # must return a value matching the above configured type.\n          value:\n        # trap variable bindings definition,\n        # the bindings section defines the extra variable bindings to append to the trap.\n        # multiple 
bindings can be defined here.\n        bindings:\n            # A jq script that is executed with the trigger message as input.\n            # Must return a valid xpath.\n            # The local cache is queried using the resulting xpath, the resulting event message is used \n            # as input to execute the below oid and value jq scripts\n          - path:\n            # A jq script that is executed with the message obtained from the cache as input.\n            # must return a valid OID. \n            oid:\n            # a static string, defining the type of the OID value,\n            # one of: bool, int, bitString, octetString, null, objectID, objectDescription,\n            # ipAddress, counter32, gauge32, timeTicks, opaque, nsapAddress, counter64, \n            # uint32, opaqueFloat, opaqueDouble\n            type:\n            # A jq script that is executed with the message obtained from the cache as input.\n            # must return a value matching the above configured type.\n            value:\n```\n\n## How does it work?\n\nThe SNMP output stores each received update message in a local cache (1.a), then checks if the message should trigger any of the configured traps (1.b).\n\nIf the received message triggers a trap (2), an SNMP variable binding is generated from the trap `trigger` configuration section (`OID`, `type` and `value`) based on the triggering event.\nThe `OID` and `value` can be [jq](https://github.com/itchyny/gojq) scripts.\n\nThen (3) for each configured binding, the configured `path` (`jq` script) is rendered based on the triggering event then used to retrieve an event message from the cache, that message is then used to generate the variable binding (`OID`, `type` and `value`).\n\nOnce all bindings are generated, a `sysUpTimeInstance` (OID=`1.3.6.1.2.1.1.3.0`) binding is prepended to the PDU list of the trap, its value is the number of seconds since `gNMIc` SNMP output startup.\n\n<div class=\"mxgraph\" 
style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:0,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/snmp_output.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fopenconfig%2Fgnmic%2Fdiagrams%2Fsnmp_output.drawio\" async></script>\n\n## Metrics\n\nThe SNMP output exposes 4 Prometheus metrics:\n\n- Number of failed trap generation\n\n- Number of SNMP trap sending failures\n\n- SNMP trap generation duration in ns\n\n```text\ngnmic_snmp_output_number_of_snmp_trap_failed_generation{name=\"snmp_trap\",reason=\"\",trap_index=\"0\"} 0\ngnmic_snmp_output_number_of_snmp_trap_sent_fail_total{name=\"snmp_trap\",reason=\"\",trap_index=\"0\"} 0\ngnmic_snmp_output_number_of_snmp_traps_sent_total{name=\"snmp_trap\",trap_index=\"0\"} 114\ngnmic_snmp_output_snmp_trap_generation_duration_ns{name=\"snmp_trap\",trap_index=\"0\"} 380215\n```\n\n## Examples\n\n### interface operational state trap\n\nThe below example generates an SNMPV2 trap whenever the operational state of an interface changes (`ifOperStatus`).\n\nIt adds `sysName`, `ifAdminStatus` and `ifIndex` variable bindings to the trap before sending it out.\n\n```yaml\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\n\ntargets:\n  clab-snmp-srl1:\n  clab-snmp-srl2:\n\nsubscriptions:\n  sub1:\n    paths:\n      - /interface/admin-state\n      - /interface/oper-state\n      - /interface/ifindex\n      - /system/name/host-name\n    stream-mode: on-change\n    encoding: ascii\n\noutputs:\n  snmp_trap:\n    type: snmp\n    address: snmptrap.server\n    # port: 162\n    # community: public\n    traps:\n      - trigger:\n          path: 
/interface/oper-state # static path\n          oid: '\".1.3.6.1.2.1.2.2.1.8\"' # ifOperStatus\n          type: int\n          value: if (.values.\"/interface/oper-state\" == \"up\") \n                  then 1 \n                  else 2 \n                  end\n        bindings:         \n          - path: '\"/system/name/host-name\"' # jq script\n            oid: '\".1.3.6.1.2.1.1.5\"' # sysName\n            type: octetString\n            value: '.values.\"/system/name/host-name\"'\n\n          - path: '\"/interface[name=\"+.tags.interface_name+\"]/admin-state\"' # jq script\n            oid: '\".1.3.6.1.2.1.2.2.1.7\"' # ifAdminStatus\n            type: int\n            value: if (.values.\"/interface/admin-state\" == \"enable\") \n                    then 1 \n                    else 2 \n                    end\n\n          - path: '\"/interface[name=\"+.tags.interface_name+\"]/ifindex\"' # jq script\n            oid: '\".1.3.6.1.2.1.2.2.1.1\"' # ifIndex\n            type: int\n            value: '.values.\"/interface/ifindex\" | tonumber' # jq script\n```\n"
  },
  {
    "path": "docs/user_guide/outputs/stan_output.md",
    "content": "`gnmic` supports exporting subscription updates to multiple NATS Streaming (STAN) servers/clusters simultaneously\n\nA STAN output can be defined using the below format in `gnmic` config file under `outputs` section:\n\n```yaml\noutputs:\n  output1:\n    type: stan # required\n    # comma separated STAN servers\n    address: localhost:4222\n    # stan subject\n    subject: telemetry \n     # stan subject prefix, the subject prefix is built the same way as for NATS output\n    subject-prefix: telemetry\n    # STAN username\n    username:\n    # STAN password\n    password: \n    # STAN publisher name\n    # if left empty, this field is populated with the output name used as output ID (output1 in this example).\n    # the full name will be '$(name)-stan-pub'.\n    # If the flag --instance-name is not empty, the full name will be '$(instance-name)-$(name)-stan-pub.\n    # note that each stan worker (publisher) will get client name=$name-$index\n    name: \"\"\n    # cluster name, mandatory\n    cluster-name: test-cluster\n    # STAN ping interval\n    ping-interval: 5\n    # STAN ping retry\n    ping-retry: 2\n    # string, message marshaling format, one of: proto, prototext, protojson, json, event\n    format:  event \n    # string, one of `overwrite`, `if-not-present`, ``\n    # This field allows populating/changing the value of Prefix.Target in the received message.\n    # if set to ``, nothing changes \n    # if set to `overwrite`, the target value is overwritten using the template configured under `target-template`\n    # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template`\n    add-target: \n    # string, a GoTemplate that allow for the customization of the target field in Prefix.Target.\n    # it applies only if the previous field `add-target` is not empty.\n    # if left empty, it defaults to:\n    # {{- if index . \"subscription-target\" -}}\n    # {{ index . 
\"subscription-target\" }}\n    # {{- else -}}\n    # {{ index . \"source\" | host }}\n    # {{- end -}}`\n    # which will set the target to the value configured under `subscription.$subscription-name.target` if any,\n    # otherwise it will set it to the target name stripped of the port number (if present)\n    target-template:\n    # boolean, if true the message timestamp is changed to current time\n    override-timestamps: false\n    # duration to wait before re establishing a lost connection to a stan server\n    recovery-wait-time: 2s\n    # integer, number of stan publishers to be created\n    num-workers: 1 \n    # boolean, enables extra logging for the STAN output\n    debug: false \n    # duration after which a message waiting to be handled by a worker gets discarded\n    write-timeout: 10s \n    # boolean, enables the collection and export (via prometheus) of output specific metrics\n    enable-metrics: false \n    # list of processors to apply on the message before writing\n    event-processors: \n```\n\nUsing `subject` config value a user can specify the STAN subject to which to send all subscriptions updates for all targets\n\nIf a user wants to separate updates by targets and by subscriptions, `subject-prefix` can be used. if `subject-prefix` is specified `subject` is ignored.\n"
  },
  {
    "path": "docs/user_guide/outputs/tcp_output.md",
    "content": "`gnmic` supports exporting subscription updates to a TCP server\n\nA TCP output can be defined using the below format in `gnmic` config file under `outputs` section:\n\n```yaml\noutputs:\n  output1:\n    # required\n    type: tcp \n    # a TCP server address \n    address: IPAddress:Port \n    # maximum sending rate, e.g: 1ns, 10ms\n    rate: 10ms \n    # number of messages to buffer in case of sending failure\n    buffer-size:\n    # export format. json, protobuf, prototext, protojson, event\n    format: json \n    # string, one of `overwrite`, `if-not-present`, ``\n    # This field allows populating/changing the value of Prefix.Target in the received message.\n    # if set to ``, nothing changes \n    # if set to `overwrite`, the target value is overwritten using the template configured under `target-template`\n    # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template`\n    add-target: \n    # string, a GoTemplate that allow for the customization of the target field in Prefix.Target.\n    # it applies only if the previous field `add-target` is not empty.\n    # if left empty, it defaults to:\n    # {{- if index . \"subscription-target\" -}}\n    # {{ index . \"subscription-target\" }}\n    # {{- else -}}\n    # {{ index . 
\"source\" | host }}\n    # {{- end -}}`\n    # which will set the target to the value configured under `subscription.$subscription-name.target` if any,\n    # otherwise it will set it to the target name stripped of the port number (if present)\n    target-template:\n    # boolean, valid only if format is `event`.\n    # if true, arrays of events are split and marshaled as JSON objects instead of an array of dicts.\n    split-events: false\n    # boolean, if true the message timestamp is changed to current time\n    override-timestamps: false\n    # string, a delimiter to be sent after each message.\n    # useful when writing to logstash TCP input.\n    delimiter:\n    # enable TCP keepalive and specify the timer, e.g: 1s, 30s\n    keep-alive: \n    # time duration to wait before re-dial in case there is a failure\n    retry-interval: \n    # NOT IMPLEMENTED boolean, enables the collection and export (via prometheus) of output specific metrics\n    enable-metrics: false \n    # list of processors to apply on the message before writing\n    event-processors: \n```\n\nA TCP output can be used to export data to an ELK stack, using [Logstash TCP input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-tcp.html)"
  },
  {
    "path": "docs/user_guide/outputs/udp_output.md",
    "content": "`gnmic` supports exporting subscription updates to a UDP server\n\nA UDP output can be defined using the below format in `gnmic` config file under `outputs` section:\n\n```yaml\noutputs:\n  output1:\n    # required\n    type: udp \n    # a UDP server address \n    address: IPAddress:Port\n    # maximum sending rate, e.g: 1ns, 10ms\n    rate: 10ms \n    # number of messages to buffer in case of sending failure\n    buffer-size: \n    # export format. json, protobuf, prototext, protojson, event\n    format: json \n    # string, one of `overwrite`, `if-not-present`, ``\n    # This field allows populating/changing the value of Prefix.Target in the received message.\n    # if set to ``, nothing changes \n    # if set to `overwrite`, the target value is overwritten using the template configured under `target-template`\n    # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template`\n    add-target: \n    # string, a GoTemplate that allow for the customization of the target field in Prefix.Target.\n    # it applies only if the previous field `add-target` is not empty.\n    # if left empty, it defaults to:\n    # {{- if index . \"subscription-target\" -}}\n    # {{ index . \"subscription-target\" }}\n    # {{- else -}}\n    # {{ index . 
\"source\" | host }}\n    # {{- end -}}`\n    # which will set the target to the value configured under `subscription.$subscription-name.target` if any,\n    # otherwise it will set it to the target name stripped of the port number (if present)\n    target-template:\n    # boolean, valid only if format is `event`.\n    # if true, arrays of events are split and marshaled as JSON objects instead of an array of dicts.\n    split-events: false\n    # boolean, if true the message timestamp is changed to current time\n    override-timestamps: false\n    # time duration to wait before re-dial in case there is a failure\n    retry-interval: \n    # NOT IMPLEMENTED boolean, enables the collection and export (via prometheus) of output specific metrics\n    enable-metrics: false \n    # list of processors to apply on the message before writing\n    event-processors: \n```\n\nA UDP output can be used to export data to an ELK stack, using [Logstash UDP input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-udp.html)"
  },
  {
    "path": "docs/user_guide/prompt_suggestions.md",
    "content": "Starting with `gnmic v0.4.0` release the users can enjoy the interactive prompt mode which can be enabled with the [`prompt`](../cmd/prompt.md) command.\n\n<script id=\"asciicast-QaJRqrLSOGvgcAavybsMRzD7c\" data-autoplay=\"true\" data-loop=\"true\" src=\"https://asciinema.org/a/QaJRqrLSOGvgcAavybsMRzD7c.js\" async></script>\n\nThe prompt mode delivers two major features:\n\n- simplifies `gnmic` commands and flags navigation, as every option is suggested and auto-completed\n- provides interactive YANG path auto-suggestions for `get`, `set`, `subscribe` commands effectively making the terminal your YANG browser\n\n## Using the prompt interface\nDepending on the cursor position in the prompt line, a so-called _suggestion box_ pops up with contextual auto-completions. The user can enter the suggestion box by pressing the <kbd>TAB</kbd> key. The <kbd>↑</kbd> and <kbd>↓</kbd> keys can be used to navigate the suggestion list.\n\nSelect the suggested menu item with <kbd>SPACE</kbd> key or directly commit your command with <kbd>ENTER</kbd>, its that easy!\n\nThe following most-common key bindings will work in the prompt mode:\n\n| Key combination                            | Description                                              |\n| ------------------------------------------ | -------------------------------------------------------- |\n| <kbd>Option/Control</kbd> + <kbd>→/←</kbd> | move cursor a word right/left                            |\n| <kbd>Control</kbd> + <kbd>W</kbd>          | delete a word to the left                                |\n| <kbd>Control</kbd> + <kbd>Z</kbd>          | delete a path element in the xpath string ([example][1]) |\n| <kbd>Control</kbd> + <kbd>A</kbd>          | move cursor to the beginning of a line                   |\n| <kbd>Control</kbd> + <kbd>E</kbd>          | move cursor to the end of a line                         |\n| <kbd>Control</kbd> + <kbd>C</kbd>          | discard the current line                          
       |\n| <kbd>Control</kbd> + <kbd>D</kbd>          | exit prompt                                              |\n| <kbd>Control</kbd> + <kbd>K</kbd>          | delete the line after the cursor to the clipboard        |\n| <kbd>Control</kbd> + <kbd>U</kbd>          | delete the line before the cursor to the clipboard       |\n| <kbd>Control</kbd> + <kbd>L</kbd>          | clear screen                                             |\n\n## Commands and flags suggestions\nTo make `gnmic` configurable and flexible we introduced a considerable amount of flags and sub-commands.  \nTo help the users navigate the sheer selection of `gnmic` configuration options, the prompt mode will auto-suggest the global flags, sub-commands and local flags of those sub-commands.\n\nWhen the prompt mode is launched, the suggestions will be shown for the top-level commands and all the global flags. Once the sub-command is typed into the terminal, the auto-suggestions will be provided for the commands nested under this command and its local flags.\n\nIn the following demo we show how the command and flag suggestions work. As the prompt starts, the suggestion box immediately hints what commands and global flags are available for input as well as their description.\n\nThe user starts with adding the global flags `--address, --insecure, --username` and then selects the `capabilities` command and commits it. This results in gNMI Capability RPC execution against a specified target.\n\n<script id=\"asciicast-zsACIBIUiiyoeqgYQ82EjUCIM\" src=\"https://asciinema.org/a/zsACIBIUiiyoeqgYQ82EjUCIM.js\" async></script>\n\n### Mixed mode\nIts perfectly fine to specify some global flags outside of the prompt command and add more within the prompt mode. 
For example, the following is a valid invocation:\n\n```\ngnmic --insecure --username admin --password admin --address 10.1.0.11 prompt\n```\n\nHere the prompt will start with the `insecure, username, password, address` flags set.\n\n## YANG-completions\nOne of the most challenging problems in the network automation field is to process the YANG models and traverse YANG trees to construct the requests used against the network elements.  \nBe it gNMI, NETCONF or RESTCONF a user still needs to have a path pointing to a specific YANG-defined node which is targeted by a request.\n\nIn gNMI paths can be represented in a [human readable XPATH-like form](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-path-conventions.md#constructing-paths) - `/a/b/c[key=val]/d` - and these paths are based on the underlying YANG models.  \nThe problem at hand was how to get these paths interactively, or even better - walk the YANG tree from within the CLI and dynamically build the path used in a gNMI RPC?\n\nWith **YANG-completions** feature embedded in `gnmic` what used to be a dream is now a reality 🎉\n\n<p align=center><script id=\"asciicast-G1O3pN7xRMLe0tqHjBvDJ7mYA\" src=\"https://asciinema.org/a/G1O3pN7xRMLe0tqHjBvDJ7mYA.js\" async></script></p>\n\nLet us explain what just happened there.\n\nIn the demonstration above, we called the `gnmic` with the well-known flags defining the gNMI target (`address`, `username`, `password`). 
But this time we also added a few YANG specific flags ([`--file`](../cmd/prompt.md#file) and [`--dir`](../cmd/prompt.md#dir)) that load the full set of Nokia SR OS YANG models and the 3rd party models SR OS rely on.\n\n```\ngnmic --address 10.1.0.11 --insecure --username admin --password admin \\\n      --file ~/7x50_YangModels/YANG/nokia-combined \\\n      --dir ~/7x50_YangModels/YANG \\\n      prompt\n```\n\nIn the background `gnmic` processed these YANG models to build the entire schema tree of the Nokia SR OS state and configuration datastores. With that in-mem stored information, `gnmic` was able to auto-suggest all the possible YANG paths when the user entered the `--path` flag which accepts gNMI paths.\n\nBy using the auto-suggestion hints, a user navigated the `/state` tree of a router and drilled down to the version-number leaf that, in the end, was retrieved with the gNMI Get RPC.\n\n!!! success \"YANG-driven path suggestions\"\n    `gnmic` is now capable of reading and processing YANG modules to enable live path auto-suggestions\n\n### YANG processing\nFor the YANG-completion feature to work its absolutely imperative for `gnmic` to successfully parse and compile the YANG models.\n\nThe [`prompt`](../cmd/prompt.md) command leverages the [`--file`](../cmd/prompt.md#file) and [`--dir`](../cmd/prompt.md#dir) flags to select the YANG models for processing.\n\n\nWith the `--file` flag a user specifies a file path to a YANG file or a directory of them that `gnmic` will read and process. If it points to a directory it will be visited recursively reading in all `*.yang` files it finds.\n\nThe `--dir` flag also points to a YANG file or a directory and indicates which additional YANG files might be required. 
For example, if the YANG modules that a user specified with the `--file` flag import or include modules that were not part of the path specified with `--file`, they need to be added with the `--dir` flag.\n\nThe [Examples](#examples) section provide some good practical examples on how these two flags can be used together to process the YANG models from different vendors.\n\n### Understanding path suggestions\nWhen `gnmic` provides a user with the path suggestions it does it in a smart and intuitive way.\n\n![path suggestions](https://gitlab.com/rdodin/pics/-/wikis/uploads/d3815b474605765989d136753c0f9c87/image.png)\n\nFirst, it understands in what part of the tree a user currently is and suggests only the next possible elements.\n\nAdditionally, the suggested next path elements will be augmented with the information extracted from the YANG model, such as:\n\n* element description, as given in the YANG `description` statement for the element\n* element configuration state (`rw` / `ro`), as defined in section [4.2.3 of RFC 7950](https://tools.ietf.org/html/rfc7950#section-4.2.3).\n* node type:\n    * The containers and lists will be denoted with the `[+]` marker, which means that a user can type `/` char after them to receive suggestions for the nested elements.\n    * the `[⋯]` character belongs to a leaf-list element.\n    * an empty space will indicate the leaf element.\n\n### Examples\nThe examples in this section will show how to use the `--file` and `--dir` flags of the [`prompt`](../cmd/prompt.md) command with the YANG collections from different vendors and standard bodies.\n\n#### Nokia SR OS\nYANG repo: [nokia/7x50_YangModels](https://github.com/nokia/7x50_YangModels)\n\nClone the repository with Nokia YANG models and checkout the release of interest:\n\n```\ngit clone https://github.com/nokia/7x50_YangModels\ncd 7x50_YangModels\ngit checkout sros_20.7.r2\n```\n\nStart `gnmic` in prompt mode and read in the nokia-combined YANG modules:\n\n```\ngnmic --file 
YANG/nokia-combined \\\n      --dir YANG \\\n      prompt\n```\n\nThis will enable path auto-suggestions for the entire tree of the Nokia SR OS YANG models.\n\nThe full command with the gNMI target specified could look like this:\n\n```\ngnmic --address 10.1.0.11 --insecure --username admin --password admin \\\n      prompt \\\n      --file ~/7x50_YangModels/YANG/nokia-combined \\\n      --dir ~/7x50_YangModels/YANG\n```\n\n#### Openconfig\nYANG repo: [openconfig/public](https://github.com/openconfig/public)\n\nClone the OpenConfig repository:\n\n```\ngit clone https://github.com/openconfig/public\ncd public\n```\n\nStart `gnmic` in prompt mode and read in all the modules:\n\n```\ngnmic --file release/models \\\n      --dir third_party \\\n      --exclude ietf-interfaces \\\n      prompt\n```\n\n<script id=\"asciicast-pcEq80BAs0N9RvgMLZTYJ9S8I\" src=\"https://asciinema.org/a/pcEq80BAs0N9RvgMLZTYJ9S8I.js\" async></script>\n\n!!! note\n    With OpenConfig models we have to use `--exclude` flag to exclude ietf-interfaces module from being clashed with OpenConfig interfaces module.\n\n#### Cisco\nYANG repo: [YangModels/yang](https://github.com/YangModels/yang)\n\nClone the `YangModels/yang` repo and change into the main directory of the repo:\n\n```\ngit clone https://github.com/YangModels/yang\ncd yang/vendor\n```\n\n##### IOS-XR\nThe IOS-XR native YANG models are disaggregated and spread all over the place. Although its technically possible to load them all in one go, this approach will produce a lot of top-level modules making the navigation quite hard.\n\nAn easier and cleaner approach would be to find the relevant module(s) and load them separately or in small batches. For example here we load BGP config and operational models together:\n\n```\ngnmic --file vendor/cisco/xr/721/Cisco-IOS-XR-um-router-bgp-cfg.yang \\\n      --file vendor/cisco/xr/721/Cisco-IOS-XR-ipv4-bgp-oper.yang \\\n      --dir standard/ietf \\\n      prompt\n```\n\n!!! 
note\n    We needed to include the `ietf/` directory by means of the `--dir` flag, since the Cisco's native modules rely on the IETF modules and these modules are not in the same directory as the BGP modules.\n\nThe full command that you can run against the real Cisco IOS-XR node must have a target defined, the encoding set and origin suggestions enabled. Here is what it can look like:\n\n```\ngnmic -a 10.10.30.5:57500 --insecure -e json_ietf -u admin -p Cisco123 \\\n      prompt \\\n      --file yang/vendor/cisco/xr/662/Cisco-IOS-XR-ipv4-bgp-cfg.yang \\\n      --file yang/vendor/cisco/xr/662/Cisco-IOS-XR-ipv4-bgp-oper.yang \\\n      --dir yang/standard/ietf \\\n      --suggest-with-origin\n```\n\n##### NX-OS\nCisco NX-OS native modules, on the other hand, are aggregated in a single file, here is how you can generate the suggestions from it:\n\n```\ngnmic --file vendor/cisco/xr/721/Cisco-IOS-XR-um-router-bgp-cfg.yang \\\n      --dir standard/ietf \\\n      prompt\n```\n\n#### Juniper\nYANG repo: [Juniper/yang](https://github.com/Juniper/yang)\n\nClone the Juniper YANG repository and change into the release directory:\n\n```\ngit clone https://github.com/Juniper/yang\ncd yang/20.3/20.3R1\n```\n\nStart `gnmic` and generate path suggestions for the whole configuration tree of Juniper MX:\n\n```\ngnmic --file junos/conf --dir common prompt\n```\n\n!!! note\n    1. Juniper models are constructed in a way that a top-level container appears to be `/configuration`, that will not work with your gNMI Subscribe RPC. Instead, you should omit this top level container. So, for example, the suggested path `/configuration/interfaces/interface/state` should become `/interfaces/interface/state`.\n    2. Juniper vMX doesn't support gNMI Get RPC, if you plan to test it, use gNMI Subscribe RPC\n    3. 
With gNMI Subscribe, specify `-e proto` flag to enable protobuf encoding.\n\n#### Arista\nYANG repo: [aristanetworks/yang](https://github.com/aristanetworks/yang)\n\nArista uses a subset of OpenConfig modules and does not provide IETF modules inside their repo. So make sure you have IETF models available so you can reference them; the `openconfig/public` repo is a good candidate.\n\nClone the Arista YANG repo:\n\n```\ngit clone https://github.com/aristanetworks/yang\ncd yang\n```\n\nGenerate path suggestions for all Arista OpenConfig modules:\n\n```\ngnmic --file EOS-4.23.2F/openconfig/public/release/models \\\n      --dir ~/public/third_party/ietf \\\n      --exclude ietf-interfaces \\\n      prompt\n```\n\n## Enumeration suggestions\n`gnmic` flags that can take pre-defined values (enumerations) will get suggestions as well. For example, no need to keep in mind which subscription modes are available, the prompt will hint you:\n\n![enum suggestion](https://gitlab.com/rdodin/pics/-/wikis/uploads/a2772c709d869d5efc299db451e3d4a9/image.png)\n\n## File-path completions\nWhenever a user needs to provide a file path in a prompt mode, the filepath suggestions will make the process interactive:\n\n<script id=\"asciicast-uJyTI4nnQ52lSpIw5Ec7INLe7\" src=\"https://asciinema.org/a/uJyTI4nnQ52lSpIw5Ec7INLe7.js\" async></script>\n\n[1]: https://gitlab.com/rdodin/pics/-/wikis/uploads/cc97ef563e2b973da512951fedd1ddb8/CleanShot_2020-10-21_at_11.37.57.mp4"
  },
  {
    "path": "docs/user_guide/subscriptions.md",
    "content": "\nDefining subscriptions with [`subscribe`](../cmd/subscribe.md) command's CLI flags is a quick&easy way to work with gNMI subscriptions. \n\nA downside of that approach is that commands can get lengthy when defining multiple subscriptions and not all possible flavors and combinations of subscription can be defined.\n\nWith the multiple subscriptions defined in the [configuration file](configuration_file.md) we make a complex task of managing multiple subscriptions for multiple targets easy. The idea behind the multiple subscriptions is to define the subscriptions separately and then bind them to the targets.\n\n## Defining subscriptions\n\n### CLI-based subscription\n\nA subscription is configured through a series of command-line interface (CLI) flags. These include, *but are not limited to*:\n\n1. `--path`: This flag is used to set the paths for the subscription.\n\n2. `--mode [once | poll | stream]`: Defines the subscription mode. It can be set to once, poll, or stream.\n\n3. `--stream-mode [target-defined | sample | on-change]`: Sets the stream subscription mode. The options are target-defined, sample, or on-change.\n\n4. `--sample-interval`: Determines the sample interval for a stream/sample subscription.\n\nA command executed with these flags will generate a single [SubscribeRequest](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3511-the-subscriberequest-message) that is sent to the target.\n\nEvery path configured with the `--path` flag leads to a [`Subscription`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3513-the-subscription-message) added to the [`subscriptionList`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3512-the-subscriptionlist-message) message.\n\nThere are no constraints when defining a `ONCE` or `POLL` subscribe request. 
However, when a `STREAM` subscribe request is defined using flags, all subscriptions (paths) will adopt the same mode (`target-defined`, `on-change`, or `sample`) and stream subscription attributes such as `sample-interval` and `heartbeat-interval`.\n\n### File-based subscription config\n\nTo define a subscription a user needs to create the `subscriptions` container in the configuration file:\n\n```yaml\nsubscriptions:\n  # a configurable subscription name\n  subscription-name:\n    # string, path to be set as the Subscribe Request Prefix\n    prefix:\n    # string, value to set as the SubscribeRequest Prefix Target\n    target:\n    # boolean, if true, the SubscribeRequest Prefix Target will be set to \n    # the configured target name under section `targets`.\n    # does not apply if the previous field `target` is set.\n    set-target: # true | false\n    # list of strings, list of subscription paths for the named subscription\n    paths: []\n    # list of strings, schema definition modules\n    models: []\n    # string, case insensitive, one of ONCE, STREAM, POLL\n    mode: STREAM\n    # string, case insensitive, if `mode` is set to STREAM, this defines the type \n    # of streamed subscription,\n    # one of SAMPLE, TARGET_DEFINED, ON_CHANGE\n    stream-mode: TARGET_DEFINED\n    # string, case insensitive, defines the gNMI encoding to be used for the subscription\n    encoding: JSON\n    # integer, specifies the packet marking that is to be used for the subscribe responses\n    qos:\n    # duration, Golang duration format, e.g: 1s, 1m30s, 1h.\n    # specifies the sample interval for a STREAM/SAMPLE subscription\n    sample-interval:\n    # duration, Golang duration format, e.g: 1s, 1m30s, 1h.\n    # The heartbeat interval value can be specified along with `ON_CHANGE` or `SAMPLE` \n    # stream subscriptions modes and has the following meanings in each case:\n    # - `ON_CHANGE`: The value of the data item(s) MUST be re-sent once per heartbeat \n    #            
    interval regardless of whether the value has changed or not.\n    # - `SAMPLE`: The target MUST generate one telemetry update per heartbeat interval, \n    #             regardless of whether the `--suppress-redundant` flag is set to true.\n    heartbeat-interval:\n    # boolean, if set to true, the target SHOULD NOT generate a telemetry update message unless \n    # the value of the path being reported on has changed since the last \n    suppress-redundant:\n    # boolean, if set to true, the target MUST not transmit the current state of the paths \n    # that the client has subscribed to, but rather should send only updates to them.\n    updates-only:\n    # list of strings, the list of outputs to send updates to. If blank, defaults to all outputs\n    outputs:\n      - output1\n      - output2\n    # list of subscription definition, this field is used to define multiple stream subscriptions (target-defined, sample or on-change)\n    # that will be created using a single SubscribeRequest (i.e: share the same gRPC stream).\n    # This field cannot be defined if `paths`, `stream-mode`, `sample-interval`, `heartbeat-interval` or`suppress-redundant` are set.\n    # Only fields applicable to STREAM subscriptions can be set in this list of subscriptions: \n    # `paths`, `stream-mode`, `sample-interval`, `heartbeat-interval` or`suppress-redundant`\n    stream-subscriptions:\n      - paths: []\n        stream-mode: \n        sample-interval:\n        heartbeat-interval:\n        suppress-redundant:\n      - paths: []\n        stream-mode: \n        sample-interval:\n        heartbeat-interval:\n        suppress-redundant:\n    # historical subscription config: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md#1-purpose    \n    history:\n      # string, nanoseconds since Unix epoch or RFC3339 format.\n      # if set, the history extension type will be a Snapshot request\n      snapshot:\n      # string, nanoseconds since Unix epoch or 
RFC3339 format.\n      # if set, the history extension type will be a Range request\n      start:\n      # string, nanoseconds since Unix epoch or RFC3339 format.\n      # if set, the history extension type will be a Range request\n      end:\n    # uint32, depth value as per: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-depth.md\n    depth: 0\n```\n\n#### Subscription config to gNMI SubscribeRequest\n\nEach subscription (under `subscriptions:`) results in a single [`SubscribeRequest`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3511-the-subscriberequest-message) being sent to the target.\n\nIf `paths` is set, each path results in a separate [`Subscription`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3513-the-subscription-message) message being added to the [`subscriptionList`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3512-the-subscriptionlist-message) message.\n\nIf instead of paths, a list of stream-subscriptions is defined:\n\n```yaml\nsubscriptions:\n  sub1:\n    stream-subscriptions:\n      - paths:\n```\n\nEach path under each stream-subscriptions will result in a separate [`Subscription`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3513-the-subscription-message) message being added to the [`subscriptionList`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3512-the-subscriptionlist-message) message.\n\n#### Examples\n\n##### A single stream/sample subscription\n\n=== \"YAML\"\n    ```yaml\n    subscriptions:\n      port_stats:\n        paths:\n          - \"/state/port[port-id=*]/statistics\"\n        stream-mode: sample\n        sample-interval: 5s\n        encoding: bytes\n    ```\n=== \"CLI\"\n    ```shell\n    gnmic sub --path /state/port/statistics \\\n              --stream-mode sample \\\n              --sample-interval 5s \\\n             
 --encoding bytes\n    ```\n=== \"PROTOTEXT\"\n    ```text\n    subscribe: {\n      subscription: {\n        path: {\n          elem: {\n            name: \"state\"\n          }\n          elem: {\n            name: \"port\"\n          }\n          elem: {\n            name: \"statistics\"\n          }\n        }\n        mode: SAMPLE\n        sample_interval:  5000000000\n      }\n      encoding: BYTES\n    }\n    ```\n\n##### A single stream/on-change subscription\n\n=== \"YAML\"\n    ```yaml\n    subscriptions:\n      port_stats:\n        paths:\n          - \"/state/port/oper-state\"\n        stream-mode: on-change\n        encoding: bytes\n    ```\n=== \"CLI\"\n    ```shell\n    gnmic sub --path /state/port/oper-state \\\n              --stream-mode on-change \\\n              --encoding bytes\n    ```\n=== \"PROTOTEXT\"\n    ```text\n    subscribe: {\n      subscription: {\n        path: {\n          elem: {\n            name: \"state\"\n          }\n          elem: {\n            name: \"port\"\n          }\n          elem: {\n            name: \"oper-state\"\n          }\n        }\n        mode: ON_CHANGE\n      }\n      encoding: BYTES\n    }\n    ```\n\n##### A ONCE subscription\n\n=== \"YAML\"\n    ```yaml\n    subscriptions:\n      system_facts:\n        paths:\n          - /configure/system/name\n          - /state/system/version\n        mode: once\n        encoding: bytes\n    ```\n=== \"CLI\"\n    ```shell\n    gnmic sub --path /configure/system/name \\\n              --path /state/system/version \\\n              --mode once \\\n              --encoding bytes\n    ```\n=== \"PROTOTEXT\"\n    ```text\n    subscribe: {\n      subscription: {\n        path: {\n          elem: {\n            name: \"configure\"\n          }\n          elem: {\n            name: \"port\"\n          }\n          elem: {\n            name: \"name\"\n          }\n        }\n      }\n      subscription: {\n        path: {\n          elem: {\n            name: \"state\"\n   
       }\n          elem: {\n            name: \"system\"\n          }\n          elem: {\n            name: \"version\"\n          }\n        }\n      }\n      mode: ONCE\n      encoding: BYTES\n    }\n    ```\n\n##### Combining multiple stream subscriptions in the same gRPC stream\n\n=== \"YAML\"\n    ```yaml\n    subscriptions:\n      sub1:\n        stream-subscriptions:\n          - paths:\n            - /configure/system/name\n            stream-mode: on-change\n          - paths:\n            - /state/port/statistics\n            stream-mode: sample\n            sample-interval: 10s  \n        encoding: bytes\n    ```\n=== \"CLI\"\n    NA\n=== \"PROTOTEXT\"\n    ```text\n    subscribe: {\n      subscription: {\n        path: {\n          elem: {\n            name: \"configure\"\n          }\n          elem: {\n            name: \"system\"\n          }\n          elem: {\n            name: \"name\"\n          }\n        }\n        mode: ON_CHANGE\n      }\n      subscription: {\n        path: {\n          elem: {\n            name: \"state\"\n          }\n          elem: {\n            name: \"port\"\n          }\n          elem: {\n            name: \"statistics\"\n          }\n        }\n        mode: SAMPLE\n        sample_interval:  10000000000\n      }\n      encoding: BYTES\n    }\n    ```\n\n##### Configure multiple subscriptions\n\n```yaml\n# part of ~/gnmic.yml config file\nsubscriptions:  # container for subscriptions\n  port_stats:     # a named subscription, a key is a name\n    paths:      # list of subscription paths for that named subscription\n      - \"/state/port[port-id=1/1/c1/1]/statistics/out-octets\"\n      - \"/state/port[port-id=1/1/c1/1]/statistics/in-octets\"\n    stream-mode: sample # one of [on-change target-defined sample]\n    sample-interval: 5s\n    encoding: bytes\n  service_state:\n    paths:\n      - \"/state/service/vpls[service-name=*]/oper-state\"\n      - \"/state/service/vprn[service-name=*]/oper-state\"\n    
stream-mode: on-change\n  system_facts:\n    paths:\n      - \"/configure/system/name\"\n      - \"/state/system/version\"\n    mode: once\n```\n\nInside that subscriptions container a user defines individual named subscriptions; in the example above two named subscriptions `port_stats` and `service_state` were defined.\n\nThese subscriptions can be used on the cli via the `[ --name ]` flag of subscribe command:\n\n```shell\ngnmic subscribe --name service_state --name port_stats\n```\n\nOr by binding them to different targets, (see next section)\n\n## Binding subscriptions\n\nOnce the subscriptions are defined, they can be flexibly associated with the targets.\n\n```yaml\n# part of ~/gnmic.yml config file\ntargets:\n  router1.lab.com:\n    username: admin\n    password: secret\n    subscriptions:\n      - port_stats\n      - service_state\n  router2.lab.com:\n    username: gnmi\n    password: telemetry\n    subscriptions:\n      - service_state\n```\n\nThe named subscriptions are put under the `subscriptions` section of a target container. As shown in the example above, it is allowed to add multiple named subscriptions under a single target; in that case each named subscription will result in a separate Subscription Request towards a target.\n\n!!! 
note\n    If a target is not explicitly associated with any subscription, the client will subscribe to all defined subscriptions in the file.\n\nThe full configuration with the subscriptions defined and associated with targets will look like this:\n\n```yaml\nusername: admin\npassword: nokiasr0s\ninsecure: true\n\ntargets:\n  router1.lab.com:\n    subscriptions:\n      - port_stats\n      - service_state\n      - system_facts\n  router2.lab.com:\n    subscriptions:\n      - service_state\n      - system_facts\n\nsubscriptions:\n  port_stats:\n    paths:\n      - \"/state/port[port-id=1/1/c1/1]/statistics/out-octets\"\n      - \"/state/port[port-id=1/1/c1/1]/statistics/in-octets\"\n    stream-mode: sample\n    sample-interval: 5s\n    encoding: bytes\n  service_state:\n    paths:\n       - \"/state/service/vpls[service-name=*]/oper-state\"\n       - \"/state/service/vprn[service-name=*]/oper-state\"\n    stream-mode: on-change\n  system_facts:\n    paths:\n       - \"/configure/system/name\"\n       - \"/state/system/version\"\n    mode: once\n```\n\nAs a result of such configuration the `gnmic` will set up three gNMI subscriptions to router1 and two other gNMI subscriptions to router2:\n\n```shell\n$ gnmic subscribe\ngnmic 2020/07/06 22:03:35.579942 target 'router2.lab.com' initialized\ngnmic 2020/07/06 22:03:35.593082 target 'router1.lab.com' initialized\n```\n\n```json\n{\n  \"source\": \"router2.lab.com\",\n  \"subscription-name\": \"service_state\",\n  \"timestamp\": 1594065869313065895,\n  \"time\": \"2020-07-06T22:04:29.313065895+02:00\",\n  \"prefix\": \"state/service/vpls[service-name=testvpls]\",\n  \"updates\": [\n    {\n      \"Path\": \"oper-state\",\n      \"values\": {\n        \"oper-state\": \"down\"\n      }\n    }\n  ]\n}\n{\n  \"source\": \"router1.lab.com\",\n  \"subscription-name\": \"service_state\",\n  \"timestamp\": 1594065868850351364,\n  \"time\": \"2020-07-06T22:04:28.850351364+02:00\",\n  \"prefix\": 
\"state/service/vpls[service-name=test]\",\n  \"updates\": [\n    {\n      \"Path\": \"oper-state\",\n      \"values\": {\n        \"oper-state\": \"down\"\n      }\n    }\n  ]\n}\n{\n  \"source\": \"router1.lab.com\",\n  \"subscription-name\": \"port_stats\",\n  \"timestamp\": 1594065873938155916,\n  \"time\": \"2020-07-06T22:04:33.938155916+02:00\",\n  \"prefix\": \"state/port[port-id=1/1/c1/1]/statistics\",\n  \"updates\": [\n    {\n      \"Path\": \"in-octets\",\n      \"values\": {\n        \"in-octets\": \"671552\"\n      }\n    }\n  ]\n}\n{\n  \"source\": \"router1.lab.com\",\n  \"subscription-name\": \"port_stats\",\n  \"timestamp\": 1594065873938043848,\n  \"time\": \"2020-07-06T22:04:33.938043848+02:00\",\n  \"prefix\": \"state/port[port-id=1/1/c1/1]/statistics\",\n  \"updates\": [\n    {\n      \"Path\": \"out-octets\",\n      \"values\": {\n        \"out-octets\": \"370930\"\n      }\n    }\n  ]\n}\n^C\nreceived signal 'interrupt'. terminating...\n```\n"
  },
  {
    "path": "docs/user_guide/targets/target_discovery/consul_discovery.md",
    "content": "The Consul target loader discovers gNMI targets registered as service instances in a Consul Server.\n\nThe loader watches services registered in Consul defined by a service name and optionally a set of tags.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:2,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/target_discovery.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Ftarget_discovery.drawio\" async></script>\n\n### Services watch\n\nWhen at least one service name is set, gNMIc consul loader will watch the instances registered under that service name and build a target configuration using the service ID as the target name and the registered address and port as the target address.\n\nThe remaining configuration can be set under the service name definition.\n\n```yaml\nloader:\n  type: consul\n  services:\n    - name: cluster1-gnmi-server\n      config:\n        insecure: true\n        username: admin\n        password: admin\n```\n\n### Templating with Consul\n\nIt is possible to set the target name to something other than the Consul Service ID using the `name` field under the config. The target name can be customized using [Go Templates](https://golang.org/pkg/text/template/).\n\nIn addition to setting the target name, it is also possible to use Go Templates on `event-tags` as well.\n\nThe templates use the Service under Consul, so access to things like `ID`, `Tags`, `Meta`, etc. 
are all available.\n\n```yaml\nloader:\n  type: consul\n  services:\n    - name: cluster1-gnmi-server\n      config:\n        name: \"{{.Meta.device}}\"\n        event-tags:\n            location: \"{{.Meta.site_name}}\"\n            model: \"{{.Meta.device_type}}\"\n            tag-1: \"{{.Meta.tag_1}}\"\n            boring-static-tag: \"hello\"\n```\n\n### Configuration\n\n```yaml\nloader:\n  type: consul\n  # address of the loader server\n  address: localhost:8500\n  # Consul Data center, defaults to dc1\n  datacenter: dc1\n  # Consul username, to be used as part of HTTP basicAuth\n  username:\n  # Consul password, to be used as part of HTTP basicAuth\n  password:\n  # Consul Token, is used to provide a per-request ACL token which overrides the agent's default token\n  token:\n  # the key prefix to watch for targets configuration, defaults to \"gnmic/config/targets\"\n  key-prefix: gnmic/config/targets\n  # if true, registers consulLoader prometheus metrics with the provided\n  # prometheus registry\n  enable-metrics: false\n  # list of services to watch and derive target configurations from.\n  services:\n      # name of the Consul service\n    - name:\n      # a list of strings to further filter the service instances\n      tags: \n      # configuration map to apply to target discovered from this service\n      config:\n  # list of actions to run on target discovery\n  on-add:\n  # list of actions to run on target removal\n  on-delete:\n  # variable dict to pass to actions to be run\n  vars:\n  # path to variable file, the variables defined will be passed to the actions to be run\n  # values in this file will be overwritten by the ones defined in `vars`\n  vars-file:\n```\n"
  },
  {
    "path": "docs/user_guide/targets/target_discovery/discovery_intro.md",
    "content": "## Introduction\n\n`gnmic` supports dynamic loading of gNMI targets from external systems.\nThis feature allows adding and deleting gNMI targets without the need to restart `gnmic`.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:0,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/target_discovery.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Ftarget_discovery.drawio\" async></script>\n\nDepending on the discovery method, `gnmic` will either:\n\n- Subscribe to changes on the remote system,\n- Or poll the defined targets from the remote systems.\n  \nWhen a change is detected, the new targets are added and the corresponding subscriptions are immediately established.\nThe removed targets are deleted together with their subscriptions.\n\nActions can be run on target discovery (on-add or on-delete), this can be useful to add initial configurations to target ahead of gNMI subscriptions or run checks before subscribing.\nIn the case of on-add actions,\n\n!!! notes\n    1. Only one discovery type is supported at a time.\n\n    2. 
Target updates are not supported, delete and re-add is the way to update a target configuration.\n\n## Discovery types\n\nFour types of target discovery methods are supported:\n\n### [File Loader](./file_discovery.md)\n\nWatches changes to a local file containing gNMI targets definitions.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:1,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/target_discovery.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Ftarget_discovery.drawio\" async></script>\n\n### [Consul Server Loader](./consul_discovery.md)\n\nSubscribes to Consul KV key prefix changes, the keys and their value represent a target configuration fields.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:2,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/target_discovery.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Ftarget_discovery.drawio\" async></script>\n\n### [Docker Engine Loader](./docker_discovery.md)\n\nPolls containers from a Docker Engine host matching some predefined criteria (docker filters).\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; 
 display:block;\" data-mxgraph=\"{&quot;page&quot;:3,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/target_discovery.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Ftarget_discovery.drawio\" async></script>\n\n### [HTTP Loader](./http_discovery.md)\n\nQueries an HTTP endpoint periodically, expecting a well-formatted JSON dict of target configurations.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:4,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/target_discovery.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Ftarget_discovery.drawio\" async></script>\n\n## Expanding Environment Variables in Loaded Configuration\n\nYou can use environment variables within your loaded target configurations. 
To enable this feature, set `expand-env` to `true` under the loader configuration:\n\n```yaml\nloader:\n  type: consul\n  expand-env: true\n  # loader specific configuration\n```\n## Running Actions On Discovery\n\nAll loaders support fields `on-add` and `on-delete` which take a list of predefined action names that will be run sequentially on target discovery or deletion.\n\nThe below configuration example defines 3 actions `configure_interfaces`, `configure_subinterfaces` and `configure_network_instances` which will run when the `docker` loader discovers a target with label `clab-node-kind=srl`\n\n``` yaml\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label: clab-node-kind=srl\n      config:\n        skip-verify: true\n        username: admin\n        password: NokiaSrl1!\n  on-add:\n    - configure_interfaces\n    - configure_subinterfaces\n    - configure_network_instances\n \nactions:\n  configure_interfaces:\n    name: configure_interfaces\n    type: gnmi\n    target: '{{ .Input }}'\n    rpc: set\n    encoding: json_ietf\n    debug: true\n    paths:\n      - /interface[name=ethernet-1/1]/admin-state\n      - /interface[name=ethernet-1/2]/admin-state \n    values:\n      - enable\n      - enable\n  configure_subinterfaces:\n    name: configure_subinterfaces\n    type: gnmi\n    target: '{{ .Input }}'\n    rpc: set\n    encoding: json_ietf\n    debug: true\n    paths:\n      - /interface[name=ethernet-1/1]/subinterface[index=0]/admin-state\n      - /interface[name=ethernet-1/2]/subinterface[index=0]/admin-state \n    values:\n      - enable\n      - enable\n  configure_network_instances:\n    name: configure_network_instances\n    type: gnmi\n    target: '{{ .Input }}'\n    rpc: set\n    encoding: json_ietf\n    debug: true\n    paths:\n      - /network-instance[name=default]/admin-state\n      - /network-instance[name=default]/interface\n      - /network-instance[name=default]/interface\n    values:\n      - enable\n      - '{\"name\": 
\"ethernet-1/1.0\"}'\n      - '{\"name\": \"ethernet-1/2.0\"}'\n```\n"
  },
  {
    "path": "docs/user_guide/targets/target_discovery/docker_discovery.md",
    "content": "\nThe Docker target loader allows discovering gNMI targets from [Docker Engine](https://docs.docker.com/engine/) hosts.\n\nIt discovers containers as well as their gNMI address, based on a list of [Docker filters](https://docs.docker.com/engine/reference/commandline/ps/#filtering)\n\nOne gNMI target is added per discovered container.\n\nIndividual Target configurations are derived from the container exposed ports and labels, as well as the global configuration.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:3,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/target_discovery.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Ftarget_discovery.drawio\" async></script>\n\n#### Configuration\n\n```yaml\n\nloader:\n  # the loader type: docker\n  type: docker\n  # string, the docker daemon address,\n  # leave empty to use the local docker daemon\n  # possible values:\n  #  - unix:///var/run/docker.sock\n  #  - tcp://<docker_host>:port\n  #  - http://<docker_host>:port\n  address: \"\"\n  # duration, check interval for discovering \n  # new docker containers, default: 30s\n  interval: 30s\n  # duration, the docker queries timeout, \n  # defaults to half of `interval` if left unset or is invalid.\n  timeout: 15s\n  # time to wait before the fist docker query\n  start-delay: 0s\n  # bool, print loader debug statements.\n  debug: false\n  # if true, registers dockerLoader prometheus metrics with the provided\n  # prometheus registry\n  enable-metrics: false\n  # containers, network filters: \n  # see 
https://docs.docker.com/engine/reference/commandline/ps/#filtering\n  # for the possible values.\n  filters:\n      # containers filters\n    - containers:\n        # containers returned by `docker ps -f \"label=clab-node-kind=srl\"`\n        - label: clab-node-kind=srl\n      # network filters\n      network:\n        # networks returned by `docker network ls -f \"label=containerlab\"`\n        label: containerlab\n      # gNMI port value for the containers discovered by this filter.\n      # It can be a port value or a label name set on the container.\n      # valid values:\n      #   `port: \"57400\"`\n      #   `port: \"label=gnmi-port\"`\n      port: \n      # target config for containers discovered by this filter.\n      # These fields will override the matching global config fields.\n      config:\n        username: admin\n        password: secret1\n        skip-verify: true\n  # list of actions to run on target discovery\n  on-add:\n  # list of actions to run on target removal\n  on-delete:\n  # variable dict to pass to actions to be run\n  vars:\n  # path to variable file, the variables defined will be passed to the actions to be run\n  # values in this file will be overwritten by the ones defined in `vars`\n  vars-file:\n```\n\n##### Filter fields explanation\n\n- **containers**: (Optional)\n  \n  A list of lists of docker filters used to select containers from the Docker Engine host.\n\n  The docker filter `status=running` is implicitly added.\n  \n  If not set, all containers with `status=running` are selected.\n\n- **network**: (Optional)\n\n  A set of docker filters used to select the network to connect to the container.\n  \n  If not filter is set, all docker networks are considered.\n\n- **port**: (Optional)\n\n  This field is used to specify the gNMI port for the discovered containers.\n  \n  An integer can be specified in which case it will be used as the gNMI port for all discovered containers.\n  \n  Alternatively, a string in the format 
`label=<label_name>` can be set, where `<label_name>` is a docker label containing the gNMI port value.\n  \n  If no value is set, the global flag/value `port` is used.\n\n- **config**: (Optional)\n\n  A set of configuration parameters to be applied to all discovered targets by the container filter.\n\n  The target config fields as defined [here](../targets.md#target-configuration-options) can be set, except `name` and `address` which are discovered by the loader.\n\n#### Examples\n\n##### Simple1\n\nA simple docker loader with a single docker container filter.\n\nIt loads all containers deployed with [containerlab](https://containerlab.srlinux.dev/), in lab called `lab1`.\n\n```yaml\nloader:\n  type: docker\n  filters:\n    - containers:\n        - label: containerlab=lab1\n```\n\nIn the above example, `gnmic` docker loader connects to the local Docker Daemon.\n\nIt will discover containers having label `containerlab=lab1` and add them as gNMI targets.\n\nDefault configuration applies to those added targets\n\n##### Simple2\n\nA simple docker loader with a single docker container filter.\n\nIt loads all containers deployed with [containerlab](https://containerlab.srlinux.dev/), having kind `srl`.\n\n```yaml\nloader:\n  type: docker\n  filters:\n    - containers:\n        - label: clab-node-kind=srl\n```\n\nIn the above example, `gnmic` docker loader connects to the local Docker Daemon.\n\nIt will discover containers having label `clab-node-kind=srl` and add them as gNMI targets.\n\nDefault configuration applies to those added targets\n\n##### Advanced Example\n\nA more advanced docker loader, with 2 filers, custom networks, ports and target configuration.\n\n```yaml\nloader:\n  type: docker\n  address: unix:///var/run/docker.sock\n  filters:\n    # filter 1\n    - containers:\n        # containers returned by `docker ps -f \"label=clab-node-kind=srl\"`\n        - label: clab-node-kind=srl\n      network:\n        # networks returned by `docker network ls -f 
\"label=containerlab\"`\n        label: containerlab\n      port: \"57400\"\n      config:\n        username: admin\n        password: secret1\n        skip-verify: true\n    # filter 2\n    - containers:\n        # containers returned by `docker ps -f \"label=clab-node-kind=ceos\"`\n        - label: clab-node-kind=ceos\n        # containers returned by `docker ps -f \"label=clab-node-kind=vr-sros\"`\n        - label: clab-node-kind=vr-sros\n      network:\n        # networks returned by `docker network ls -f \"name=mgmt\"`\n        name: mgmt\n      # the value of label=gnmi-port exported by each container`\n      port: \"label=gnmi-port\"\n      config:\n        username: admin\n        password: secret2\n        insecure: true\n```\n\nIn the above example, `gnmic` docker loader connects to the docker daemon using the local unix socket address.\n\nIt will discover 2 sets of containers matching 2 filters:\n\n- Filter1:\n    - Containers with label `clab-node-kind=srl`.\n    - Use network with label `containerlab` to connect to them.\n    - The port number is the same for all containers and is set to `57400`.\n    - The config fields `username: admin`, `password: secret1` and `skip-verify: true` will be applied to all the containers discovered by this filter.\n\n- Filter2:\n    - Containers with labels `clab-node-kind-ceos` or `clab-node-vr-sros`\n    - Use network with `name=mgmt` to connect to them. Note that Docker returns all networks with names containing `mgmt`\n    - The port number is discovered from the label `gnmi-port` set on each container.\n    - The config fields `username: admin`, `password: secret2` and `insecure: true` will be applied to all the containers discovered by this filter.\n"
  },
  {
    "path": "docs/user_guide/targets/target_discovery/file_discovery.md",
    "content": "\n`gnmic` is able to watch changes happening to a file that contains the gNMI targets configuration.\n\nThe file can be located in the local file system or a remote one.\n\nIn case of remote file, `ftp`, `sftp`, `http(s)` protocols are supported.\nThe read timeout of remote files is set to half of the read `interval`\n\nNewly added targets are discovered and subscribed to.\nDeleted targets are moved from gNMIc's list and their subscriptions are terminated.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:1,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/target_discovery.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Ftarget_discovery.drawio\" async></script>\n\n#### Configuration\n\nA file target loader can be configured in a couple of ways:\n\n- using the `--targets-file` flag:\n\n``` bash\ngnmic --targets-file ./targets-config.yaml subscribe\n```\n\n``` bash\ngnmic --targets-file sftp://user:pass@server.com/path/to/targets-file.yaml subscribe\n```\n\n- using the main configuration file:\n  \n``` yaml\nloader:\n  type: file\n  # path to the file\n  path: ./targets-config.yaml\n  # watch interval at which the file\n  # is read again to determine if a target was added or deleted.\n  interval: 30s\n  # time to wait before the first file read\n  start-delay: 0s\n  # if true, registers fileLoader prometheus metrics with the provided\n  # prometheus registry\n  enable-metrics: false\n  # list of actions to run on target discovery\n  on-add:\n  # list of actions to run on target removal\n  on-delete:\n  # variable dict to pass 
to actions to be run\n  vars:\n  # path to variable file, the variables defined will be passed to the actions to be run\n  # values in this file will be overwritten by the ones defined in `vars`\n  vars-file:\n```\n\nThe `--targets-file` flag takes precedence over the `loader` configuration section.\n\nThe targets file can be either a `YAML` or a `JSON` file (identified by its extension json, yaml or yml), and follows the same format as the main configuration file `targets` section.\nSee [here](../../../user_guide/targets/targets.md#target-option)\n\n### Examples\n\n#### Local File\n\n``` yaml\nloader:\n  type: file\n  # path to the file\n  path: ./targets-config.yaml\n  # watch interval at which the file\n  # is read again to determine if a target was added or deleted.\n  interval: 30s\n  # if true, registers fileLoader prometheus metrics with the provided\n  # prometheus registry\n  enable-metrics: false\n```\n\n#### Remote File\n\nSFTP remote file\n\n``` yaml\nloader:\n  type: file\n  # path to the file\n  path: sftp://user:pass@server.com/path/to/targets-file.yaml\n  # watch interval at which the file\n  # is read again to determine if a target was added or deleted.\n  interval: 30s\n  # if true, registers fileLoader prometheus metrics with the provided\n  # prometheus registry\n  enable-metrics: false\n```\n\nFTP remote file\n\n``` yaml\nloader:\n  type: file\n  # path to the file\n  path: ftp://user:pass@server.com/path/to/targets-file.yaml\n  # watch interval at which the file\n  # is read again to determine if a target was added or deleted.\n  interval: 30s\n  # if true, registers fileLoader prometheus metrics with the provided\n  # prometheus registry\n  enable-metrics: false\n```\n\nHTTP remote file\n\n``` yaml\nloader:\n  type: file\n  # path to the file\n  path: http://user:pass@server.com/path/to/targets-file.yaml\n  # watch interval at which the file\n  # is read again to determine if a target was added or deleted.\n  interval: 30s\n  # if true, 
registers fileLoader prometheus metrics with the provided\n  # prometheus registry\n  enable-metrics: false\n```\n\n#### Targets file format\n\n=== \"YAML\"\n    ```yaml\n    10.10.10.10:\n        username: admin\n        insecure: true\n    10.10.10.11:\n        username: admin\n    10.10.10.12:\n    10.10.10.13:\n    10.10.10.14:\n    ```\n=== \"JSON\"\n    ```json\n    {\n        \"10.10.10.10\": {\n            \"username\": \"admin\",\n            \"insecure\": true\n        },\n         \"10.10.10.11\": {\n            \"username\": \"admin\"\n        },\n         \"10.10.10.12\": {},\n         \"10.10.10.13\": {},\n         \"10.10.10.14\": {}\n    }\n    ```\n\nJust like the targets in the main configuration file, the missing configuration fields get filled with the global flags,\nthe ENV variables first, the config file main section next and then the default values.\n
  },
  {
    "path": "docs/user_guide/targets/target_discovery/http_discovery.md",
    "content": "\nThe HTTP target loader can be used to query targets configurations from a remote HTTP server.\n\nIt expects a well formatted `application/json` body and a code 200 response.\n\nIt supports secure connections, basic authentication using a username and password and/or Oauth2 token based authentication.\n\n<div class=\"mxgraph\" style=\"max-width:100%;border:1px solid transparent;margin:0 auto; display:block;\" data-mxgraph=\"{&quot;page&quot;:4,&quot;zoom&quot;:1.4,&quot;highlight&quot;:&quot;#0000ff&quot;,&quot;nav&quot;:true,&quot;check-visible-state&quot;:true,&quot;resize&quot;:true,&quot;url&quot;:&quot;https://raw.githubusercontent.com/openconfig/gnmic/diagrams/diagrams/target_discovery.drawio&quot;}\"></div>\n\n<script type=\"text/javascript\" src=\"https://cdn.jsdelivr.net/gh/hellt/drawio-js@main/embed2.js?&fetch=https%3A%2F%2Fraw.githubusercontent.com%2Fkarimra%2Fgnmic%2Fdiagrams%2Ftarget_discovery.drawio\" async></script>\n\n#### Configuration\n\n``` yaml\nloader:\n  type: http\n  # resource URL, must include the http(s) schema\n  url: \n  # watch interval at which the HTTP endpoint is queried again\n  # to determine if a target was added or deleted.\n  interval: 60s\n  # HTTP request timeout\n  timeout: 50s\n  # time to wait before the fist HTTP query\n  start-delay: 0s\n  # tls config\n  tls:\n    # string, path to the CA certificate file,\n    # this will be used to verify the clients certificates when `skip-verify` is false\n    ca-file:\n    # string, client certificate file.\n    cert-file:\n    # string, client key file.\n    key-file:\n    # boolean, if true, the client will not verify the server\n    # certificate against the available certificate chain.\n    skip-verify: false\n  # username to be used with basic authentication\n  username:\n  # password to be used with basic authentication\n  password:\n  # token to be used with Oauth2 token based authentication\n  token:\n  # auth scheme (default is `Bearer`)\n  auth-scheme:\n  
# text template\n  template:\n  # path to a text template file\n  template-file:\n  # if true, registers httpLoader prometheus metrics with the provided\n  # prometheus registry\n  enable-metrics: false\n  # list of actions to run on target discovery\n  on-add:\n  # list of actions to run on target removal\n  on-delete:\n  # variable dict to pass to actions to be run\n  vars:\n  # path to variable file, the variables defined will be passed to the actions to be run\n  # values in this file will be overwritten by the ones defined in `vars`\n  vars-file:\n```\n\n#### Targets file format\n\n=== \"JSON\"\n    ```json\n    {\n        \"10.10.10.10\": {\n            \"username\": \"admin\",\n            \"insecure\": true\n        },\n         \"10.10.10.11\": {\n            \"username\": \"admin\"\n        },\n         \"10.10.10.12\": {},\n         \"10.10.10.13\": {},\n         \"10.10.10.14\": {}\n    }\n    ```\n\nJust like the targets in the main configuration file, the missing configuration fields get filled with the global flags,\nthe ENV variables first, the config file main section next and then the default values.\n
  },
  {
    "path": "docs/user_guide/targets/targets.md",
    "content": "# Targets\n\nSometimes it is needed to perform an operation on multiple devices; be it getting the same leaf value from a given set of the network elements or setting a certain configuration element to some value.\n\nFor cases like that `gnmic` offers support for multiple targets operations which a user can configure both via CLI flags as well as with the [file-based configuration](../configuration_file.md).\n\n### CLI configuration\n\nSpecifying multiple targets in the CLI is as easy as repeating the [`--address`](../../global_flags.md#address) flag.\n\n```shell\n❯ gnmic -a router1.lab.net:57400 \\\n        -a router2.lab.net:57400 \\\n        get --path /configure/system/name\n```\n\n### File-based configuration\n\nWith the file-based configuration a user has two options to specify multiple targets:\n\n* using `address` option\n* using `targets` option\n\n#### address option\n\nWith `address` option the user must provide a list of addresses. In the YAML format that would look like that:\n\n```yaml\naddress:\n  - \"router1.lab.net:57400\"\n  - \"router2.lab.net:57400\"\n```\n\nThe limitation this approach has is that it is impossible to set different credentials for the targets, they will essentially share the credentials specified in a file or via flags.\n\n#### target option\n\nWith the `targets` option it is possible to set target specific options (such as credentials, subscriptions, TLS config, outputs), and thus this option is recommended to use:\n\n```yaml\ntargets:\n  router1.lab.net:\n    timeout: 2s\n    username: r1\n    password: gnmi_pass\n  router2.lab.net:57000:\n    username: r2\n    password: gnmi_pass\n    tls-key: /path/file1\n    tls-cert: /path/file2\n```\n\nThe target address is defined as the key under the `targets` section of the configuration file. The default port (57400) can be omitted as demonstrated with `router1.lab.net` target address. 
Have a look at the [file-based targets configuration](https://github.com/openconfig/gnmic/blob/main/config.yaml) example to get a glimpse of what it is capable of.\n\nThe target inherits the globally defined options if the matching options are not set on a target level. For example, if a target doesn't have a username defined, it will use the username value set on a global level.\n\n#### secure/insecure connections\n\n`gnmic` supports both secure and insecure gRPC connections to the target.\n\n##### insecure connection\n\nUsing the `--insecure` flag it is possible to establish an insecure gRPC connection to the target.\n\n```bash\ngnmic -a router1:57400 \\\n      --insecure \\\n      get --path /configure/system/name\n```\n\n##### secure connection\n\n- A one way secure connection without target certificate verification can be established using the `--skip-verify` flag.\n\n```bash\ngnmic -a router1:57400 \\\n      --skip-verify \\\n      get --path /configure/system/name\n```\n\n- Adding target certificate verification can be done using the `--tls-ca` flag.\n\n```bash\ngnmic -a router1:57400 \\\n      --tls-ca /path/to/ca/file \\\n      get --path /configure/system/name\n```\n\n- A two way secure connection can be established using the `--tls-cert` `--tls-key` flags.\n\n```bash\ngnmic -a router1:57400 \\\n      --tls-cert /path/to/certificate/file \\\n      --tls-key /path/to/certificate/file \\\n      get --path /configure/system/name\n```\n\n- It is also possible to control the negotiated TLS version using the `--tls-min-version`, `--tls-max-version` and `--tls-version` (preferred TLS version) flags.\n\n##### Controlling the advertised cipher suites\n\nIt's possible to configure the advertised cipher suites gNMIc's gNMI client advertises to the target.\nThis can be done by setting the `tls-min-version` and `tls-max-version` or by explicitly listing cipher suites to be advertised.\n\nBy default the below list is advertised:\n\n| Name                                
           | Key Exchange | Auth      | Enc                  | MAC       |\n|------------------------------------------------|--------------|-----------|----------------------|-----------|\n| TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256        | ECDHE        | ECDSA     | AES_128_GCM          | SHA256    |\n| TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256          | ECDHE        | RSA       | AES_128_GCM          | SHA256    |\n| TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384        | ECDHE        | ECDSA     | AES_256_GCM          | SHA384    |\n| TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384          | ECDHE        | RSA       | AES_256_GCM          | SHA384    |\n| TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256  | ECDHE        | ECDSA     | CHACHA20_POLY1305    | SHA256    |\n| TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256    | ECDHE        | RSA       | CHACHA20_POLY1305    | SHA256    |\n| TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA           | ECDHE        | ECDSA     | AES_128_CBC          | SHA       |\n| TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA             | ECDHE        | RSA       | AES_128_CBC          | SHA       |\n| TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA           | ECDHE        | ECDSA     | AES_256_CBC          | SHA       |\n| TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA             | ECDHE        | RSA       | AES_256_CBC          | SHA       |\n| TLS_RSA_WITH_AES_128_GCM_SHA256                | RSA          | RSA       | AES_128_GCM          | SHA256    |\n| TLS_RSA_WITH_AES_256_GCM_SHA384                | RSA          | RSA       | AES_256_GCM          | SHA384    |\n| TLS_RSA_WITH_AES_128_CBC_SHA                   | RSA          | RSA       | AES_128_CBC          | SHA       |\n| TLS_RSA_WITH_AES_256_CBC_SHA                   | RSA          | RSA       | AES_256_CBC          | SHA       |\n| TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA            | ECDHE        | RSA       | 3DES_EDE_CBC         | SHA       |\n| TLS_RSA_WITH_3DES_EDE_CBC_SHA                  | RSA          | RSA       | 3DES_EDE_CBC         
| SHA       |\n| TLS_AES_128_GCM_SHA256                         | (TLS 1.3)    | (TLS 1.3) | AES_128_GCM          | SHA256    |\n| TLS_AES_256_GCM_SHA384                         | (TLS 1.3)    | (TLS 1.3) | AES_256_GCM          | SHA384    |\n| TLS_CHACHA20_POLY1305_SHA256                   | (TLS 1.3)    | (TLS 1.3) | CHACHA20_POLY1305    | SHA256    |\n\nIf the `tls-max-version` is set to \"1.2\", the TLS1.3 cipher suites will not be included:\n\n| Name                                           | Key Exchange | Auth  | Enc               | MAC       |\n|------------------------------------------------|--------------|-------|-------------------|-----------|\n| TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256        | ECDHE        | ECDSA | AES_128_GCM       | SHA256    |\n| TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256          | ECDHE        | RSA   | AES_128_GCM       | SHA256    |\n| TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384        | ECDHE        | ECDSA | AES_256_GCM       | SHA384    |\n| TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384          | ECDHE        | RSA   | AES_256_GCM       | SHA384    |\n| TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256  | ECDHE        | ECDSA | CHACHA20_POLY1305 | SHA256    |\n| TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256    | ECDHE        | RSA   | CHACHA20_POLY1305 | SHA256    |\n| TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA           | ECDHE        | ECDSA | AES_128_CBC       | SHA       |\n| TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA             | ECDHE        | RSA   | AES_128_CBC       | SHA       |\n| TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA           | ECDHE        | ECDSA | AES_256_CBC       | SHA       |\n| TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA             | ECDHE        | RSA   | AES_256_CBC       | SHA       |\n| TLS_RSA_WITH_AES_128_GCM_SHA256                | RSA          | RSA   | AES_128_GCM       | SHA256    |\n| TLS_RSA_WITH_AES_256_GCM_SHA384                | RSA          | RSA   | AES_256_GCM       | SHA384    |\n| TLS_RSA_WITH_AES_128_CBC_SHA                   
| RSA          | RSA   | AES_128_CBC       | SHA       |\n| TLS_RSA_WITH_AES_256_CBC_SHA                   | RSA          | RSA   | AES_256_CBC       | SHA       |\n| TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA            | ECDHE        | RSA   | 3DES_EDE_CBC      | SHA       |\n| TLS_RSA_WITH_3DES_EDE_CBC_SHA                  | RSA          | RSA   | 3DES_EDE_CBC      | SHA       |\n\nIf the `tls-max-version` and `tls-min-version` are set to \"1.1\", the below list of cipher suites is advertised:\n\n| Name                                   | Key Exchange | Auth  | Enc          | MAC |\n|----------------------------------------|--------------|-------|--------------|-----|\n| TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA   | ECDHE        | ECDSA | AES_128_CBC  | SHA |\n| TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA     | ECDHE        | RSA   | AES_128_CBC  | SHA |\n| TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA   | ECDHE        | ECDSA | AES_256_CBC  | SHA |\n| TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA     | ECDHE        | RSA   | AES_256_CBC  | SHA |\n| TLS_RSA_WITH_AES_128_CBC_SHA           | RSA          | RSA   | AES_128_CBC  | SHA |\n| TLS_RSA_WITH_AES_256_CBC_SHA           | RSA          | RSA   | AES_256_CBC  | SHA |\n| TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA    | ECDHE        | RSA   | 3DES_EDE_CBC | SHA |\n| TLS_RSA_WITH_3DES_EDE_CBC_SHA          | RSA          | RSA   | 3DES_EDE_CBC | SHA |\n\nIf you want to control which cipher suites are sent and in what order of preference, you can set the `cipher-suites` field under the target:\n\n```yaml\ntargets:\n  target1:\n    # other fields\n    cipher-suites:\n      - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\n      - TLS_AES_128_GCM_SHA256\n```\n\nThe full list of supported cipher suites is:\n\n| Name                                           | Key Exchange | Auth      | Enc                | MAC       |\n|------------------------------------------------|--------------|-----------|--------------------|-----------|\n| TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256    
    | ECDHE        | ECDSA     | AES_128_GCM        | SHA256    |\n| TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256          | ECDHE        | RSA       | AES_128_GCM        | SHA256    |\n| TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384        | ECDHE        | ECDSA     | AES_256_GCM        | SHA384    |\n| TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384          | ECDHE        | RSA       | AES_256_GCM        | SHA384    |\n| TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256  | ECDHE        | ECDSA     | CHACHA20_POLY1305  | SHA256    |\n| TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256    | ECDHE        | RSA       | CHACHA20_POLY1305  | SHA256    |\n| TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA           | ECDHE        | ECDSA     | AES_128_CBC        | SHA       |\n| TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA             | ECDHE        | RSA       | AES_128_CBC        | SHA       |\n| TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA           | ECDHE        | ECDSA     | AES_256_CBC        | SHA       |\n| TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA             | ECDHE        | RSA       | AES_256_CBC        | SHA       |\n| TLS_RSA_WITH_AES_128_GCM_SHA256                | RSA          | RSA       | AES_128_GCM        | SHA256    |\n| TLS_RSA_WITH_AES_256_GCM_SHA384                | RSA          | RSA       | AES_256_GCM        | SHA384    |\n| TLS_RSA_WITH_AES_128_CBC_SHA                   | RSA          | RSA       | AES_128_CBC        | SHA       |\n| TLS_RSA_WITH_AES_256_CBC_SHA                   | RSA          | RSA       | AES_256_CBC        | SHA       |\n| TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA            | ECDHE        | RSA       | 3DES_EDE_CBC       | SHA       |\n| TLS_RSA_WITH_3DES_EDE_CBC_SHA                  | RSA          | RSA       | 3DES_EDE_CBC       | SHA       |\n| TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256        | ECDHE        | ECDSA     | AES_128_CBC        | SHA256    |\n| TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256          | ECDHE        | RSA       | AES_128_CBC        | SHA256    |\n| 
TLS_RSA_WITH_AES_128_CBC_SHA256                | RSA          | RSA       | AES_128_CBC        | SHA256    |\n| TLS_ECDHE_ECDSA_WITH_RC4_128_SHA               | ECDHE        | ECDSA     | RC4_128            | SHA       |\n| TLS_ECDHE_RSA_WITH_RC4_128_SHA                 | ECDHE        | RSA       | RC4_128            | SHA       |\n| TLS_RSA_WITH_RC4_128_SHA                       | RSA          | RSA       | RC4_128            | SHA       |\n| TLS_AES_128_GCM_SHA256                         | (TLS 1.3)    | (TLS 1.3) | AES_128_GCM        | SHA256    |\n| TLS_AES_256_GCM_SHA384                         | (TLS 1.3)    | (TLS 1.3) | AES_256_GCM        | SHA384    |\n| TLS_CHACHA20_POLY1305_SHA256                   | (TLS 1.3)    | (TLS 1.3) | CHACHA20_POLY1305  | SHA256    |\n\n#### target configuration options\n\nTarget supported options:\n\n```yaml\ntargets:\n  # target name or an address (IP or DNS name).\n  # if an address is set it can include a port number or not,\n  # if a port is not included, the default gRPC port will be added.\n  target_key:\n    # target name, will default to the target_key if not specified\n    name: target_key\n    # target address, if missing the target_key is used as an address.\n    # supports comma separated addresses.\n    # if any of the addresses is missing a port, the default gRPC port will be added.\n    # if multiple addresses are set, all of them will be tried simultaneously,\n    # the first established gRPC connection will be used, the other attempts will be canceled.\n    address:\n    # target username\n    username:\n    # target password\n    password:\n    # authentication token, \n    # applied only in the case of a secure gRPC connection.\n    token: \n    # target RPC timeout\n    timeout:\n    # establish an insecure connection\n    insecure:\n    # path to tls ca file\n    tls-ca:\n    # path to tls certificate\n    tls-cert:\n    # path to tls key\n    tls-key:\n    # max tls version to use during negotiation\n    
tls-max-version:\n    # min tls version to use during negotiation\n    tls-min-version:\n    # preferred tls version to use during negotiation\n    # this value overwrites both tls-min-version and \n    # tls-max-version\n    tls-version:\n    # enable logging of a pre-master TLS secret\n    log-tls-secret:\n    # do not verify the target certificate when using tls\n    skip-verify:\n    # server name used to verify the hostname on the returned \n    # certificates unless skip-verify is true.    \n    tls-server-name:\n    # list of subscription names to establish for this target.\n    # if empty it defaults to all subscriptions defined under\n    # the main level `subscriptions` field\n    subscriptions:\n    # string, case insensitive, defines the gNMI encoding to be used for \n    # the subscriptions to be established for this target.\n    # This encoding value applies only if the subscription configuration does\n    # NOT explicitly define an encoding.\n    encoding:\n    # list of output names to which the gnmi data will be written.\n    # if empty it defaults to all outputs defined under\n    # the main level `outputs` field\n    outputs:\n    # number of subscribe responses to keep in buffer before writing\n    # the target outputs\n    buffer-size:\n    # target retry period\n    retry-timer:\n    # list of tags, relevant when clustering is enabled.\n    tags:\n    # a mapping of static tags to add to all events from this target.\n    # each key/value pair in this mapping will be added to metadata\n    # on all events\n    event-tags:\n    # list of proto file names to decode protoBytes values\n    proto-files:\n    # list of directories to look for the proto files\n    proto-dirs:\n    # enable grpc gzip compression\n    gzip: \n    # proxy type and address, only SOCKS5 is supported currently\n    # example: socks5://<address>:<port>\n    proxy:\n    # list of custom TLS cipher suites to advertise to the target \n    # during the TLS handshake.\n    
cipher-suites:\n    # a duration, sets the TCP keepalive time and keepalive interval.\n    # The number of keepalive probes to send before sending a TCP RST\n    # is not configurable, it inherits its value from the linux kernel\n    # net.ipv4.tcp_keepalive_probes which usually has a default value of 9.\n    # When empty or set to 0s, the Golang default (15s) applies.\n    # Disabled if set to a negative value.\n    tcp-keepalive: 0s\n    # sets gRPC keepalive parameters according to: \n    # https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md\n    grpc-keepalive:\n      # After a duration of this time if the client doesn't see any activity \n      # it pings the server to see if the transport is still alive. \n      # If set below 10s, a minimum value of 10s will be used instead.\n      time:\n      # After having pinged for keepalive check, the client waits \n      # for a duration of Timeout and if no activity is seen even \n      # after that the connection is closed.\n      timeout:\n      # If true, client sends keepalive pings even with no active RPCs. \n      # If false, when there are no active RPCs, \n      # Time and Timeout will be ignored and no keepalive pings will be sent.\n      permit-without-stream: false\n    # set how much data (in bytes) can be read at most for each read syscall.\n    # The default value for this buffer is 32KB. Zero or negative values will \n    # disable read buffer for a connection so data framer can access the underlying conn directly.\n    grpc-read-buffer-size: \n    # determines how much data (in bytes) can be batched before doing a write on the wire. \n    # The default value for this buffer is 32KB.\n    # Zero or negative values will disable the write buffer such that each write will be on underlying connection. \n    # Note: A Send call may not directly translate to a write.\n    grpc-write-buffer-size:\n    # sets the value for initial window size on a connection. 
The lower bound for window size is 64K and any value smaller than that will be ignored.\n    grpc-conn-window-size:\n    # sets the value for initial window size on a stream. The lower bound for window size is 64K and any value smaller than that will be ignored.\n    grpc-window-size:\n    # sets the initial connection window size to the value provided and disables dynamic flow control.\n    grpc-static-conn-window-size:\n    # sets the initial stream window size to the value provided and disables dynamic flow control.\n    grpc-static-stream-window-size:\n```\n\n### Example\n\nWhatever configuration option you choose, the multi-targeted operations will uniformly work across the commands that support them.\n\nConsider the `get` command acting on two routers getting their names:\n\n```shell\n❯ gnmic -a router1.lab.net:57400 \\\n        -a router2.lab.net:57400 \\\n        get --path /configure/system/name\n\n[router1.lab.net:57400] {\n[router1.lab.net:57400]   \"source\": \"router1.lab.net:57400\",\n[router1.lab.net:57400]   \"timestamp\": 1593009759618786781,\n[router1.lab.net:57400]   \"time\": \"2020-06-24T16:42:39.618786781+02:00\",\n[router1.lab.net:57400]   \"updates\": [\n[router1.lab.net:57400]     {\n[router1.lab.net:57400]       \"Path\": \"configure/system/name\",\n[router1.lab.net:57400]       \"values\": {\n[router1.lab.net:57400]         \"configure/system/name\": \"gnmic_r1\"\n[router1.lab.net:57400]       }\n[router1.lab.net:57400]     }\n[router1.lab.net:57400]   ]\n[router1.lab.net:57400] }\n\n[router2.lab.net:57400] {\n[router2.lab.net:57400]   \"source\": \"router2.lab.net:57400\",\n[router2.lab.net:57400]   \"timestamp\": 1593009759748265232,\n[router2.lab.net:57400]   \"time\": \"2020-06-24T16:42:39.748265232+02:00\",\n[router2.lab.net:57400]   \"updates\": [\n[router2.lab.net:57400]     {\n[router2.lab.net:57400]       \"Path\": \"configure/system/name\",\n[router2.lab.net:57400]       \"values\": {\n[router2.lab.net:57400]         
\"configure/system/name\": \"gnmic_r2\"\n[router2.lab.net:57400]       }\n[router2.lab.net:57400]     }\n[router2.lab.net:57400]   ]\n[router2.lab.net:57400] }\n```\n\nNotice how in the output the different gNMI targets are prefixed with the target address to make the output easy to read. If those prefixes are not needed, you can make them disappear with [`--no-prefix`](../../global_flags.md#no-prefix) global flag.\n"
  },
  {
    "path": "docs/user_guide/targets/targets_session_sec.md",
    "content": "# Targets session security\n\nIn line with the guidelines detailed in the [gNMI Specification](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#31-session-security-authentication-and-rpc-authorization), it is mandatory to establish an encrypted TLS session between the client and the server. This measure is essential to ensure secure communication within the gNMI protocol.\n\n```text\nThe session between the client and server MUST be encrypted using TLS - \nand a target or client MUST NOT fall back to unencrypted sessions. \nThe target and client SHOULD implement TLS >= 1.2.\n```\n\n`gNMIc` provides the ability to tailor and modify the TLS session parameters of the gNMI client according to your specific requirements.\n\n## TLS session types\n\nWhen it comes to establishing a TLS session using `gNMIc`, various options are available to suit different use cases and environmental requirements. Whether it's a one-way TLS session, a session without certificate validation, or a mutual TLS (mTLS) session, each type caters to specific needs. The selection largely depends on the user's scenario and the degree of security and validation necessary. The upcoming sections will detail each of these session types, offering guidelines to aid in choosing the most appropriate for your specific requirements.\n\n### Simple TLS session w/o server certificate validation\n\nFor scenarios requiring a simple TLS session without server certificate validation, such as in certain testing or development environments, you can use gNMIc's `--skip-verify` flag or the `skip-verify` attribute. This mode bypasses the typical certificate verification process and establishes a secure connection without validating the server's identity. Please exercise caution when using this feature, as it may expose the connection to potential security vulnerabilities. 
It is recommended primarily for non-production environments or controlled testing situations.\n\n=== \"cli\"\n    ```shell\n    gnmic -a router1 --skip-verify \\\n                 get --path /interface/oper-state\n    ```\n=== \"file\"\n    ```yaml\n    targets:\n      router1:\n        address: router1\n        skip-verify: true\n    ```\n\n### Simple TLS session with server certificate validation\n\nWhen establishing a simple TLS session with server certificate validation for enhanced security, gNMIc offers the --tls-ca flag or the tls-ca attribute. These options allow you to point to a Certificate Authority (CA) certificate file. By doing so, the session not only ensures encrypted communication but also verifies the server's identity through its certificate. This validation process greatly enhances the security of the connection, ensuring the client is communicating with the intended server. It's an advisable setting for production environments where data security and integrity are crucial.\n\n=== \"cli\"\n    ```shell\n    gnmic -a router1 --tls-ca ./ca.pem \\\n                    get --path /interface/oper-state\n    ```\n=== \"file\"\n    ```yaml\n    targets:\n      router1:\n        address: router1\n        tls-ca: ./ca.pem\n    ```\n\n### Simple TLS session with server certificate validation and server name override\n\nThere are circumstances where the server's identity, as indicated by its certificate, doesn't match its expected hostname. For such scenarios, gNMIc enables the initiation of a simple TLS session with both server certificate validation and server name override. This functionality can be utilized by employing the `--tls-server-name` flag or the `tls-server-name` attribute.\n\nBy overriding the server name in the TLS session, users can specify a different hostname that matches the server's certificate, even if it's not the actual hostname of the server. 
This allows for successful validation and secure communication even in cases of server name discrepancies due to reasons like load balancing, proxying, etc...\n\nThis feature is particularly beneficial in complex network scenarios or during migrations, where server names might not yet align with their certificates. By ensuring both secure encrypted communication and flexible server name accommodation, it adds an extra layer of adaptability for secure communication, particularly in dynamic or complex network environments.\n\n=== \"cli\"\n    ```shell\n    gnmic -a router1 --tls-ca ./ca.pem \\\n                    --tls-server-name server1 \\\n                    get --path /interface/oper-state\n    ```\n=== \"file\"\n    ```yaml\n    targets:\n      router1:\n        address: router1\n        tls-ca: ./ca.pem\n        tls-server-name: server1\n    ```\n\n### Mutual TLS (mTLS) session\n\nFor heightened security scenarios, gNMIc supports mutual TLS (mTLS) sessions. mTLS not only verifies the server's identity to the client, but also the client's identity to the server. This reciprocal verification is achieved using the --tls-cert and --tls-key flags, or the tls-cert and tls-key attributes. These options allow the user to specify a client certificate and client key, respectively.\n\nBy providing a client certificate (`--tls-cert` or `tls-cert` attribute) and a client key (`--tls-key` or `tls-key` attribute), gNMIc allows the server to confirm the identity of the client, ensuring that the client is legitimate and authorized to access the server resources.\n\nMutual TLS is particularly beneficial in use cases where both ends of a connection need to confirm the other's identity, providing a significantly higher level of trust and security. 
It reduces the risk of man-in-the-middle attacks and is especially valuable in environments where sensitive data is transmitted or strict access control is required.\n\n=== \"cli\"\n    ```shell\n    gnmic -a router1 --tls-ca ./ca.pem \\\n                    --tls-cert ./router1.cert \\\n                    --tls-key ./router.key \\\n                    get --path /interface/oper-state\n    ```\n=== \"file\"\n    ```yaml\n    targets:\n      router1:\n        address: router1\n        tls-ca: ./ca.pem\n        tls-cert: ./router1.cert\n        tls-key: ./router1.key\n    ```\n\n### mTLS session with server name override\n\n=== \"cli\"\n    ```shell\n    gnmic -a router1 --tls-ca ./ca.pem \\\n                    --tls-server-name server1 \\\n                    --tls-cert ./router1.cert \\\n                    --tls-key ./router.key \\\n                    get --path /interface/oper-state\n    ```\n=== \"file\"\n    ```yaml\n    targets:\n      router1:\n        address: router1\n        tls-ca: ./ca.pem\n        tls-server-name: server1\n        tls-cert: ./router1.cert\n        tls-key: ./router1.key\n    ```\n\n## Configuring the client's TLS version\n\nBy default, `gNMIc` establishes a TLS session using the Golang's default TLS version (1.2), minimum version (1.2), and maximum version (1.3).\n\nHowever, there might be scenarios where users need to control the TLS session negotiation to either test the server behavior or force the session into a specific version. To accommodate these needs, gNMIc provides flexibility by allowing users to explicitly set the TLS version.\n\nUsers can manipulate the negotiated TLS version using the flags (or target attributes) `--tls-version`, `--tls-min-version`, and `--tls-max-version`. 
These flags give control over the TLS session parameters, facilitating testing and customization of the communication session according to specific requirements.\n\nExample: Forcing the client and server to use TLS1.3\n\n=== \"cli\"\n    ```shell\n    gnmic -a router1 --tls-ca ./ca.pem \\\n                    --tls-cert ./router1.cert \\\n                    --tls-key ./router.key \\\n                    --tls-version 1.3 \\\n                    --tls-min-version 1.3 \\\n                    get --path /interface/oper-state\n    ```\n=== \"file\"\n    ```yaml\n    targets:\n      router1:\n        address: router1\n        tls-ca: ./ca.pem\n        tls-cert: ./router1.cert\n        tls-key: ./router1.key\n        tls-version: 1.3\n        tls-min-version: 1.3\n    ```\n\n## Decrypting gNMI traffic using Wireshark\n\nTo facilitate advanced debugging or network analysis, gNMIc allows for the decryption of gNMI TLS traffic using the popular network protocol analyzer, Wireshark. The `--log-tls-secret` flag is instrumental in achieving this, as it stores the session pre-master secret, which can subsequently be used to decrypt TLS traffic.\n\nWhen `--log-tls-secret` is used, the session's pre-master secret will be stored in a file named `<target-name>.tlssecret.log`. This secret enables Wireshark to decrypt the otherwise secure and encrypted TLS traffic between the client and the server.\n\nDecryption of TLS traffic is particularly useful for network troubleshooting, performance optimization, or security audits. It allows network administrators or developers to deeply inspect packet data, diagnose network issues, and better understand data flows. 
However, this practice should be used carefully and ethically, given the sensitive nature of decrypted traffic, especially in production environments.\n\n=== \"cli\"\n    ```shell\n    gnmic -a router1 --tls-ca ./ca.pem \\\n                    --log-tls-secret \\\n                    --tls-cert ./router1.cert \\\n                    --tls-key ./router.key \\\n                    get --path /interface/oper-state\n    ```\n=== \"file\"\n    ```yaml\n    targets:\n      router1:\n        address: router1\n        tls-ca: ./ca.pem\n        log-tls-secret: true\n        tls-cert: ./router1.cert\n        tls-key: ./router1.key\n    ```\n"
  },
  {
    "path": "docs/user_guide/tunnel_server.md",
    "content": "# Tunnel Server\n\n## Introduction\n\n`gNMIc` supports gNMI Dial-out as defined by [`openconfig/grpctunnel`](https://github.com/openconfig/grpctunnel).\n\n`gNMIc` embeds a tunnel server to which the gNMI targets register. Once registered, `gNMIc` triggers the requested gNMI RPC towards the target via the established tunnel.\n\nThis use case is described [here](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmignoissh-dialout-grpctunnel.md#gnmi-collector-with-embedded-tunnel-server)\n\n## Server operation\n\nWhen running a Subscribe RPC using `gNMIc` with the flag `--use-tunnel-server`, `gNMIc` starts by running the Tunnel server as defined under `tunnel-server`.\n\nThe next steps depend on the type of RPC (Unary/Stream) and/or Subscribe Mode (poll/once/stream).\n\n### Unary RPCs\n\n`gNMIc` waits for `tunnel-server.target-wait-time` for targets to register with the tunnel server, after which it requests a new session from the server for the specified target(s) and runs the RPC through the newly established tunnel.\n\nNote that if no target is specified, the RPC runs for all registered targets.\n\n```bash\n$ cat tunnel_server_config.yaml\ninsecure: true\nlog: true\nusername: admin\npassword: NokiaSrl1!\n\ntunnel-server:\n  address: \":57401\"\n```\n\n```bash\n$ gnmic --config tunnel_server_config.yaml \\\n      --use-tunnel-server \\\n      get \\\n      --path /configure/system/name\n2022/03/09 10:12:34.729037 [gnmic] version=dev, commit=none, date=unknown, gitURL=, docs=https://gnmic.openconfig.net\n2022/03/09 10:12:34.729063 [gnmic] using config file \"tunnel_server_config.yaml\"\n2022/03/09 10:12:34.730472 [gnmic] waiting for targets to register with the tunnel server...\n2022/03/09 10:12:36.435521 [gnmic] tunnel server discovered target {ID:sr1 Type:GNMI_GNOI}\n2022/03/09 10:12:36.436332 [gnmic] tunnel server discovered target {ID:sr2 Type:GNMI_GNOI}\n2022/03/09 10:12:36.731125 [gnmic] adding target 
{\"name\":\"sr1\",\"address\":\"sr1\",\"username\":\"admin\",\"password\":\"NokiaSrl1!\",\"timeout\":10000000000,\"insecure\":true,\"skip-verify\":false,\"subscriptions\":[\"sub1\"],\"retry-timer\":10000000000,\"log-tls-secret\":false,\"gzip\":false,\"token\":\"\"}\n2022/03/09 10:12:36.731158 [gnmic] adding target {\"name\":\"sr2\",\"address\":\"sr2\",\"username\":\"admin\",\"password\":\"NokiaSrl1!\",\"timeout\":10000000000,\"insecure\":true,\"skip-verify\":false,\"subscriptions\":[\"sub1\"],\"retry-timer\":10000000000,\"log-tls-secret\":false,\"gzip\":false,\"token\":\"\"}\n2022/03/09 10:12:36.731651 [gnmic] sending gNMI GetRequest: prefix='<nil>', path='[elem:{name:\"configure\"}  elem:{name:\"system\"}  elem:{name:\"name\"}]', type='ALL', encoding='JSON', models='[]', extension='[]' to sr1\n2022/03/09 10:12:36.731742 [gnmic] sending gNMI GetRequest: prefix='<nil>', path='[elem:{name:\"configure\"}  elem:{name:\"system\"}  elem:{name:\"name\"}]', type='ALL', encoding='JSON', models='[]', extension='[]' to sr2\n2022/03/09 10:12:36.732337 [gnmic] dialing tunnel connection for tunnel target \"sr2\"\n2022/03/09 10:12:36.732572 [gnmic] dialing tunnel connection for tunnel target \"sr1\"\n[sr1] [\n[sr1]   {\n[sr1]     \"source\": \"sr1\",\n[sr1]     \"timestamp\": 1646849561604621769,\n[sr1]     \"time\": \"2022-03-09T10:12:41.604621769-08:00\",\n[sr1]     \"updates\": [\n[sr1]       {\n[sr1]         \"Path\": \"configure/system/name\",\n[sr1]         \"values\": {\n[sr1]           \"configure/system/name\": \"sr1\"\n[sr1]         }\n[sr1]       }\n[sr1]     ]\n[sr1]   }\n[sr1] ]\n[sr2] [\n[sr2]   {\n[sr2]     \"source\": \"sr2\",\n[sr2]     \"timestamp\": 1646849562004804732,\n[sr2]     \"time\": \"2022-03-09T10:12:42.004804732-08:00\",\n[sr2]     \"updates\": [\n[sr2]       {\n[sr2]         \"Path\": \"configure/system/name\",\n[sr2]         \"values\": {\n[sr2]           \"configure/system/name\": \"sr2\"\n[sr2]         }\n[sr2]       }\n[sr2]     ]\n[sr2]   
}\n[sr2] ]\n```\n\n### Subscribe RPC\n\n#### Poll and Once subscription\n\nWhen a Poll or Once subscription is requested, `gNMIc` behaves the same way as for a unary RPC, i.e. waits for targets to register then runs the RPC.\n\n#### Stream subscription\n\nIn the case of a stream subscription, `gNMIc` triggers the Subscribe RPC as soon as a target registers.\nSimilarly, a stream subscription will be stopped when a target deregisters from the tunnel server.\n\n## Configuration\n\n```yaml\ntunnel-server:\n  # the address the tunnel server will listen to\n  address:\n  # tls config\n  tls:\n    # string, path to the CA certificate file,\n    # this certificate is used to verify the clients certificates.\n    ca-file:\n    # string, server certificate file.\n    cert-file:\n    # string, server key file.\n    key-file:\n    # string, one of \"\", \"request\", \"require\", \"verify-if-given\", or \"require-verify\" \n    #  - request:         The server requests a certificate from the client but does not \n    #                     require the client to send a certificate. \n    #                     If the client sends a certificate, it is not required to be valid.\n    #  - require:         The server requires the client to send a certificate and does not \n    #                     fail if the client certificate is not valid.\n    #  - verify-if-given: The server requests a certificate, \n    #                     does not fail if no certificate is sent. 
\n    #                     If a certificate is sent it is required to be valid.\n    #  - require-verify:  The server requires the client to send a valid certificate.\n    #\n    # if no ca-file is present, `client-auth` defaults to \"\"\n    # if a ca-file is set, `client-auth` defaults to \"require-verify\"\n    client-auth: \"\"\n  # the wait time before triggering unary RPCs or subscribe poll/once\n  target-wait-time: 2s\n  # enables the collection of Prometheus gRPC server metrics\n  enable-metrics: false\n  # enable additional debug logs\n  debug: false\n```\n\n## Combining Tunnel server with a gNMI server\n\nIt is possible to start `gNMIc` with both a `gnmi-server` and `tunnel-server` enabled.\n\nThis mode allows running gNMI RPCs against `gNMIc`'s gNMI server; they will be routed to the relevant targets (`--target` flag) or to all known targets (i.e. registered targets).\n\nThe configuration file would look like:\n\n```yaml\ninsecure: true\nusername: admin\npassword: NokiaSrl1!\n\nsubscriptions:\n  sub1:\n    paths:\n      - /state/port\n    sample-interval: 10s\n\ngnmi-server:\n  address: :57400\n\ntunnel-server:\n  address: :57401\n  targets:\n    - id: .*\n      type: GNMI_GNOI\n      config:\n        subscriptions:\n          - sub1\n```\n\nRunning a Get RPC towards all registered targets\n\n```bash\n$ gnmic -a localhost:57400 --insecure get \\\n        --path /configure/system/name\n[\n  {\n    \"source\": \"localhost\",\n    \"timestamp\": 1646850987401608313,\n    \"time\": \"2022-03-09T10:36:27.401608313-08:00\",\n    \"target\": \"sr2\",\n    \"updates\": [\n      {\n        \"Path\": \"configure/system/name\",\n        \"values\": {\n          \"configure/system/name\": \"sr2\"\n        }\n      }\n    ]\n  },\n  {\n    \"source\": \"localhost\",\n    \"timestamp\": 1646850987205206394,\n    \"time\": \"2022-03-09T10:36:27.205206394-08:00\",\n    \"target\": \"sr1\",\n    \"updates\": [\n      {\n        \"Path\": \"configure/system/name\",\n        
\"values\": {\n          \"configure/system/name\": \"sr1\"\n        }\n      }\n    ]\n  }\n]\n```\n\nRunning a Get RPC towards a single target\n\n```bash\n$ gnmic -a localhost:57400 --insecure \\\n        --target sr1 \\\n        get --path /configure/system/name\n[\n  {\n    \"source\": \"localhost\",\n    \"timestamp\": 1646851044004381267,\n    \"time\": \"2022-03-09T10:37:24.004381267-08:00\",\n    \"target\": \"sr1\",\n    \"updates\": [\n      {\n        \"Path\": \"configure/system/name\",\n        \"values\": {\n          \"configure/system/name\": \"sr1\"\n        }\n      }\n    ]\n  }\n]\n```\n\nFor detailed configuration of the `gnmi-server` check this [page](./gnmi_server.md)\n"
  },
  {
    "path": "examples/deployments/1.single-instance/1.nats-output/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: nokia_srlinux\n        label=containerlab: lab11\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\noutputs:\n  nats-output:\n    type: nats\n    address: clab-lab11-nats:4222\n    subject: telemetry\n"
  },
  {
    "path": "examples/deployments/1.single-instance/1.nats-output/containerlab/nats.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab11\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    spine1:\n    spine2:\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n\n    gnmic:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml --log subscribe'\n\n    nats:\n      kind: linux\n      image: nats:latest  \n      ports:\n         - 4222:4222    \n\n  links:\n    # spine1 links\n    - endpoints: [\"spine1:e1-1\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-2\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-3\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-1\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-2\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-3\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf4:e1-2\"]"
  },
  {
    "path": "examples/deployments/1.single-instance/1.nats-output/docker-compose/docker-compose.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: '3' \n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic1:\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic1\n    volumes:\n      - ./gnmic1.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n    networks:\n      - gnmic-net\n    depends_on:\n      - nats\n      \n  nats:\n    image: 'nats:latest'\n    container_name: nats\n    networks:\n      - gnmic-net    \n    ports:\n      - \"4222:4222\"\n      - \"6222:6222\"\n      - \"8222:8222\"\n      "
  },
  {
    "path": "examples/deployments/1.single-instance/1.nats-output/docker-compose/gnmic1.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nencoding: json_ietf\nlog: true\n\ntargets:\n  # Add targets configuration here\n  # e.g:\n  # 192.168.1.131:57400:\n  #   username: gnmic\n  #   password: secret_password\n  #   outputs:\n  #     - nats-output\n\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  # sub1:\n  #   paths:\n  #     - /interface/statistics\n  #   stream-mode: sample\n  #   sample-interval: 1s\n\noutputs:\n  nats-output:\n    type: nats\n    address: nats:4222\n    subject: telemetry\n"
  },
  {
    "path": "examples/deployments/1.single-instance/10.prometheus-with-cache/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab110\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface[name=*]/statistics\n      - /interface[name=*]/description\n    stream-mode: sample\n    sample-interval: 10s\n    encoding: ascii\n  sub2:\n    paths:\n      - /acl/cpm-filter/ipv4-filter/entry/statistics\n      - /acl/cpm-filter/ipv6-filter/entry/statistics\n    stream-mode: sample\n    sample-interval: 10s\n    encoding: ascii \n\noutputs:\n  prom-output-redis:\n    type: prometheus\n    listen: \"clab-lab110-gnmic:9804\"\n    service-registration:\n      address: clab-lab110-consul-agent:8500\n    event-processors:\n      - group_by_interface\n    cache:\n      type: redis\n      address: \"clab-lab110-redis:6379\"\n\n  prom-output-nats:\n    type: prometheus\n    listen: \"clab-lab110-gnmic:9805\"\n    service-registration:\n      address: clab-lab110-consul-agent:8500\n    event-processors:\n      - group_by_interface\n    cache:\n      type: nats\n\n  prom-output-js:\n    type: prometheus\n    listen: \"clab-lab110-gnmic:9806\"\n    service-registration:\n      address: clab-lab110-consul-agent:8500\n    event-processors:\n      - group_by_interface\n    cache:\n      type: jetstream\n\n  prom-output-oc:\n    type: prometheus\n    listen: \"clab-lab110-gnmic:9807\"\n    service-registration:\n      
address: clab-lab110-consul-agent:8500\n    event-processors:\n      - group_by_interface\n    cache: {}\n\nprocessors:\n  group_by_interface:\n    event-group-by:\n      tags:\n        - target\n        - interface_name\n\napi-server:\n  enable-metrics: true"
  },
  {
    "path": "examples/deployments/1.single-instance/10.prometheus-with-cache/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab110-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n"
  },
  {
    "path": "examples/deployments/1.single-instance/10.prometheus-with-cache/containerlab/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-lab110-consul-agent:8500\n        services:\n          - prometheus-prom-output-js\n          - prometheus-prom-output-nats\n          - prometheus-prom-output-oc\n          - prometheus-prom-output-redis\n\n  - job_name: 'gnmic-internal'\n    scrape_interval: 10s \n    static_configs:\n      - targets:\n        - clab-lab110-gnmic:7890\n"
  },
  {
    "path": "examples/deployments/1.single-instance/10.prometheus-with-cache/containerlab/prometheus.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab110\n\nmgmt:\n  ipv4-subnet: 192.168.1.0/24\n\ntopology:\n  defaults:\n    kind: srl\n\n  kinds:\n    srl:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    spine1:\n    spine2:\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n\n    gnmic:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7890:7890\n      cmd: \"--config /app/gnmic.yaml --log subscribe\"\n\n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: \"agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0\"\n\n    prometheus:\n      kind: linux\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n    redis:\n      kind: linux\n      image: redis:7\n      
ports:\n        - 6379:6379\n      cmd: redis-server --loglevel warning\n\n  links:\n    # spine1 links\n    - endpoints: [\"spine1:e1-1\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-2\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-3\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-1\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-2\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-3\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf4:e1-2\"]\n"
  },
  {
    "path": "examples/deployments/1.single-instance/11.kafka-kraft-output/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\n# docker target loader\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: nokia_srlinux\n        label=containerlab: lab12\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\noutputs:\n  kafka-output:\n    type: kafka\n    address: clab-lab12-kafka-server:9092\n    topic: telemetry\n"
  },
  {
    "path": "examples/deployments/1.single-instance/11.kafka-kraft-output/containerlab/kafka.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab12\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    spine1:\n    spine2:\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n\n    gnmic:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml --log subscribe'\n\n    kafka-server:\n      kind: linux\n      image: bitnami/kafka:latest\n      ports:\n        - 9092:9092\n        - 9093:9093\n      env:\n        KAFKA_ENABLE_KRAFT: yes\n        ALLOW_PLAINTEXT_LISTENER: yes\n        KAFKA_CFG_NODE_ID: 0\n        KAFKA_CFG_PROCESS_ROLES: broker,controller\n        KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER\n        KAFKA_CFG_LISTENERS: PLAINTEXT://:9092,CONTROLLER://:9093\n        KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT\n        KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://clab-lab12-kafka-server:9092\n        KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 0@clab-lab12-kafka-server:9093\n\n  links:\n    # spine1 links\n    - endpoints: [\"spine1:e1-1\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-2\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-3\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-1\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-2\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-3\", 
\"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf4:e1-2\"]\n"
  },
  {
    "path": "examples/deployments/1.single-instance/2.kafka-output/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\n# docker target loader\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: nokia_srlinux\n        label=containerlab: lab12\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\noutputs:\n  kafka-output:\n    type: kafka\n    address: clab-lab12-kafka-server:9092\n    topic: telemetry\n"
  },
  {
    "path": "examples/deployments/1.single-instance/2.kafka-output/containerlab/kafka.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab12\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    spine1:\n    spine2:\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n\n    gnmic:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml --log subscribe'\n\n    kafka-server:\n      kind: linux\n      image: bitnami/kafka:3.5.2\n      ports:\n        - 9092:9092\n        - 9000:9000\n      env:\n        KAFKA_CFG_ZOOKEEPER_CONNECT: clab-lab12-zookeeper-server:2181\n        KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://clab-lab12-kafka-server:9092\n        ALLOW_PLAINTEXT_LISTENER: \"yes\"\n        JMX_PORT: 9000\n\n    zookeeper-server:\n      kind: linux\n      image: bitnami/zookeeper:latest\n      ports:\n        - 2181:2181\n      env:\n        ALLOW_ANONYMOUS_LOGIN: \"yes\"\n\n  links:\n    # spine1 links\n    - endpoints: [\"spine1:e1-1\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-2\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-3\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-1\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-2\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-3\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf4:e1-2\"]"
  },
  {
    "path": "examples/deployments/1.single-instance/2.kafka-output/docker-compose/docker-compose.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: '3' \n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic1:\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic1\n    volumes:\n      - ./gnmic1.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n    networks:\n      - gnmic-net\n    depends_on:\n      - kafka-server\n      \n  kafka-server:\n    image: 'bitnami/kafka:latest'\n    container_name: kafka-server\n    networks:\n      - gnmic-net\n    ports:\n      - \"9092:9092\"\n      - \"9000:9000\"\n    environment:\n      - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper-server:2181\n      - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka-server:9092\n      - ALLOW_PLAINTEXT_LISTENER=yes\n      - JMX_PORT=9000\n    depends_on:\n      - zookeeper-server\n\n  zookeeper-server:\n    image: 'bitnami/zookeeper:latest'\n    container_name: zk-server\n    networks:\n      - gnmic-net\n    ports:\n      - \"2181:2181\"\n    environment:\n      - ALLOW_ANONYMOUS_LOGIN=yes\n      "
  },
  {
    "path": "examples/deployments/1.single-instance/2.kafka-output/docker-compose/gnmic1.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nencoding: json_ietf\nlog: true\n\ntargets:\n  # Add targets configuration here\n  # e.g:\n  # 192.168.1.131:57400:\n  #   username: gnmic\n  #   password: secret_password\n  #   outputs:\n  #     - nats-output\n\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  # sub1:\n  #   paths:\n  #     - /interface/statistics\n  #   stream-mode: sample\n  #   sample-interval: 1s\n\noutputs:\n  kafka-output:\n    type: kafka\n    address: kafka-server:9092\n    topic: telemetry\n"
  },
  {
    "path": "examples/deployments/1.single-instance/3.influxdb-output/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: ascii\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: nokia_srlinux\n        label=containerlab: lab13\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\noutputs:\n  influxdb-output:\n    type: influxdb\n    url: http://clab-lab13-influxdb:8086\n    bucket: telemetry # db name\n    token: gnmic:gnmic123 # username:password\n    batch-size: 1000\n    flush-timer: 10s"
  },
  {
    "path": "examples/deployments/1.single-instance/3.influxdb-output/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\ndeleteDatasources:\n  - name: InfluxDB\n    orgId: 1\ndatasources:\n  - name: InfluxDB\n    type: influxdb\n    orgId: 1\n    url: http://clab-lab13-influxdb:8086\n    user: gnmic\n    password: gnmic\n    database: telemetry\n    editable: true\n"
  },
  {
    "path": "examples/deployments/1.single-instance/3.influxdb-output/containerlab/influxdb.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab13\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    spine1:\n    spine2:\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n\n    gnmic:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml --log subscribe'\n\n    influxdb:\n      kind: linux\n      image: influxdb:1.8.10\n      ports:\n        - 8086:8086\n      env:\n        INFLUXDB_DATA_ENGINE: tsm1\n        INFLUXDB_REPORTING_DISABLED: \"false\"\n        INFLUXDB_USER: gnmic\n        INFLUXDB_USER_PASSWORD: gnmic\n        INFLUXDB_DB: telemetry\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n        \n  links:\n    # spine1 links\n    - endpoints: [\"spine1:e1-1\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-2\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-3\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-1\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-2\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-3\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf4:e1-2\"]\n\n"
  },
  {
    "path": "examples/deployments/1.single-instance/3.influxdb-output/docker-compose/docker-compose.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: '3' \n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic1:\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic1\n    volumes:\n      - ./gnmic1.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n    networks:\n      - gnmic-net\n    depends_on:\n      - influxdb\n\n  influxdb:\n    image: influxdb:1.8.10\n    container_name: influxdb\n    networks:\n      - gnmic-net\n    ports:\n      - \"8083:8083\"\n      - \"8086:8086\"\n      - \"8090:8090\"\n    environment:\n      - INFLUXDB_DATA_ENGINE=tsm1\n      - INFLUXDB_REPORTING_DISABLED=false\n      - INFLUXDB_USER=gnmic\n      - INFLUXDB_USER_PASSWORD=gnmic\n      - INFLUXDB_DB=telemetry\n    volumes:\n      - influx-storage:/var/lib/influxdb\n\nvolumes:\n  influx-storage:"
  },
  {
    "path": "examples/deployments/1.single-instance/3.influxdb-output/docker-compose/gnmic1.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nencoding: json_ietf\nlog: true\n\ntargets:\n  # Add targets configuration here\n  # eg:\n  # 192.168.1.131:57400:\n  #   username: gnmic\n  #   password: secret_password\n\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  # sub1:\n  #   paths:\n  #     - /interface/statistics\n  #   stream-mode: sample\n  #   sample-interval: 1s\n\noutputs:\n  influxdb-output:\n    type: influxdb\n    url: http://influxdb:8086\n    bucket: telemetry # db name\n    token: gnmic:gnmic # username:password\n    batch-size: 1000\n    flush-timer: 10s"
  },
  {
    "path": "examples/deployments/1.single-instance/4.prometheus-output/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: nokia_srlinux\n        label=containerlab: lab14\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n      - /interface/description\n    stream-mode: sample\n    sample-interval: 10s\n    encoding: ascii\n\noutputs:     \n  prom-output:\n    type: prometheus\n    listen: \"clab-lab14-gnmic:9804\"\n    cache: {}\n    debug: true\n    strings-as-labels: true\n    service-registration:\n      address: clab-lab14-consul-agent:8500\n    event-processors:\n      - group_by_interface\n\nprocessors:\n  group_by_interface:\n    event-group-by:\n      tags:\n        - interface_name\n"
  },
  {
    "path": "examples/deployments/1.single-instance/4.prometheus-output/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab14-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n"
  },
  {
    "path": "examples/deployments/1.single-instance/4.prometheus-output/containerlab/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-lab14-consul-agent:8500\n        services:\n          - prometheus-prom-output\n"
  },
  {
    "path": "examples/deployments/1.single-instance/4.prometheus-output/containerlab/prometheus.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab14\n\nmgmt:\n  ipv4-subnet: 192.168.1.0/24\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    spine1:\n    spine2:\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n\n    gnmic:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: \"--config /app/gnmic.yaml --log subscribe\"\n\n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: \"agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0\"\n\n    prometheus:\n      kind: linux\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n  links:\n    # spine1 links\n    - endpoints: [\"spine1:e1-1\", \"leaf1:e1-1\"]\n    - 
endpoints: [\"spine1:e1-2\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-3\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-1\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-2\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-3\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf4:e1-2\"]\n"
  },
  {
    "path": "examples/deployments/1.single-instance/4.prometheus-output/docker-compose/docker-compose.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: '3' \n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic1:\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic1\n    volumes:\n      - ./gnmic1.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n    networks:\n      - gnmic-net\n    ports:\n      - 9804:9804\n    depends_on:\n      - consul-agent \n\n  consul-agent:\n    image: hashicorp/consul:latest\n    container_name: consul\n    networks:\n      - gnmic-net\n    ports:\n      - 8500:8500\n      - 8600:8600/udp\n    command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0    \n\n  prometheus:\n    image: prom/prometheus:latest\n    container_name: prometheus\n    volumes:\n      - ./prometheus/:/etc/prometheus/\n      - prometheus-data:/prometheus\n    command:\n      - '--config.file=/etc/prometheus/prometheus.yaml'\n      - '--storage.tsdb.path=/prometheus'\n      - '--web.console.libraries=/usr/share/prometheus/console_libraries'\n      - '--web.console.templates=/usr/share/prometheus/consoles'\n      - '--log.level=debug'\n    ports:\n      - 9090:9090\n    networks:\n      - gnmic-net\n\nvolumes:\n  prometheus-data:\n"
  },
  {
    "path": "examples/deployments/1.single-instance/4.prometheus-output/docker-compose/gnmic1.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nencoding: json_ietf\nlog: true\n\ntargets:\n  # Add targets configuration here\n  # e.g:\n  # 192.168.1.131:57400:\n  #   username: gnmic\n  #   password: secret_password\n\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  # sub1:\n  #   paths:\n  #     - /interface/statistics\n  #   stream-mode: sample\n  #   sample-interval: 1s\n\noutputs:\n  prom-output:\n    type: prometheus\n    listen: \"gnmic1:9804\"\n    service-registration:\n      address: consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/1.single-instance/4.prometheus-output/docker-compose/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: consul-agent:8500\n        services:\n          - prometheus-prom-output"
  },
  {
    "path": "examples/deployments/1.single-instance/5.multiple-outputs/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab15\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\n\noutputs:\n  file-output:\n    type: file\n    filename: /app/file-out.txt\n    \n  prom-output:\n    type: prometheus\n    listen: \"clab-lab15-gnmic:9804\"\n    service-registration:\n      address: clab-lab15-consul-agent:8500\n\n  influxdb-output:\n    type: influxdb\n    url: http://clab-lab15-influxdb:8086\n    bucket: telemetry # db name\n    token: gnmic:gnmic # username:password\n    batch-size: 1000\n    flush-timer: 10s\n  \n  kafka-output:\n    type: kafka\n    address: clab-lab15-kafka-server:9092\n    topic: telemetry\n  \n  nats-output:\n    type: nats\n    address: clab-lab15-nats:4222\n    subject: telemetry\n"
  },
  {
    "path": "examples/deployments/1.single-instance/5.multiple-outputs/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n  - name: InfluxDB\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab15-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n\n  - name: InfluxDB\n    type: influxdb\n    orgId: 1\n    url: http://clab-lab15-influxdb:8086\n    user: gnmic\n    password: gnmic\n    database: telemetry\n    editable: true"
  },
  {
    "path": "examples/deployments/1.single-instance/5.multiple-outputs/containerlab/multiple-outputs.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab15\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    spine1:\n    spine2:\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n\n\n    gnmic:\n      kind: linux\n      # image: ghcr.io/openconfig/gnmic:latest\n      image: priv-gnmic\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml --log subscribe'\n    \n    nats:\n      kind: linux\n      image: nats:latest\n      ports:\n         - 4222:4222        \n    \n    kafka-server:\n      kind: linux\n      image: bitnami/kafka:latest\n      ports:\n        - 9092:9092\n      env:\n        KAFKA_CFG_ZOOKEEPER_CONNECT: clab-lab15-zookeeper-server:2181\n        KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://clab-lab15-kafka-server:9092\n        ALLOW_PLAINTEXT_LISTENER: \"yes\"\n        JMX_PORT: 9000\n\n    zookeeper-server:\n      kind: linux\n      image: bitnami/zookeeper:latest\n      ports:\n        - 2181:2181\n      env:\n        ALLOW_ANONYMOUS_LOGIN: \"yes\"    \n\n    influxdb:\n      kind: linux\n      image: influxdb:1.8.10\n      ports:\n        - 8086:8086\n      env:\n        INFLUXDB_DATA_ENGINE: tsm1\n        INFLUXDB_REPORTING_DISABLED: \"false\"\n        INFLUXDB_USER: gnmic\n        INFLUXDB_USER_PASSWORD: gnmic\n        INFLUXDB_DB: telemetry\n    \n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n     
   - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    prometheus:\n      kind: linux\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n                  \n  links:\n    # spine1 links\n    - endpoints: [\"spine1:e1-1\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-2\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-3\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-1\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-2\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-3\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf4:e1-2\"]\n\n"
  },
  {
    "path": "examples/deployments/1.single-instance/5.multiple-outputs/containerlab/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-lab15-consul-agent:8500\n        services:\n          - prometheus-prom-output"
  },
  {
    "path": "examples/deployments/1.single-instance/5.multiple-outputs/docker-compose/docker-compose.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: '3' \n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic1:\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic1\n    volumes:\n      - ./gnmic1.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n    networks:\n      - gnmic-net\n    ports:\n      - 9804:9804\n    depends_on:\n      - consul-agent \n\n  consul-agent:\n    image: hashicorp/consul:latest\n    container_name: consul\n    networks:\n      - gnmic-net\n    ports:\n      - 8500:8500\n      - 8600:8600/udp\n    command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0    \n\n  prometheus:\n    image: prom/prometheus:latest\n    container_name: prometheus\n    volumes:\n      - ./prometheus/:/etc/prometheus/\n      - prometheus-data:/prometheus\n    command:\n      - '--config.file=/etc/prometheus/prometheus.yaml'\n      - '--storage.tsdb.path=/prometheus'\n      - '--web.console.libraries=/usr/share/prometheus/console_libraries'\n      - '--web.console.templates=/usr/share/prometheus/consoles'\n      - '--log.level=debug'\n    ports:\n      - 9090:9090\n    networks:\n      - gnmic-net\n  \n  nats:\n    image: 'nats:latest'\n    container_name: nats\n    networks:\n      - gnmic-net    \n    ports:\n      - \"4222:4222\"\n      - \"6222:6222\"\n      - \"8222:8222\"\n\n  kafka-server:\n    image: 'bitnami/kafka:latest'\n    container_name: kafka-server\n    networks:\n      - gnmic-net\n    ports:\n      - \"9092:9092\"\n      - \"9000:9000\"\n    environment:\n    
  - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper-server:2181\n      - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka-server:9092\n      - ALLOW_PLAINTEXT_LISTENER=yes\n      - JMX_PORT=9000\n    depends_on:\n      - zookeeper-server\n\n  zookeeper-server:\n    image: 'bitnami/zookeeper:latest'\n    container_name: zk-server\n    networks:\n      - gnmic-net\n    ports:\n      - \"2181:2181\"\n    environment:\n      - ALLOW_ANONYMOUS_LOGIN=yes\n\n  influxdb:\n    image: influxdb:1.8.10\n    container_name: influxdb\n    networks:\n      - gnmic-net\n    ports:\n      - \"8083:8083\"\n      - \"8086:8086\"\n      - \"8090:8090\"\n    environment:\n      - INFLUXDB_DATA_ENGINE=tsm1\n      - INFLUXDB_REPORTING_DISABLED=false\n      - INFLUXDB_USER=gnmic\n      - INFLUXDB_USER_PASSWORD=gnmic\n      - INFLUXDB_DB=telemetry\n    volumes:\n      - influx-storage:/var/lib/influxdb\n\nvolumes:\n  prometheus-data:\n  influx-storage:\n"
  },
  {
    "path": "examples/deployments/1.single-instance/5.multiple-outputs/docker-compose/gnmic1.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nencoding: json_ietf\nlog: true\n\ntargets:\n  # Add targets configuration here\n  # e.g:\n  # 192.168.1.131:57400:\n  #   username: gnmic\n  #   password: secret_password\n\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  # sub1:\n  #   paths:\n  #     - /interface/statistics\n  #   stream-mode: sample\n  #   sample-interval: 1s\n\noutputs:\n  file-output:\n    type: file\n    filename: /app/file-out.txt\n    \n  prom-output:\n    type: prometheus\n    listen: \"gnmic1:9804\"\n    service-registration:\n      address: consul-agent:8500\n\n  influxdb-output:\n    type: influxdb\n    url: http://influxdb:8086\n    bucket: telemetry # db name\n    token: gnmic:gnmic # username:password\n    batch-size: 1000\n    flush-timer: 10s\n  \n  kafka-output:\n    type: kafka\n    address: kafka-server:9092\n    topic: telemetry\n  \n  nats-output:\n    type: nats\n    address: nats:4222\n    subject: telemetry"
  },
  {
    "path": "examples/deployments/1.single-instance/5.multiple-outputs/docker-compose/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: consul-agent:8500\n        services:\n          - prometheus-prom-output"
  },
  {
    "path": "examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: nokia_srlinux\n        label=containerlab: lab16\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\n  sub2:\n    paths:\n      - /interface/subinterface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n    \noutputs:\n  prom-remote:\n    type: prometheus_write\n    url: http://clab-lab16-prometheus:9090/api/v1/write\n    metadata:\n      include: true\n"
  },
  {
    "path": "examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: prometheus\n    orgId: 1\n\ndatasources:\n  - name: prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab16-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n"
  },
  {
    "path": "examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n"
  },
  {
    "path": "examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/prometheus.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab16\n\nmgmt:\n  bridge: prom\n  ipv4-subnet: 172.19.19.0/24\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    spine1:\n    spine2:\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n\n    gnmic:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: \"--config /app/gnmic.yaml --log subscribe\"\n\n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: \"agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0\"\n\n    prometheus:\n      kind: linux\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --web.enable-remote-write-receiver\n        --log.level=debug\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        # - grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n  links:\n    # 
spine1 links\n    - endpoints: [\"spine1:e1-1\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-2\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-3\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-1\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-2\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-3\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf4:e1-2\"]\n"
  },
  {
    "path": "examples/deployments/1.single-instance/7.cortex-output/containerlab/cortex/single-process-config-blocks.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n\n# Configuration for running Cortex in single-process mode.\n# This should not be used in production.  It is only for getting started\n# and development.\n\n# Disable the requirement that every request to Cortex has a\n# X-Scope-OrgID header. `fake` will be substituted in instead.\nauth_enabled: false\n\nserver:\n  http_listen_port: 9009\n\n  # Configure the server to allow messages up to 100MB.\n  grpc_server_max_recv_msg_size: 104857600\n  grpc_server_max_send_msg_size: 104857600\n  grpc_server_max_concurrent_streams: 1000\n\ndistributor:\n  shard_by_all_labels: true\n  pool:\n    health_check_ingesters: true\n\ningester_client:\n  grpc_client_config:\n    # Configure the client to allow messages up to 100MB.\n    max_recv_msg_size: 104857600\n    max_send_msg_size: 104857600\n    grpc_compression: gzip\n\ningester:\n  lifecycler:\n    # The address to advertise for this ingester.  
Will be autodiscovered by\n    # looking up address on eth0 or en0; can be specified if this fails.\n    # address: 127.0.0.1\n\n    # We want to start immediately and flush on shutdown.\n    join_after: 0\n    min_ready_duration: 0s\n    final_sleep: 0s\n    num_tokens: 512\n\n    # Use an in memory ring store, so we don't need to launch a Consul.\n    ring:\n      kvstore:\n        store: inmemory\n      replication_factor: 1\n\nstorage:\n  engine: blocks\n\nblocks_storage:\n  tsdb:\n    dir: /tmp/cortex/tsdb\n\n  bucket_store:\n    sync_dir: /tmp/cortex/tsdb-sync\n\n  # You can choose between local storage and Amazon S3, Google GCS and Azure storage. Each option requires additional configuration\n  # as shown below. All options can be configured via flags as well which might be handy for secret inputs.\n  backend: filesystem # s3, gcs, azure or filesystem are valid options\n  # s3:\n  #   bucket_name: cortex\n  #   endpoint: s3.amazonaws.com\n    # Configure your S3 credentials below.\n    # secret_access_key: \"TODO\"\n    # access_key_id:     \"TODO\"\n#  gcs:\n#    bucket_name: cortex\n#    service_account: # if empty or omitted Cortex will use your default service account as per Google's fallback logic\n#  azure:\n#    account_name:\n#    account_key:\n#    container_name:\n#    endpoint_suffix:\n#    max_retries: # Number of retries for recoverable errors (defaults to 20)\n  filesystem:\n    dir: ./data/tsdb\n\ncompactor:\n  data_dir: /tmp/cortex/compactor\n  sharding_ring:\n    kvstore:\n      store: inmemory\n\nfrontend_worker:\n  match_max_concurrent: true\n\nruler:\n  enable_api: true\n  enable_sharding: false\n\nruler_storage:\n  backend: local\n  local:\n    directory: /tmp/cortex/rules\n"
  },
  {
    "path": "examples/deployments/1.single-instance/7.cortex-output/containerlab/cortexmetrics.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab17\n\nmgmt:\n  bridge: cortex\n  ipv4-subnet: 172.19.19.0/24\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    spine1:\n    spine2:\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n\n    gnmic:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: \"--config /app/gnmic.yaml --log subscribe\"\n\n    cortex:\n      kind: linux\n      image: quay.io/cortexproject/cortex:v1.18.1\n      ports:\n        - 9009:9009\n      binds:\n        - ./cortex/single-process-config-blocks.yaml:/etc/single-process-config-blocks.yaml:ro\n      cmd: |\n        -config.file=/etc/single-process-config-blocks.yaml\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        # - grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n  links:\n    # spine1 links\n    - endpoints: [\"spine1:e1-1\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-2\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-3\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-1\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-2\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-3\", \"leaf3:e1-2\"]\n    - 
endpoints: [\"spine2:e1-4\", \"leaf4:e1-2\"]\n"
  },
  {
    "path": "examples/deployments/1.single-instance/7.cortex-output/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: nokia_srlinux\n        label=containerlab: lab17\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\n  sub2:\n    paths:\n      - /interface/subinterface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n    \noutputs:\n  cortex:\n    type: prometheus_write\n    url: http://clab-lab17-cortex:9009/api/v1/push\n\n"
  },
  {
    "path": "examples/deployments/1.single-instance/7.cortex-output/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: Cortex\n    orgId: 1\n\ndatasources:\n  - name: Cortex\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab17-cortex:9009/prometheus\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n"
  },
  {
    "path": "examples/deployments/1.single-instance/8.victoria-metrics-output/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: nokia_srlinux\n        label=containerlab: lab18\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\n  sub2:\n    paths:\n      - /interface/subinterface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n    \noutputs:\n  victoria:\n    type: prometheus_write\n    url: http://clab-lab18-victoria:8428/api/v1/write\n\n"
  },
  {
    "path": "examples/deployments/1.single-instance/8.victoria-metrics-output/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: victoria\n    orgId: 1\n\ndatasources:\n  - name: victoria\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab18-victoria:8428\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n"
  },
  {
    "path": "examples/deployments/1.single-instance/8.victoria-metrics-output/containerlab/victoriametrics.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab18\n\nmgmt:\n  bridge: victoria\n  ipv4-subnet: 172.19.19.0/24\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    spine1:\n    spine2:\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n\n    gnmic:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: \"--config /app/gnmic.yaml --log subscribe\"\n\n    victoria:\n      kind: linux\n      image: victoriametrics/victoria-metrics:latest\n      ports:\n        - 8428:8428\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        # - grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n  links:\n    # spine1 links\n    - endpoints: [\"spine1:e1-1\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-2\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-3\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-1\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-2\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-3\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf4:e1-2\"]\n"
  },
  {
    "path": "examples/deployments/1.single-instance/9.jetstream-output/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: ascii\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: nokia_srlinux\n        label=containerlab: lab19\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\noutputs:\n  js-output:\n    type: jetstream\n    address: clab-lab19-nats:4222\n    subject-format: subscription.target.pathKeys\n    format: proto\n    stream: gnmic\n    write-timeout: 10s\n    num-workers: 2\n    create-stream: {} \n    debug: true\n"
  },
  {
    "path": "examples/deployments/1.single-instance/9.jetstream-output/containerlab/jetstream.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab19\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    spine1:\n    spine2:\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n\n    gnmic:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml --log subscribe'\n\n    nats:\n      kind: linux\n      image: nats:latest  \n      ports:\n         - 4222:4222\n         - 6222:6222\n         - 8222:8222\n      cmd: '--http_port 8222 -js -D'\n\n  links:\n    # spine1 links\n    - endpoints: [\"spine1:e1-1\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-2\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-3\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-1\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-2\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-3\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf4:e1-2\"]"
  },
  {
    "path": "examples/deployments/2.clusters/1.influxdb-output/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: nokia_srlinux\n        label=containerlab: lab21\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\n# clustering config\nclustering:\n  cluster-name: cluster1\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: clab-lab21-consul-agent:8500\n\noutputs:\n  influxdb-output:\n    type: influxdb\n    url: http://clab-lab21-influxdb:8086\n    bucket: telemetry # db name\n    token: gnmic:gnmic # username:password\n    batch-size: 1000\n    flush-timer: 10s"
  },
  {
    "path": "examples/deployments/2.clusters/1.influxdb-output/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\ndeleteDatasources:\n  - name: InfluxDB\n    orgId: 1\ndatasources:\n  - name: InfluxDB\n    type: influxdb\n    orgId: 1\n    url: http://clab-lab21-influxdb:8086\n    user: gnmic\n    password: gnmic\n    database: telemetry\n    editable: true\n"
  },
  {
    "path": "examples/deployments/2.clusters/1.influxdb-output/containerlab/lab21.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab21\n \ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    super-spine1:\n    super-spine2:\n    \n    spine1:\n    spine2:\n    spine3:\n    spine4:\n\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n    leaf5:\n    leaf6:\n    leaf7:\n    leaf8:\n\n    gnmic1:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7890:7890\n      env:\n        GNMIC_API: :7890\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab21-gnmic1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab21-gnmic1\n      cmd: '--config /app/gnmic.yaml subscribe'\n    \n    gnmic2:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7891:7891\n      env:\n        GNMIC_API: :7891\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab21-gnmic2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab21-gnmic2\n      cmd: '--config /app/gnmic.yaml subscribe'\n\n    gnmic3:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7892:7892\n      env:\n        GNMIC_API: :7892\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab21-gnmic3\n        
GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab21-gnmic3\n      cmd: '--config /app/gnmic.yaml subscribe'\n\n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    influxdb:\n      kind: linux\n      image: influxdb:1.8.10\n      ports:\n        - 8086:8086\n      env:\n        INFLUXDB_DATA_ENGINE: tsm1\n        INFLUXDB_REPORTING_DISABLED: \"false\"\n        INFLUXDB_USER: gnmic\n        INFLUXDB_USER_PASSWORD: gnmic\n        INFLUXDB_DB: telemetry\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n        \n  links:\n    # super-spine1 links\n    - endpoints: [\"super-spine1:e1-1\", \"spine1:e1-1\"]\n    - endpoints: [\"super-spine1:e1-2\", \"spine2:e1-1\"]\n    - endpoints: [\"super-spine1:e1-3\", \"spine3:e1-1\"]\n    - endpoints: [\"super-spine1:e1-4\", \"spine4:e1-1\"]\n    # super-spine2 links\n    - endpoints: [\"super-spine2:e1-1\", \"spine1:e1-2\"]\n    - endpoints: [\"super-spine2:e1-2\", \"spine2:e1-2\"]\n    - endpoints: [\"super-spine2:e1-3\", \"spine3:e1-2\"]\n    - endpoints: [\"super-spine2:e1-4\", \"spine4:e1-2\"]\n\n    # spine1 links\n    - endpoints: [\"spine1:e1-3\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-5\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-6\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-3\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-5\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-6\", \"leaf4:e1-2\"]\n    # spine3 links\n    - endpoints: [\"spine3:e1-3\", 
\"leaf5:e1-1\"]\n    - endpoints: [\"spine3:e1-4\", \"leaf6:e1-1\"]\n    - endpoints: [\"spine3:e1-5\", \"leaf7:e1-1\"]\n    - endpoints: [\"spine3:e1-6\", \"leaf8:e1-1\"]\n    # spine4 links\n    - endpoints: [\"spine4:e1-3\", \"leaf5:e1-2\"]\n    - endpoints: [\"spine4:e1-4\", \"leaf6:e1-2\"]\n    - endpoints: [\"spine4:e1-5\", \"leaf7:e1-2\"]\n    - endpoints: [\"spine4:e1-6\", \"leaf8:e1-2\"]\n"
  },
  {
    "path": "examples/deployments/2.clusters/1.influxdb-output/docker-compose/docker-compose.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: '3' \n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic1: &gnmic\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic1\n    volumes:\n      - ./gnmic.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n    environment:\n      - GNMIC_API=:7890\n      - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic1\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic1\n    networks:\n      - gnmic-net\n    ports:\n      - 7890:7890\n    depends_on:\n      - consul-agent\n      - influxdb\n\n  gnmic2:\n    <<: *gnmic\n    container_name: gnmic2\n    environment:\n      - GNMIC_API=:7891\n      - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic2\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic2\n    ports:\n      - 7891:7891\n\n  gnmic3:\n    <<: *gnmic\n    container_name: gnmic3\n    environment:\n      - GNMIC_API=:7892\n      - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic3\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic3\n    ports:\n      - 7892:7892\n\n  consul-agent:\n    image: hashicorp/consul:latest\n    container_name: consul\n    networks:\n      - gnmic-net\n    ports:\n      - 8500:8500\n      - 8600:8600/udp\n    command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0    \n\n  influxdb:\n    image: influxdb:1.8.10\n    container_name: influxdb\n    networks:\n      - gnmic-net\n    ports:\n      - \"8083:8083\"\n      - \"8086:8086\"\n      - \"8090:8090\"\n    environment:\n      - INFLUXDB_DATA_ENGINE=tsm1\n      - INFLUXDB_REPORTING_DISABLED=false\n      - 
INFLUXDB_USER=gnmic\n      - INFLUXDB_USER_PASSWORD=gnmic\n      - INFLUXDB_DB=telemetry\n    volumes:\n      - influx-storage:/var/lib/influxdb\n\nvolumes:\n  influx-storage:"
  },
  {
    "path": "examples/deployments/2.clusters/1.influxdb-output/docker-compose/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nencoding: json_ietf\nlog: true\n\n# clustering config\nclustering:\n  cluster-name: cluster1\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: consul-agent:8500\n\ntargets:\n  # Add targets configuration here\n  # e.g:\n  # 192.168.1.131:57400:\n  #   username: gnmic\n  #   password: secret_password\n\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  # sub1:\n  #   paths:\n  #     - /interface/statistics\n  #   stream-mode: sample\n  #   sample-interval: 1s\n\noutputs:\n  influxdb-output:\n    type: influxdb\n    url: http://influxdb:8086\n    bucket: telemetry # db name\n    token: gnmic:gnmic # username:password\n    batch-size: 1000\n    flush-timer: 10s"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab22\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\napi-server:\n  enable-metrics: true\n  \n# clustering config\nclustering:\n  cluster-name: cluster2\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: clab-lab22-consul-agent:8500\n\noutputs:\n  output1:\n    type: prometheus\n    service-registration:\n      address: clab-lab22-consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab22-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/containerlab/lab22.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab22\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    super-spine1:\n    super-spine2:\n    \n    spine1:\n    spine2:\n    spine3:\n    spine4:\n\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n    leaf5:\n    leaf6:\n    leaf7:\n    leaf8:\n    \n    gnmic1:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7890:7890\n        - 9804:9804\n      env:\n        GNMIC_API: :7890\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab22-gnmic1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab22-gnmic1\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab22-gnmic1:9804\n      cmd: '--config /app/gnmic.yaml subscribe'\n    \n    gnmic2:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7891:7891\n        - 9805:9805\n      env:\n        GNMIC_API: :7891\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab22-gnmic2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab22-gnmic2\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab22-gnmic2:9805\n      cmd: '--config /app/gnmic.yaml subscribe'\n\n    gnmic3:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - 
/var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7892:7892\n        - 9806:9806\n      env:\n        GNMIC_API: :7892\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab22-gnmic3\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab22-gnmic3\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab22-gnmic3:9806\n      cmd: '--config /app/gnmic.yaml subscribe'\n\n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    prometheus:\n      kind: linux\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n  links:\n    # super-spine1 links\n    - endpoints: [\"super-spine1:e1-1\", \"spine1:e1-1\"]\n    - endpoints: [\"super-spine1:e1-2\", \"spine2:e1-1\"]\n    - endpoints: [\"super-spine1:e1-3\", \"spine3:e1-1\"]\n    - endpoints: [\"super-spine1:e1-4\", \"spine4:e1-1\"]\n    # super-spine2 links\n    - endpoints: [\"super-spine2:e1-1\", \"spine1:e1-2\"]\n    - endpoints: [\"super-spine2:e1-2\", \"spine2:e1-2\"]\n    - endpoints: [\"super-spine2:e1-3\", \"spine3:e1-2\"]\n    - endpoints: [\"super-spine2:e1-4\", \"spine4:e1-2\"]\n\n    # spine1 links\n    - endpoints: [\"spine1:e1-3\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf2:e1-1\"]\n    - endpoints: 
[\"spine1:e1-5\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-6\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-3\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-5\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-6\", \"leaf4:e1-2\"]\n    # spine3 links\n    - endpoints: [\"spine3:e1-3\", \"leaf5:e1-1\"]\n    - endpoints: [\"spine3:e1-4\", \"leaf6:e1-1\"]\n    - endpoints: [\"spine3:e1-5\", \"leaf7:e1-1\"]\n    - endpoints: [\"spine3:e1-6\", \"leaf8:e1-1\"]\n    # spine4 links\n    - endpoints: [\"spine4:e1-3\", \"leaf5:e1-2\"]\n    - endpoints: [\"spine4:e1-4\", \"leaf6:e1-2\"]\n    - endpoints: [\"spine4:e1-5\", \"leaf7:e1-2\"]\n    - endpoints: [\"spine4:e1-6\", \"leaf8:e1-2\"]\n"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/containerlab/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-lab22-consul-agent:8500\n        services:\n          - prometheus-output1\n          - cluster2-gnmic-api"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/docker-compose/docker-compose.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: '3' \n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic1: &gnmic\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic1\n    volumes:\n      - ./gnmic.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n    environment:\n      - GNMIC_API=:7890\n      - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic1\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic1\n      - GNMIC_OUTPUTS_OUTPUT1_LISTEN=gnmic1:9804\n    networks:\n      - gnmic-net\n    ports:\n      - 7890:7890\n      - 9804:9804\n    depends_on:\n      - consul-agent \n\n  gnmic2:\n    <<: *gnmic\n    container_name: gnmic2\n    environment:\n      - GNMIC_API=:7891\n      - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic2\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic2\n      - GNMIC_OUTPUTS_OUTPUT1_LISTEN=gnmic2:9805\n    ports:\n      - 7891:7891\n      - 9805:9805\n\n  gnmic3:\n    <<: *gnmic\n    container_name: gnmic3\n    environment:\n      - GNMIC_API=:7892\n      - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic3\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic3\n      - GNMIC_OUTPUTS_OUTPUT1_LISTEN=gnmic3:9806\n    ports:\n      - 7892:7892\n      - 9806:9806\n\n  consul-agent:\n    image: hashicorp/consul:latest\n    container_name: consul\n    networks:\n      - gnmic-net\n    ports:\n      - 8500:8500\n      - 8600:8600/udp\n    command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0    \n\n  prometheus:\n    image: prom/prometheus:latest\n    container_name: prometheus\n    volumes:\n      
- ./prometheus/:/etc/prometheus/\n      - prometheus-data:/prometheus\n    command:\n      - '--config.file=/etc/prometheus/prometheus.yaml'\n      - '--storage.tsdb.path=/prometheus'\n      - '--web.console.libraries=/usr/share/prometheus/console_libraries'\n      - '--web.console.templates=/usr/share/prometheus/consoles'\n      - '--log.level=debug'\n    ports:\n      - 9090:9090\n    networks:\n      - gnmic-net\n\nvolumes:\n  prometheus-data:\n"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/docker-compose/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nencoding: json_ietf\nlog: true\n\n# clustering config\nclustering:\n  cluster-name: cluster1\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: consul-agent:8500\n\ntargets:\n  # Add targets configuration here\n  # e.g:\n  # 192.168.1.131:57400:\n  #   username: gnmic\n  #   password: secret_password\n\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  # sub1:\n  #   paths:\n  #     - /interface/statistics\n  #   stream-mode: sample\n  #   sample-interval: 1s\n\noutputs:\n  output1:\n    type: prometheus\n    service-registration:\n      address: consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/docker-compose/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: consul-agent:8500\n        services:\n          - prometheus-output1\n          - cluster1-gnmic-api\n"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/kubernetes/consul/deployment.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: consul-deploy\n  labels:\n    app: consul\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: consul\n  template:\n    metadata:\n      labels:\n        app: consul\n    spec:\n      containers:\n      - args:\n        - agent\n        - -server\n        - -ui\n        - -node=server-1\n        - -bootstrap-expect=1\n        - -client=0.0.0.0\n        image: consul\n        imagePullPolicy: IfNotPresent\n        name: consul\n        ports:\n        - containerPort: 8500\n          name: consul\n          protocol: TCP\n        resources:\n          limits:\n            cpu: 100m\n            memory: 256Mi\n          requests:\n            cpu: 50m\n            memory: 128Mi\n"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/kubernetes/consul/service.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: v1\nkind: Service\nmetadata:\n  name: consul-svc\nspec:\n  ports:\n  - name: http\n    port: 8500\n    protocol: TCP\n  selector:\n    app: consul\n"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/kubernetes/gnmic-app/configmap.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: gnmic-config\ndata:\n  config.yaml: |\n    insecure: true\n    encoding: json_ietf\n    log: true\n\n    clustering:\n      cluster-name: cluster1\n      targets-watch-timer: 30s\n      locker:\n        type: consul\n        address: consul-svc:8500\n\n    targets:\n      # Add targets configuration here\n      # e.g:\n      # 192.168.1.131:57400:\n      #   username: gnmic\n      #   password: secret_password\n\n    subscriptions:\n      # Add subscriptions configuration here\n      # e.g:\n      # sub1:\n      #   paths:\n      #     - /interface/statistics\n      #   stream-mode: sample\n      #   sample-interval: 1s\n\n    outputs:\n      output1:\n        type: prometheus\n\n"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/kubernetes/gnmic-app/secret.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: v1\nkind: Secret\nmetadata:\n  name: gnmic-login\ntype: Opaque\nstringData:\n  GNMIC_PASSWORD: NokiaSrl1!\n  GNMIC_USERNAME: admin\n"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/kubernetes/gnmic-app/service.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: v1\nkind: Service\nmetadata:\n  name: gnmic-svc\n  labels:\n    app: gnmic\nspec:\n  ports:\n  - name: http\n    port: 9804\n    protocol: TCP\n    targetPort: 9804\n  selector:\n    app: gnmic\n  clusterIP: None\n"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/kubernetes/gnmic-app/statefulset.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: gnmic-ss\n  labels:\n    app: gnmic\nspec:\n  replicas: 3\n  selector:\n    matchLabels:\n      app: gnmic\n  serviceName: gnmic-svc\n  template:\n    metadata:\n      labels:\n        app: gnmic\n    spec:\n      containers:\n        - args:\n            - subscribe\n            - --config\n            - /app/config.yaml\n          image: ghcr.io/openconfig/gnmic\n          imagePullPolicy: IfNotPresent\n          name: gnmic\n          securityContext:\n            allowPrivilegeEscalation: false\n            capabilities:\n              drop:\n                - all\n            readOnlyRootFilesystem: true\n            runAsNonRoot: true\n            runAsUser: 1000\n          ports:\n            - containerPort: 9804\n              name: prom-output\n              protocol: TCP\n            - containerPort: 7890\n              name: gnmic-api\n              protocol: TCP\n          resources:\n            limits:\n              cpu: 100m\n              memory: 400Mi\n            requests:\n              cpu: 50m\n              memory: 200Mi\n          envFrom:\n            - secretRef:\n                name: gnmic-login\n          env:\n            - name: GNMIC_API\n              value: :7890\n            - name: GNMIC_CLUSTERING_INSTANCE_NAME\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.name\n            - name: GNMIC_CLUSTERING_SERVICE_ADDRESS\n              value: 
\"$(GNMIC_CLUSTERING_INSTANCE_NAME).gnmic-svc.gnmic.svc.cluster.local\"\n            - name: GNMIC_OUTPUTS_OUTPUT1_LISTEN\n              value: \"$(GNMIC_CLUSTERING_INSTANCE_NAME).gnmic-svc.gnmic.svc.cluster.local:9804\"\n          volumeMounts:\n            - mountPath: /app/config.yaml\n              name: config\n              subPath: config.yaml\n      volumes:\n        - configMap:\n            defaultMode: 420\n            name: gnmic-config\n          name: config\n"
  },
  {
    "path": "examples/deployments/2.clusters/2.prometheus-output/kubernetes/prometheus/servicemonitor.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: gnmic-sm\n  labels:\n    app: gnmic\nspec:\n  selector:\n    matchLabels:\n      app: gnmic\n  namespaceSelector:\n    matchNames:\n      - gnmic\n  endpoints:\n    - port: http\n      path: /metrics\n"
  },
  {
    "path": "examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\napi-server:\n  enable-metrics: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab23\n      config:\n        outputs:\n          - nats-output\n          \nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\n# clustering config\nclustering:\n  cluster-name: cluster2\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: clab-lab23-consul-agent:8500\n\ninputs:\n  nats-input:\n    type: nats\n    address: clab-lab23-nats:4222\n    subject: telemetry\n    outputs:\n      - output1\n\noutputs:\n  nats-output:\n    type: nats\n    address: clab-lab23-nats:4222\n    subject: telemetry\n\n  output1:\n    type: prometheus\n    service-registration:\n      address: clab-lab23-consul-agent:8500\n      use-lock: true\n"
  },
  {
    "path": "examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab23-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n"
  },
  {
    "path": "examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/lab23.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab23\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    super-spine1:\n    super-spine2:\n    \n    spine1:\n    spine2:\n    spine3:\n    spine4:\n\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n    leaf5:\n    leaf6:\n    leaf7:\n    leaf8:\n\n    gnmic1:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic-config.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7890:7890\n        - 9804:9804\n      env:\n        GNMIC_API: :7890\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab23-gnmic1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab23-gnmic1\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab23-gnmic1:9804\n      cmd: '--config /app/gnmic-config.yaml subscribe'\n    \n    gnmic2:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic-config.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7891:7891\n        - 9805:9805\n      env:\n        GNMIC_API: :7891\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab23-gnmic2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab23-gnmic2\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab23-gnmic2:9805\n      cmd: '--config /app/gnmic-config.yaml subscribe'\n\n    gnmic3:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - 
./gnmic.yaml:/app/gnmic-config.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7892:7892\n        - 9806:9806\n      env:\n        GNMIC_API: :7892\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab23-gnmic3\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab23-gnmic3\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab23-gnmic3:9806\n      cmd: '--config /app/gnmic-config.yaml subscribe'\n\n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    prometheus:\n      kind: linux\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    nats:\n      kind: linux\n      image: nats:latest\n      ports:\n         - 4222:4222\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n  links:\n    # super-spine1 links\n    - endpoints: [\"super-spine1:e1-1\", \"spine1:e1-1\"]\n    - endpoints: [\"super-spine1:e1-2\", \"spine2:e1-1\"]\n    - endpoints: [\"super-spine1:e1-3\", \"spine3:e1-1\"]\n    - endpoints: [\"super-spine1:e1-4\", \"spine4:e1-1\"]\n    # super-spine2 links\n    - endpoints: [\"super-spine2:e1-1\", \"spine1:e1-2\"]\n    - endpoints: [\"super-spine2:e1-2\", \"spine2:e1-2\"]\n    - endpoints: [\"super-spine2:e1-3\", \"spine3:e1-2\"]\n    - endpoints: [\"super-spine2:e1-4\", 
\"spine4:e1-2\"]\n\n    # spine1 links\n    - endpoints: [\"spine1:e1-3\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-5\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-6\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-3\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-5\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-6\", \"leaf4:e1-2\"]\n    # spine3 links\n    - endpoints: [\"spine3:e1-3\", \"leaf5:e1-1\"]\n    - endpoints: [\"spine3:e1-4\", \"leaf6:e1-1\"]\n    - endpoints: [\"spine3:e1-5\", \"leaf7:e1-1\"]\n    - endpoints: [\"spine3:e1-6\", \"leaf8:e1-1\"]\n    # spine4 links\n    - endpoints: [\"spine4:e1-3\", \"leaf5:e1-2\"]\n    - endpoints: [\"spine4:e1-4\", \"leaf6:e1-2\"]\n    - endpoints: [\"spine4:e1-5\", \"leaf7:e1-2\"]\n    - endpoints: [\"spine4:e1-6\", \"leaf8:e1-2\"]\n\n"
  },
  {
    "path": "examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-lab23-consul-agent:8500\n        services:\n          - prometheus-output1\n          - cluster2-gnmic-api"
  },
  {
    "path": "examples/deployments/2.clusters/3.nats-input-prometheus-output/docker-compose/docker-compose.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: '3' \n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic1: &gnmic\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic1\n    volumes:\n      - ./gnmic.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n    environment:\n      - GNMIC_API=:7890\n      - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic1\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic1\n      - GNMIC_OUTPUTS_OUTPUT1_LISTEN=gnmic1:9804\n    networks:\n      - gnmic-net\n    ports:\n      - 7890:7890\n      - 9804:9804\n    depends_on:\n      - consul-agent\n      - nats\n\n  gnmic2:\n    <<: *gnmic\n    container_name: gnmic2\n    environment:\n      - GNMIC_API=:7891\n      - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic2\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic2\n      - GNMIC_OUTPUTS_OUTPUT1_LISTEN=gnmic2:9805\n    ports:\n      - 7891:7891\n      - 9805:9805\n\n  gnmic3:\n    <<: *gnmic\n    container_name: gnmic3\n    environment:\n      - GNMIC_API=:7892\n      - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic3\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic3\n      - GNMIC_OUTPUTS_OUTPUT1_LISTEN=gnmic3:9806\n    ports:\n      - 7892:7892\n      - 9806:9806\n\n  consul-agent:\n    image: hashicorp/consul:latest\n    container_name: consul\n    networks:\n      - gnmic-net\n    ports:\n      - 8500:8500\n      - 8600:8600/udp\n    command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0    \n\n  prometheus:\n    image: prom/prometheus:latest\n    container_name: prometheus\n    
volumes:\n      - ./prometheus/:/etc/prometheus/\n      - prometheus-data:/prometheus\n    command:\n      - '--config.file=/etc/prometheus/prometheus.yaml'\n      - '--storage.tsdb.path=/prometheus'\n      - '--web.console.libraries=/usr/share/prometheus/console_libraries'\n      - '--web.console.templates=/usr/share/prometheus/consoles'\n      - '--log.level=debug'\n    ports:\n      - 9090:9090\n    networks:\n      - gnmic-net\n\n  nats:\n    image: 'nats:latest'\n    container_name: nats\n    networks:\n      - gnmic-net    \n    ports:\n      - \"4222:4222\"\n      - \"6222:6222\"\n      - \"8222:8222\"\n      \nvolumes:\n  prometheus-data:\n"
  },
  {
    "path": "examples/deployments/2.clusters/3.nats-input-prometheus-output/docker-compose/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nencoding: json_ietf\nlog: true\n\n# clustering config\nclustering:\n  cluster-name: cluster1\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: consul-agent:8500\n\ntargets:\n  # Add targets configuration here\n  # e.g:\n  # 192.168.1.131:57400:\n  #   username: gnmic\n  #   password: secret_password\n  #   outputs:\n  #     - nats-output\n\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  # sub1:\n  #   paths:\n  #     - /interface/statistics\n  #   stream-mode: sample\n  #   sample-interval: 1s\n\ninputs:\n  nats-input:\n    type: nats\n    address: nats:4222\n    subject: telemetry\n    outputs:\n      - output1\n\noutputs:\n  nats-output:\n    type: nats\n    address: nats:4222\n    subject: telemetry\n\n  output1:\n    type: prometheus\n    service-registration:\n      address: consul-agent:8500\n      use-lock: true\n"
  },
  {
    "path": "examples/deployments/2.clusters/3.nats-input-prometheus-output/docker-compose/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: consul-agent:8500\n        services:\n          - prometheus-output1\n          - cluster2-gnmic-api\n"
  },
  {
    "path": "examples/deployments/2.clusters/4.gnmi-server/containerlab/gnmi-server.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab24\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    super-spine1:\n    super-spine2:\n    \n    spine1:\n    spine2:\n    spine3:\n    spine4:\n\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n    leaf5:\n    leaf6:\n    leaf7:\n    leaf8:\n    \n    gnmic1:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7890:7890\n      env:\n        GNMIC_API: :7890\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab24-gnmic1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab24-gnmic1\n        GNMIC_GNMI_SERVER_ADDRESS: clab-lab24-gnmic1:57400\n        GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-lab24-gnmic1\n    \n    gnmic2:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7891:7891\n      env:\n        GNMIC_API: :7891\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab24-gnmic2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab24-gnmic2\n        GNMIC_GNMI_SERVER_ADDRESS: clab-lab24-gnmic2:57400\n        GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-lab24-gnmic2\n\n    gnmic3:\n      kind: linux\n      image: 
ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7892:7892\n      env:\n        GNMIC_API: :7892\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab24-gnmic3\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab24-gnmic3\n        GNMIC_GNMI_SERVER_ADDRESS: clab-lab24-gnmic3:57400\n        GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-lab24-gnmic3\n\n    agg-gnmic:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic-agg.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7893:7893\n        - 9804:9804\n      env:\n        GNMIC_API: :7893\n        \n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    prometheus:\n      kind: linux\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        - grafana/dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml:ro\n        - grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n  links:\n    # super-spine1 links\n    - endpoints: [\"super-spine1:e1-1\", 
\"spine1:e1-1\"]\n    - endpoints: [\"super-spine1:e1-2\", \"spine2:e1-1\"]\n    - endpoints: [\"super-spine1:e1-3\", \"spine3:e1-1\"]\n    - endpoints: [\"super-spine1:e1-4\", \"spine4:e1-1\"]\n    # super-spine2 links\n    - endpoints: [\"super-spine2:e1-1\", \"spine1:e1-2\"]\n    - endpoints: [\"super-spine2:e1-2\", \"spine2:e1-2\"]\n    - endpoints: [\"super-spine2:e1-3\", \"spine3:e1-2\"]\n    - endpoints: [\"super-spine2:e1-4\", \"spine4:e1-2\"]\n\n    # spine1 links\n    - endpoints: [\"spine1:e1-3\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-5\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-6\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-3\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-5\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-6\", \"leaf4:e1-2\"]\n    # spine3 links\n    - endpoints: [\"spine3:e1-3\", \"leaf5:e1-1\"]\n    - endpoints: [\"spine3:e1-4\", \"leaf6:e1-1\"]\n    - endpoints: [\"spine3:e1-5\", \"leaf7:e1-1\"]\n    - endpoints: [\"spine3:e1-6\", \"leaf8:e1-1\"]\n    # spine4 links\n    - endpoints: [\"spine4:e1-3\", \"leaf5:e1-2\"]\n    - endpoints: [\"spine4:e1-4\", \"leaf6:e1-2\"]\n    - endpoints: [\"spine4:e1-5\", \"leaf7:e1-2\"]\n    - endpoints: [\"spine4:e1-6\", \"leaf8:e1-2\"]\n"
  },
  {
    "path": "examples/deployments/2.clusters/4.gnmi-server/containerlab/gnmic-agg.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlog: true\nskip-verify: true\n\nloader:\n  type: consul\n  address: clab-lab24-consul-agent:8500\n  debug: true\n  services:\n    - name: cluster2-gnmi-server\n      config:\n        insecure: true\n\nsubscriptions:\n  cluster2:\n    paths:\n      - /\n    stream-mode: on-change\n\napi-server:\n  enable-metrics: true\n  \noutputs:\n  output1:\n    type: prometheus\n    listen: clab-lab24-agg-gnmic:9804\n    service-registration:\n      address: clab-lab24-consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/2.clusters/4.gnmi-server/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab24\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\napi-server:\n  enable-metrics: true\n  \n# clustering config\nclustering:\n  cluster-name: cluster2\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: clab-lab24-consul-agent:8500\n\ngnmi-server:\n  enable-metrics: true\n  service-registration:\n    address: clab-lab24-consul-agent:8500\n\noutputs:\n  out1:\n    type: file\n    filename: /dev/null\n"
  },
  {
    "path": "examples/deployments/2.clusters/4.gnmi-server/containerlab/grafana/dashboards/gNMIc/gnmic_compute_metrics.json",
    "content": "{\n    \"annotations\": {\n      \"list\": [\n        {\n          \"builtIn\": 1,\n          \"datasource\": \"-- Grafana --\",\n          \"enable\": true,\n          \"hide\": true,\n          \"iconColor\": \"rgba(0, 211, 255, 1)\",\n          \"name\": \"Annotations & Alerts\",\n          \"type\": \"dashboard\"\n        }\n      ]\n    },\n    \"editable\": true,\n    \"gnetId\": null,\n    \"graphTooltip\": 0,\n    \"id\": 2,\n    \"links\": [],\n    \"panels\": [\n      {\n        \"aliasColors\": {},\n        \"bars\": false,\n        \"dashLength\": 10,\n        \"dashes\": false,\n        \"datasource\": null,\n        \"fieldConfig\": {\n          \"defaults\": {\n            \"custom\": {}\n          },\n          \"overrides\": []\n        },\n        \"fill\": 1,\n        \"fillGradient\": 0,\n        \"gridPos\": {\n          \"h\": 10,\n          \"w\": 12,\n          \"x\": 0,\n          \"y\": 0\n        },\n        \"hiddenSeries\": false,\n        \"id\": 16,\n        \"legend\": {\n          \"alignAsTable\": true,\n          \"avg\": true,\n          \"current\": true,\n          \"max\": true,\n          \"min\": true,\n          \"show\": true,\n          \"total\": false,\n          \"values\": true\n        },\n        \"lines\": true,\n        \"linewidth\": 1,\n        \"nullPointMode\": \"null\",\n        \"options\": {\n          \"alertThreshold\": true\n        },\n        \"percentage\": false,\n        \"pluginVersion\": \"7.3.7\",\n        \"pointradius\": 2,\n        \"points\": false,\n        \"renderer\": \"flot\",\n        \"seriesOverrides\": [],\n        \"spaceLength\": 10,\n        \"stack\": false,\n        \"steppedLine\": false,\n        \"targets\": [\n          {\n            \"expr\": \"process_open_fds\",\n            \"interval\": \"\",\n            \"legendFormat\": \"{{instance}}\",\n            \"refId\": \"A\"\n          }\n        ],\n        \"thresholds\": [],\n        \"timeFrom\": null,\n 
       \"timeRegions\": [],\n        \"timeShift\": null,\n        \"title\": \"Open File Descriptors (#)\",\n        \"tooltip\": {\n          \"shared\": true,\n          \"sort\": 0,\n          \"value_type\": \"individual\"\n        },\n        \"type\": \"graph\",\n        \"xaxis\": {\n          \"buckets\": null,\n          \"mode\": \"time\",\n          \"name\": null,\n          \"show\": true,\n          \"values\": []\n        },\n        \"yaxes\": [\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          },\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          }\n        ],\n        \"yaxis\": {\n          \"align\": false,\n          \"alignLevel\": null\n        }\n      },\n      {\n        \"aliasColors\": {},\n        \"bars\": false,\n        \"dashLength\": 10,\n        \"dashes\": false,\n        \"datasource\": null,\n        \"fieldConfig\": {\n          \"defaults\": {\n            \"custom\": {}\n          },\n          \"overrides\": []\n        },\n        \"fill\": 1,\n        \"fillGradient\": 0,\n        \"gridPos\": {\n          \"h\": 10,\n          \"w\": 12,\n          \"x\": 12,\n          \"y\": 0\n        },\n        \"hiddenSeries\": false,\n        \"id\": 4,\n        \"legend\": {\n          \"alignAsTable\": true,\n          \"avg\": true,\n          \"current\": true,\n          \"max\": true,\n          \"min\": true,\n          \"show\": true,\n          \"total\": false,\n          \"values\": true\n        },\n        \"lines\": true,\n        \"linewidth\": 1,\n        \"nullPointMode\": \"null\",\n        \"options\": {\n          \"alertThreshold\": true\n        },\n        \"percentage\": false,\n        \"pluginVersion\": 
\"7.3.7\",\n        \"pointradius\": 2,\n        \"points\": false,\n        \"renderer\": \"flot\",\n        \"seriesOverrides\": [],\n        \"spaceLength\": 10,\n        \"stack\": false,\n        \"steppedLine\": false,\n        \"targets\": [\n          {\n            \"expr\": \"go_goroutines\",\n            \"interval\": \"\",\n            \"legendFormat\": \"{{instance}}\",\n            \"refId\": \"A\"\n          }\n        ],\n        \"thresholds\": [],\n        \"timeFrom\": null,\n        \"timeRegions\": [],\n        \"timeShift\": null,\n        \"title\": \"Go Routines (#)\",\n        \"tooltip\": {\n          \"shared\": true,\n          \"sort\": 0,\n          \"value_type\": \"individual\"\n        },\n        \"type\": \"graph\",\n        \"xaxis\": {\n          \"buckets\": null,\n          \"mode\": \"time\",\n          \"name\": null,\n          \"show\": true,\n          \"values\": []\n        },\n        \"yaxes\": [\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          },\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          }\n        ],\n        \"yaxis\": {\n          \"align\": false,\n          \"alignLevel\": null\n        }\n      },\n      {\n        \"aliasColors\": {},\n        \"bars\": false,\n        \"dashLength\": 10,\n        \"dashes\": false,\n        \"datasource\": null,\n        \"fieldConfig\": {\n          \"defaults\": {\n            \"custom\": {}\n          },\n          \"overrides\": []\n        },\n        \"fill\": 1,\n        \"fillGradient\": 0,\n        \"gridPos\": {\n          \"h\": 10,\n          \"w\": 12,\n          \"x\": 0,\n          \"y\": 10\n        },\n        \"hiddenSeries\": false,\n        
\"id\": 14,\n        \"legend\": {\n          \"alignAsTable\": true,\n          \"avg\": true,\n          \"current\": true,\n          \"max\": true,\n          \"min\": true,\n          \"show\": true,\n          \"total\": false,\n          \"values\": true\n        },\n        \"lines\": true,\n        \"linewidth\": 1,\n        \"nullPointMode\": \"null\",\n        \"options\": {\n          \"alertThreshold\": true\n        },\n        \"percentage\": false,\n        \"pluginVersion\": \"7.3.7\",\n        \"pointradius\": 2,\n        \"points\": false,\n        \"renderer\": \"flot\",\n        \"seriesOverrides\": [],\n        \"spaceLength\": 10,\n        \"stack\": false,\n        \"steppedLine\": false,\n        \"targets\": [\n          {\n            \"expr\": \"go_memstats_stack_inuse_bytes/1000000\",\n            \"interval\": \"\",\n            \"legendFormat\": \"{{instance}}\",\n            \"refId\": \"A\"\n          }\n        ],\n        \"thresholds\": [],\n        \"timeFrom\": null,\n        \"timeRegions\": [],\n        \"timeShift\": null,\n        \"title\": \"Memory Stack In Use (MB)\",\n        \"tooltip\": {\n          \"shared\": true,\n          \"sort\": 0,\n          \"value_type\": \"individual\"\n        },\n        \"type\": \"graph\",\n        \"xaxis\": {\n          \"buckets\": null,\n          \"mode\": \"time\",\n          \"name\": null,\n          \"show\": true,\n          \"values\": []\n        },\n        \"yaxes\": [\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          },\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          }\n        ],\n        \"yaxis\": {\n          \"align\": false,\n          \"alignLevel\": null\n     
   }\n      },\n      {\n        \"aliasColors\": {},\n        \"bars\": false,\n        \"dashLength\": 10,\n        \"dashes\": false,\n        \"datasource\": null,\n        \"fieldConfig\": {\n          \"defaults\": {\n            \"custom\": {}\n          },\n          \"overrides\": []\n        },\n        \"fill\": 1,\n        \"fillGradient\": 0,\n        \"gridPos\": {\n          \"h\": 10,\n          \"w\": 12,\n          \"x\": 12,\n          \"y\": 10\n        },\n        \"hiddenSeries\": false,\n        \"id\": 6,\n        \"legend\": {\n          \"alignAsTable\": true,\n          \"avg\": true,\n          \"current\": true,\n          \"max\": true,\n          \"min\": true,\n          \"show\": true,\n          \"total\": false,\n          \"values\": true\n        },\n        \"lines\": true,\n        \"linewidth\": 1,\n        \"nullPointMode\": \"null\",\n        \"options\": {\n          \"alertThreshold\": true\n        },\n        \"percentage\": false,\n        \"pluginVersion\": \"7.3.7\",\n        \"pointradius\": 2,\n        \"points\": false,\n        \"renderer\": \"flot\",\n        \"seriesOverrides\": [],\n        \"spaceLength\": 10,\n        \"stack\": false,\n        \"steppedLine\": false,\n        \"targets\": [\n          {\n            \"expr\": \"go_memstats_alloc_bytes/1000000\",\n            \"interval\": \"\",\n            \"legendFormat\": \"{{instance}} mem alloc\",\n            \"refId\": \"A\"\n          }\n        ],\n        \"thresholds\": [],\n        \"timeFrom\": null,\n        \"timeRegions\": [],\n        \"timeShift\": null,\n        \"title\": \"Memory Alloc (MB)\",\n        \"tooltip\": {\n          \"shared\": true,\n          \"sort\": 0,\n          \"value_type\": \"individual\"\n        },\n        \"type\": \"graph\",\n        \"xaxis\": {\n          \"buckets\": null,\n          \"mode\": \"time\",\n          \"name\": null,\n          \"show\": true,\n          \"values\": []\n        },\n        
\"yaxes\": [\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          },\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          }\n        ],\n        \"yaxis\": {\n          \"align\": false,\n          \"alignLevel\": null\n        }\n      },\n      {\n        \"aliasColors\": {},\n        \"bars\": false,\n        \"dashLength\": 10,\n        \"dashes\": false,\n        \"datasource\": null,\n        \"fieldConfig\": {\n          \"defaults\": {\n            \"custom\": {}\n          },\n          \"overrides\": []\n        },\n        \"fill\": 1,\n        \"fillGradient\": 0,\n        \"gridPos\": {\n          \"h\": 10,\n          \"w\": 12,\n          \"x\": 0,\n          \"y\": 20\n        },\n        \"hiddenSeries\": false,\n        \"id\": 12,\n        \"legend\": {\n          \"alignAsTable\": true,\n          \"avg\": true,\n          \"current\": true,\n          \"max\": true,\n          \"min\": true,\n          \"show\": true,\n          \"total\": false,\n          \"values\": true\n        },\n        \"lines\": true,\n        \"linewidth\": 1,\n        \"nullPointMode\": \"null\",\n        \"options\": {\n          \"alertThreshold\": true\n        },\n        \"percentage\": false,\n        \"pluginVersion\": \"7.3.7\",\n        \"pointradius\": 2,\n        \"points\": false,\n        \"renderer\": \"flot\",\n        \"seriesOverrides\": [],\n        \"spaceLength\": 10,\n        \"stack\": false,\n        \"steppedLine\": false,\n        \"targets\": [\n          {\n            \"expr\": \"rate(go_memstats_mallocs_total[1m])/1000000\",\n            \"interval\": \"\",\n            \"legendFormat\": \"\",\n            \"refId\": \"A\"\n          }\n       
 ],\n        \"thresholds\": [],\n        \"timeFrom\": null,\n        \"timeRegions\": [],\n        \"timeShift\": null,\n        \"title\": \"Memory malloc MB/s\",\n        \"tooltip\": {\n          \"shared\": true,\n          \"sort\": 0,\n          \"value_type\": \"individual\"\n        },\n        \"type\": \"graph\",\n        \"xaxis\": {\n          \"buckets\": null,\n          \"mode\": \"time\",\n          \"name\": null,\n          \"show\": true,\n          \"values\": []\n        },\n        \"yaxes\": [\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          },\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          }\n        ],\n        \"yaxis\": {\n          \"align\": false,\n          \"alignLevel\": null\n        }\n      },\n      {\n        \"aliasColors\": {},\n        \"bars\": false,\n        \"dashLength\": 10,\n        \"dashes\": false,\n        \"datasource\": null,\n        \"fieldConfig\": {\n          \"defaults\": {\n            \"custom\": {}\n          },\n          \"overrides\": []\n        },\n        \"fill\": 1,\n        \"fillGradient\": 0,\n        \"gridPos\": {\n          \"h\": 10,\n          \"w\": 12,\n          \"x\": 12,\n          \"y\": 20\n        },\n        \"hiddenSeries\": false,\n        \"id\": 2,\n        \"legend\": {\n          \"alignAsTable\": true,\n          \"avg\": true,\n          \"current\": true,\n          \"max\": true,\n          \"min\": true,\n          \"show\": true,\n          \"total\": false,\n          \"values\": true\n        },\n        \"lines\": true,\n        \"linewidth\": 1,\n        \"nullPointMode\": \"null\",\n        \"options\": {\n          \"alertThreshold\": true\n        },\n   
     \"percentage\": false,\n        \"pluginVersion\": \"7.3.7\",\n        \"pointradius\": 2,\n        \"points\": false,\n        \"renderer\": \"flot\",\n        \"seriesOverrides\": [],\n        \"spaceLength\": 10,\n        \"stack\": false,\n        \"steppedLine\": false,\n        \"targets\": [\n          {\n            \"expr\": \"go_gc_duration_seconds*1000\",\n            \"interval\": \"\",\n            \"legendFormat\": \"{{instance}} quantile={{quantile}}\",\n            \"refId\": \"A\"\n          }\n        ],\n        \"thresholds\": [],\n        \"timeFrom\": null,\n        \"timeRegions\": [],\n        \"timeShift\": null,\n        \"title\": \"Go GC duration (ms)\",\n        \"tooltip\": {\n          \"shared\": true,\n          \"sort\": 0,\n          \"value_type\": \"individual\"\n        },\n        \"type\": \"graph\",\n        \"xaxis\": {\n          \"buckets\": null,\n          \"mode\": \"time\",\n          \"name\": null,\n          \"show\": true,\n          \"values\": []\n        },\n        \"yaxes\": [\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          },\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          }\n        ],\n        \"yaxis\": {\n          \"align\": false,\n          \"alignLevel\": null\n        }\n      },\n      {\n        \"aliasColors\": {},\n        \"bars\": false,\n        \"dashLength\": 10,\n        \"dashes\": false,\n        \"datasource\": null,\n        \"fieldConfig\": {\n          \"defaults\": {\n            \"custom\": {}\n          },\n          \"overrides\": []\n        },\n        \"fill\": 1,\n        \"fillGradient\": 0,\n        \"gridPos\": {\n          \"h\": 10,\n          \"w\": 12,\n       
   \"x\": 0,\n          \"y\": 30\n        },\n        \"hiddenSeries\": false,\n        \"id\": 10,\n        \"legend\": {\n          \"alignAsTable\": true,\n          \"avg\": true,\n          \"current\": true,\n          \"max\": true,\n          \"min\": true,\n          \"show\": true,\n          \"total\": false,\n          \"values\": true\n        },\n        \"lines\": true,\n        \"linewidth\": 1,\n        \"nullPointMode\": \"null\",\n        \"options\": {\n          \"alertThreshold\": true\n        },\n        \"percentage\": false,\n        \"pluginVersion\": \"7.3.7\",\n        \"pointradius\": 2,\n        \"points\": false,\n        \"renderer\": \"flot\",\n        \"seriesOverrides\": [],\n        \"spaceLength\": 10,\n        \"stack\": false,\n        \"steppedLine\": false,\n        \"targets\": [\n          {\n            \"expr\": \"go_memstats_heap_inuse_bytes/1000000\",\n            \"interval\": \"\",\n            \"legendFormat\": \"{{instance}}\",\n            \"refId\": \"A\"\n          }\n        ],\n        \"thresholds\": [],\n        \"timeFrom\": null,\n        \"timeRegions\": [],\n        \"timeShift\": null,\n        \"title\": \"Memory Heap inUse (MB)\",\n        \"tooltip\": {\n          \"shared\": true,\n          \"sort\": 0,\n          \"value_type\": \"individual\"\n        },\n        \"type\": \"graph\",\n        \"xaxis\": {\n          \"buckets\": null,\n          \"mode\": \"time\",\n          \"name\": null,\n          \"show\": true,\n          \"values\": []\n        },\n        \"yaxes\": [\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          },\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          }\n        ],\n  
      \"yaxis\": {\n          \"align\": false,\n          \"alignLevel\": null\n        }\n      },\n      {\n        \"aliasColors\": {},\n        \"bars\": false,\n        \"dashLength\": 10,\n        \"dashes\": false,\n        \"datasource\": null,\n        \"fieldConfig\": {\n          \"defaults\": {\n            \"custom\": {}\n          },\n          \"overrides\": []\n        },\n        \"fill\": 1,\n        \"fillGradient\": 0,\n        \"gridPos\": {\n          \"h\": 10,\n          \"w\": 12,\n          \"x\": 0,\n          \"y\": 40\n        },\n        \"hiddenSeries\": false,\n        \"id\": 8,\n        \"legend\": {\n          \"alignAsTable\": true,\n          \"avg\": true,\n          \"current\": true,\n          \"max\": true,\n          \"min\": true,\n          \"show\": true,\n          \"total\": false,\n          \"values\": true\n        },\n        \"lines\": true,\n        \"linewidth\": 1,\n        \"nullPointMode\": \"null\",\n        \"options\": {\n          \"alertThreshold\": true\n        },\n        \"percentage\": false,\n        \"pluginVersion\": \"7.3.7\",\n        \"pointradius\": 2,\n        \"points\": false,\n        \"renderer\": \"flot\",\n        \"seriesOverrides\": [],\n        \"spaceLength\": 10,\n        \"stack\": false,\n        \"steppedLine\": false,\n        \"targets\": [\n          {\n            \"expr\": \"rate(go_memstats_alloc_bytes_total[1m])/1000000\",\n            \"interval\": \"\",\n            \"legendFormat\": \"{{instance}}\",\n            \"refId\": \"A\"\n          }\n        ],\n        \"thresholds\": [],\n        \"timeFrom\": null,\n        \"timeRegions\": [],\n        \"timeShift\": null,\n        \"title\": \"Memory alloc MB/s \",\n        \"tooltip\": {\n          \"shared\": true,\n          \"sort\": 0,\n          \"value_type\": \"individual\"\n        },\n        \"type\": \"graph\",\n        \"xaxis\": {\n          \"buckets\": null,\n          \"mode\": \"time\",\n          
\"name\": null,\n          \"show\": true,\n          \"values\": []\n        },\n        \"yaxes\": [\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          },\n          {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          }\n        ],\n        \"yaxis\": {\n          \"align\": false,\n          \"alignLevel\": null\n        }\n      }\n    ],\n    \"refresh\": \"10s\",\n    \"schemaVersion\": 26,\n    \"style\": \"dark\",\n    \"tags\": [],\n    \"templating\": {\n      \"list\": []\n    },\n    \"time\": {\n      \"from\": \"now-6h\",\n      \"to\": \"now\"\n    },\n    \"timepicker\": {},\n    \"timezone\": \"\",\n    \"title\": \"gNMIc Compute metrics\",\n    \"uid\": \"EYxvhi77k\",\n    \"version\": 13\n  }"
  },
  {
    "path": "examples/deployments/2.clusters/4.gnmi-server/containerlab/grafana/dashboards/gNMIc/gnmic_grpc_metrics.json",
    "content": "{\n  \"annotations\": {\n    \"list\": [\n      {\n        \"builtIn\": 1,\n        \"datasource\": \"-- Grafana --\",\n        \"enable\": true,\n        \"hide\": true,\n        \"iconColor\": \"rgba(0, 211, 255, 1)\",\n        \"name\": \"Annotations & Alerts\",\n        \"type\": \"dashboard\"\n      }\n    ]\n  },\n  \"editable\": true,\n  \"gnetId\": null,\n  \"graphTooltip\": 0,\n  \"id\": 1,\n  \"links\": [],\n  \"panels\": [\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"custom\": {}\n        },\n        \"overrides\": []\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 0\n      },\n      \"hiddenSeries\": false,\n      \"id\": 8,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": true,\n        \"show\": true,\n        \"total\": false,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"7.3.7\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"grpc_server_started_total\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}} {{grpc_method}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeFrom\": null,\n      \"timeRegions\": [],\n      \"timeShift\": null,\n      \"title\": \"gRPC Server Started\",\n      \"tooltip\": {\n        
\"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"buckets\": null,\n        \"mode\": \"time\",\n        \"name\": null,\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false,\n        \"alignLevel\": null\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"custom\": {}\n        },\n        \"overrides\": []\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 0\n      },\n      \"hiddenSeries\": false,\n      \"id\": 2,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": true,\n        \"show\": true,\n        \"total\": false,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"7.3.7\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": 
\"rate(grpc_client_msg_received_total[1m])\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}} {{grpc_method}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeFrom\": null,\n      \"timeRegions\": [],\n      \"timeShift\": null,\n      \"title\": \"gRPC Client Msg Rcv/second\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"buckets\": null,\n        \"mode\": \"time\",\n        \"name\": null,\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false,\n        \"alignLevel\": null\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"custom\": {}\n        },\n        \"overrides\": []\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 10\n      },\n      \"hiddenSeries\": false,\n      \"id\": 6,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": true,\n        \"show\": true,\n        \"total\": false,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        
\"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"7.3.7\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"rate(grpc_client_msg_sent_total[1m])\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}} {{grpc_method}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeFrom\": null,\n      \"timeRegions\": [],\n      \"timeShift\": null,\n      \"title\": \"gRPC Client Msg Sent/s\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"buckets\": null,\n        \"mode\": \"time\",\n        \"name\": null,\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false,\n        \"alignLevel\": null\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"custom\": {}\n        },\n        \"overrides\": []\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 10\n      },\n      \"hiddenSeries\": false,\n      \"id\": 4,\n      
\"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": true,\n        \"show\": true,\n        \"total\": false,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"7.3.7\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"grpc_client_started_total\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}} {{grpc_method}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeFrom\": null,\n      \"timeRegions\": [],\n      \"timeShift\": null,\n      \"title\": \"gRPC Client started\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"buckets\": null,\n        \"mode\": \"time\",\n        \"name\": null,\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false,\n        \"alignLevel\": null\n      }\n    }\n  ],\n  \"schemaVersion\": 26,\n  \"style\": \"dark\",\n  \"tags\": [],\n  \"templating\": {\n    \"list\": []\n  },\n  \"time\": {\n    \"from\": 
\"now-6h\",\n    \"to\": \"now\"\n  },\n  \"timepicker\": {},\n  \"timezone\": \"\",\n  \"title\": \"gNMIc gRPC metrics\",\n  \"uid\": \"9W_Qzi7nz\",\n  \"version\": 6\n}\n"
  },
  {
    "path": "examples/deployments/2.clusters/4.gnmi-server/containerlab/grafana/dashboards.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\nproviders:\n- name: 'gNMIc Internal Metrics'\n  orgId: 1\n  folder: ''\n  type: file\n  disableDeletion: false\n  editable: true\n  options:\n    path: /var/lib/grafana/dashboards\n    foldersFromFilesStructure: true\n"
  },
  {
    "path": "examples/deployments/2.clusters/4.gnmi-server/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab24-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n"
  },
  {
    "path": "examples/deployments/2.clusters/4.gnmi-server/containerlab/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-lab24-consul-agent:8500\n        services:\n          - prometheus-output1\n          - cluster2-gnmic-api"
  },
  {
    "path": "examples/deployments/2.clusters/5.shared-cache/jetstream/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: ascii\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab25-1\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface[name=*]/statistics\n    stream-mode: sample\n    sample-interval: 30s\n    \n  sub2:\n    paths:\n      - /interface[name=*]/admin-state\n    stream-mode: on-change\n\n  sub3:\n    paths:\n      - /interface[name=*]/oper-state\n    stream-mode: on-change\n\napi-server:\n  enable-metrics: true\n  \n# clustering config\nclustering:\n  cluster-name: cluster2\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: clab-lab25-1-consul-agent:8500\n\ngnmi-server:\n  enable-metrics: true\n  service-registration:\n    address: clab-lab25-1-consul-agent:8500\n  cache:\n    type: jetstream\n    address: clab-lab25-1-nats:4222\n    debug: true\n\noutputs:\n  output1:\n    type: prometheus\n    service-registration:\n      address: clab-lab25-1-consul-agent:8500\n  "
  },
  {
    "path": "examples/deployments/2.clusters/5.shared-cache/jetstream/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab25-1-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n"
  },
  {
    "path": "examples/deployments/2.clusters/5.shared-cache/jetstream/containerlab/lab25-1.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab25-1\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    super-spine1:\n    super-spine2:\n    \n    spine1:\n    spine2:\n    spine3:\n    spine4:\n\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n    leaf5:\n    leaf6:\n    leaf7:\n    leaf8:\n    \n    gnmic1:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7890:7890\n        - 9804:9804\n      env:\n        GNMIC_API: :7890\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-1-gnmic1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-1-gnmic1\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-1-gnmic1:9804\n      cmd: '--config /app/gnmic.yaml subscribe'\n    \n    gnmic2:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7891:7891\n        - 9805:9805\n      env:\n        GNMIC_API: :7891\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-1-gnmic2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-1-gnmic2\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-1-gnmic2:9805\n      cmd: '--config /app/gnmic.yaml subscribe'\n\n    gnmic3:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - 
/var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7892:7892\n        - 9806:9806\n      env:\n        GNMIC_API: :7892\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-1-gnmic3\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-1-gnmic3\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-1-gnmic3:9806\n      cmd: '--config /app/gnmic.yaml subscribe'\n\n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    prometheus:\n      kind: linux\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n    nats:\n      kind: linux\n      image: nats:latest  \n      ports:\n         - 4222:4222\n         - 6222:6222\n         - 8222:8222\n      cmd: '--http_port 8222 -js -D'\n      \n  links:\n    # super-spine1 links\n    - endpoints: [\"super-spine1:e1-1\", \"spine1:e1-1\"]\n    - endpoints: [\"super-spine1:e1-2\", \"spine2:e1-1\"]\n    - endpoints: [\"super-spine1:e1-3\", \"spine3:e1-1\"]\n    - endpoints: [\"super-spine1:e1-4\", \"spine4:e1-1\"]\n    # super-spine2 links\n    - endpoints: [\"super-spine2:e1-1\", \"spine1:e1-2\"]\n    - endpoints: [\"super-spine2:e1-2\", \"spine2:e1-2\"]\n    - endpoints: [\"super-spine2:e1-3\", \"spine3:e1-2\"]\n    - 
endpoints: [\"super-spine2:e1-4\", \"spine4:e1-2\"]\n\n    # spine1 links\n    - endpoints: [\"spine1:e1-3\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-5\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-6\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-3\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-5\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-6\", \"leaf4:e1-2\"]\n    # spine3 links\n    - endpoints: [\"spine3:e1-3\", \"leaf5:e1-1\"]\n    - endpoints: [\"spine3:e1-4\", \"leaf6:e1-1\"]\n    - endpoints: [\"spine3:e1-5\", \"leaf7:e1-1\"]\n    - endpoints: [\"spine3:e1-6\", \"leaf8:e1-1\"]\n    # spine4 links\n    - endpoints: [\"spine4:e1-3\", \"leaf5:e1-2\"]\n    - endpoints: [\"spine4:e1-4\", \"leaf6:e1-2\"]\n    - endpoints: [\"spine4:e1-5\", \"leaf7:e1-2\"]\n    - endpoints: [\"spine4:e1-6\", \"leaf8:e1-2\"]\n"
  },
  {
    "path": "examples/deployments/2.clusters/5.shared-cache/jetstream/containerlab/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-lab25-1-consul-agent:8500\n        services:\n          - cluster2-gnmic-api\n          - prometheus-output1\n"
  },
  {
    "path": "examples/deployments/2.clusters/5.shared-cache/nats/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: ascii\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab25-2\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface[name=*]/statistics\n    stream-mode: sample\n    sample-interval: 30s\n\n  sub2:\n    paths:\n      - /interface[name=*]/admin-state\n    stream-mode: on-change\n\n  sub3:\n    paths:\n      - /interface[name=*]/oper-state\n    stream-mode: on-change\n\napi-server:\n  enable-metrics: true\n  \n# clustering config\nclustering:\n  cluster-name: cluster2\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: clab-lab25-2-consul-agent:8500\n\ngnmi-server:\n  enable-metrics: true\n  service-registration:\n    address: clab-lab25-2-consul-agent:8500\n  cache:\n    type: nats\n    address: clab-lab25-2-nats:4222\n    debug: true\n\noutputs:\n  output1:\n    type: prometheus\n    service-registration:\n      address: clab-lab25-2-consul-agent:8500\n\n"
  },
  {
    "path": "examples/deployments/2.clusters/5.shared-cache/nats/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab25-2-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n"
  },
  {
    "path": "examples/deployments/2.clusters/5.shared-cache/nats/containerlab/lab25-2.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab25-2\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    super-spine1:\n    super-spine2:\n    \n    spine1:\n    spine2:\n    spine3:\n    spine4:\n\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n    leaf5:\n    leaf6:\n    leaf7:\n    leaf8:\n    \n    gnmic1:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7890:7890\n        - 9804:9804\n      env:\n        GNMIC_API: :7890\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-2-gnmic1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-2-gnmic1\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-2-gnmic1:9804\n      cmd: '--config /app/gnmic.yaml subscribe'\n    \n    gnmic2:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7891:7891\n        - 9805:9805\n      env:\n        GNMIC_API: :7891\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-2-gnmic2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-2-gnmic2\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-2-gnmic2:9805\n      cmd: '--config /app/gnmic.yaml subscribe'\n\n    gnmic3:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - 
/var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7892:7892\n        - 9806:9806\n      env:\n        GNMIC_API: :7892\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-2-gnmic3\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-2-gnmic3\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-2-gnmic3:9806\n      cmd: '--config /app/gnmic.yaml subscribe'\n\n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    prometheus:\n      kind: linux\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n    nats:\n      kind: linux\n      image: nats:latest  \n      ports:\n         - 4222:4222\n      cmd: '-D'\n      \n  links:\n    # super-spine1 links\n    - endpoints: [\"super-spine1:e1-1\", \"spine1:e1-1\"]\n    - endpoints: [\"super-spine1:e1-2\", \"spine2:e1-1\"]\n    - endpoints: [\"super-spine1:e1-3\", \"spine3:e1-1\"]\n    - endpoints: [\"super-spine1:e1-4\", \"spine4:e1-1\"]\n    # super-spine2 links\n    - endpoints: [\"super-spine2:e1-1\", \"spine1:e1-2\"]\n    - endpoints: [\"super-spine2:e1-2\", \"spine2:e1-2\"]\n    - endpoints: [\"super-spine2:e1-3\", \"spine3:e1-2\"]\n    - endpoints: [\"super-spine2:e1-4\", \"spine4:e1-2\"]\n\n    # spine1 
links\n    - endpoints: [\"spine1:e1-3\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-5\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-6\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-3\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-5\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-6\", \"leaf4:e1-2\"]\n    # spine3 links\n    - endpoints: [\"spine3:e1-3\", \"leaf5:e1-1\"]\n    - endpoints: [\"spine3:e1-4\", \"leaf6:e1-1\"]\n    - endpoints: [\"spine3:e1-5\", \"leaf7:e1-1\"]\n    - endpoints: [\"spine3:e1-6\", \"leaf8:e1-1\"]\n    # spine4 links\n    - endpoints: [\"spine4:e1-3\", \"leaf5:e1-2\"]\n    - endpoints: [\"spine4:e1-4\", \"leaf6:e1-2\"]\n    - endpoints: [\"spine4:e1-5\", \"leaf7:e1-2\"]\n    - endpoints: [\"spine4:e1-6\", \"leaf8:e1-2\"]\n"
  },
  {
    "path": "examples/deployments/2.clusters/5.shared-cache/nats/containerlab/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-lab25-2-consul-agent:8500\n        services:\n          - cluster2-gnmic-api\n          - prometheus-output1\n"
  },
  {
    "path": "examples/deployments/2.clusters/5.shared-cache/redis/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: ascii\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab25-3\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface[name=*]/statistics\n    stream-mode: sample\n    sample-interval: 30s\n\n  sub2:\n    paths:\n      - /interface[name=*]/admin-state\n    stream-mode: on-change\n\n  sub3:\n    paths:\n      - /interface[name=*]/oper-state\n    stream-mode: on-change\n\napi-server:\n  enable-metrics: true\n  \n# clustering config\nclustering:\n  cluster-name: cluster2\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: clab-lab25-3-consul-agent:8500\n\ngnmi-server:\n  enable-metrics: true\n  service-registration:\n    address: clab-lab25-3-consul-agent:8500\n  cache:\n    type: redis\n    address: clab-lab25-3-redis:6379\n    debug: true\n\noutputs:\n  output1:\n    type: prometheus\n    service-registration:\n      address: clab-lab25-3-consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/2.clusters/5.shared-cache/redis/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab25-3-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n"
  },
  {
    "path": "examples/deployments/2.clusters/5.shared-cache/redis/containerlab/lab25-3.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab25-3\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    super-spine1:\n    super-spine2:\n    \n    spine1:\n    spine2:\n    spine3:\n    spine4:\n\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n    leaf5:\n    leaf6:\n    leaf7:\n    leaf8:\n    \n    gnmic1:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7890:7890\n        - 9804:9804\n      env:\n        GNMIC_API: :7890\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-3-gnmic1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-3-gnmic1\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-3-gnmic1:9804\n      cmd: '--config /app/gnmic.yaml subscribe'\n    \n    gnmic2:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7891:7891\n        - 9805:9805\n      env:\n        GNMIC_API: :7891\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-3-gnmic2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-3-gnmic2\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-3-gnmic2:9805\n      cmd: '--config /app/gnmic.yaml subscribe'\n\n    gnmic3:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - 
/var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7892:7892\n        - 9806:9806\n      env:\n        GNMIC_API: :7892\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-3-gnmic3\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-3-gnmic3\n        GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-3-gnmic3:9806\n      cmd: '--config /app/gnmic.yaml subscribe'\n\n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    prometheus:\n      kind: linux\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n    redis:\n      kind: linux\n      image: redis:7\n      ports:\n        - 6379:6379\n      cmd: redis-server --loglevel warning\n      \n  links:\n    # super-spine1 links\n    - endpoints: [\"super-spine1:e1-1\", \"spine1:e1-1\"]\n    - endpoints: [\"super-spine1:e1-2\", \"spine2:e1-1\"]\n    - endpoints: [\"super-spine1:e1-3\", \"spine3:e1-1\"]\n    - endpoints: [\"super-spine1:e1-4\", \"spine4:e1-1\"]\n    # super-spine2 links\n    - endpoints: [\"super-spine2:e1-1\", \"spine1:e1-2\"]\n    - endpoints: [\"super-spine2:e1-2\", \"spine2:e1-2\"]\n    - endpoints: [\"super-spine2:e1-3\", \"spine3:e1-2\"]\n    - endpoints: [\"super-spine2:e1-4\", 
\"spine4:e1-2\"]\n\n    # spine1 links\n    - endpoints: [\"spine1:e1-3\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-5\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-6\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-3\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-5\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-6\", \"leaf4:e1-2\"]\n    # spine3 links\n    - endpoints: [\"spine3:e1-3\", \"leaf5:e1-1\"]\n    - endpoints: [\"spine3:e1-4\", \"leaf6:e1-1\"]\n    - endpoints: [\"spine3:e1-5\", \"leaf7:e1-1\"]\n    - endpoints: [\"spine3:e1-6\", \"leaf8:e1-1\"]\n    # spine4 links\n    - endpoints: [\"spine4:e1-3\", \"leaf5:e1-2\"]\n    - endpoints: [\"spine4:e1-4\", \"leaf6:e1-2\"]\n    - endpoints: [\"spine4:e1-5\", \"leaf7:e1-2\"]\n    - endpoints: [\"spine4:e1-6\", \"leaf8:e1-2\"]\n"
  },
  {
    "path": "examples/deployments/2.clusters/5.shared-cache/redis/containerlab/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-lab25-3-consul-agent:8500\n        services:\n          - cluster2-gnmic-api\n          - prometheus-output1"
  },
  {
    "path": "examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/containerlab/gnmic-collector.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab31\n      config:\n        outputs:\n          - nats-output\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\noutputs:\n  nats-output:\n    type: nats\n    address: clab-lab31-nats:4222\n    subject: telemetry\n"
  },
  {
    "path": "examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/containerlab/gnmic-relay.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlog: true\n\ninputs:\n  nats-input:\n    type: nats\n    address: clab-lab31-nats:4222\n    subject: telemetry\n    outputs:\n      - prom-output\n\noutputs:\n  prom-output:\n    type: prometheus\n    listen: \"clab-lab31-gnmic-relay:9804\"\n    service-registration:\n      address: clab-lab31-consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab31-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true"
  },
  {
    "path": "examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/containerlab/lab31.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab31\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    srl1:\n    srl2:\n\n    gnmic-collector:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic-collector.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n    \n    gnmic-relay:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic-relay.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 9804:9804\n      cmd: '--config /app/gnmic.yaml subscribe'\n\n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    prometheus:\n      kind: linux\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    nats:\n      kind: linux\n      image: nats:latest\n      ports:\n        - 4222:4222\n\n    grafana:\n      kind: linux\n      image: 
grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n  links:\n    - endpoints: [\"srl1:e1-1\", \"srl2:e1-1\"]\n"
  },
  {
    "path": "examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/containerlab/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-lab31-consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/docker-compose.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: '3' \n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic-collector: &gnmic\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic-collector\n    volumes:\n      - ./gnmic-collector.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n    networks:\n      - gnmic-net\n    depends_on:\n      - nats\n\n  gnmic-relay:\n    <<: *gnmic\n    container_name: gnmic-relay\n    volumes:\n      - ./gnmic-relay.yaml:/app/gnmic.yaml\n    ports:\n      - 9804:9804\n    depends_on:\n      - nats\n      - consul-agent\n      \n  consul-agent:\n    image: hashicorp/consul:latest\n    container_name: consul\n    networks:\n      - gnmic-net\n    ports:\n      - 8500:8500\n      - 8600:8600/udp\n    command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0\n    \n  prometheus:\n    image: prom/prometheus:latest\n    container_name: prometheus\n    volumes:\n      - ./prometheus/:/etc/prometheus/\n      - prometheus-data:/prometheus\n    command:\n      - '--config.file=/etc/prometheus/prometheus.yaml'\n      - '--storage.tsdb.path=/prometheus'\n      - '--web.console.libraries=/usr/share/prometheus/console_libraries'\n      - '--web.console.templates=/usr/share/prometheus/consoles'\n      - '--log.level=debug'\n    ports:\n      - 9090:9090\n    networks:\n      - gnmic-net\n\n  nats:\n    image: 'nats:latest'\n    container_name: nats\n    networks:\n      - gnmic-net    \n    ports:\n      - \"4222:4222\"\n      - \"6222:6222\"\n      - 
\"8222:8222\"\n      \nvolumes:\n  prometheus-data:\n"
  },
  {
    "path": "examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/gnmic-collector.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nencoding: json_ietf\nlog: true\n\ntargets:\n  # Add targets configuration here\n  # eg:\n  # 192.168.1.131:57400:\n  #   username: gnmic\n  #   password: secret_password\n\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  # sub1:\n  #   paths:\n  #     - /interface/statistics\n  #   stream-mode: sample\n  #   sample-interval: 1s\n\n\noutputs:\n  nats-output:\n    type: nats\n    address: nats:4222\n    subject: telemetry\n"
  },
  {
    "path": "examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/gnmic-relay.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlog: true\n\ninputs:\n  nats-input:\n    type: nats\n    address: nats:4222\n    subject: telemetry\n    outputs:\n      - prom-output\n\noutputs:\n  prom-output:\n    type: prometheus\n    listen: \"gnmic-relay:9804\"\n    service-registration:\n      address: consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/containerlab/gnmic-collector.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab32\n      config:\n        outputs:\n          - nats-output\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\noutputs:\n  nats-output:\n    type: nats\n    address: clab-lab32-nats:4222\n    subject: telemetry\n"
  },
  {
    "path": "examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/containerlab/gnmic-relay.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlog: true\n\ninputs:\n  nats-input:\n    type: nats\n    address: clab-lab32-nats:4222\n    subject: telemetry\n    outputs:\n      - influxdb-output\n\noutputs:\n  influxdb-output:\n    type: influxdb\n    url: http://clab-lab32-influxdb:8086\n    bucket: telemetry # db name\n    token: gnmic:gnmic # username:password\n    batch-size: 1000\n    flush-timer: 10s"
  },
  {
    "path": "examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: InfluxDB\n    orgId: 1\n\ndatasources:\n  - name: InfluxDB\n    type: influxdb\n    orgId: 1\n    url: http://clab-lab32-influxdb:8086\n    user: gnmic\n    password: gnmic\n    database: telemetry\n    editable: true"
  },
  {
    "path": "examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/containerlab/lab32.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab32\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      # type: ixr6\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    srl1:\n    srl2:\n\n    gnmic-collector:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic-collector.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n    \n    gnmic-relay:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic-relay.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 9804:9804\n      cmd: '--config /app/gnmic.yaml subscribe'\n\n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    influxdb:\n      kind: linux\n      image: influxdb:1.8.10\n      ports:\n        - 8086:8086\n      env:\n        INFLUXDB_DATA_ENGINE: tsm1\n        INFLUXDB_REPORTING_DISABLED: \"false\"\n        INFLUXDB_USER: gnmic\n        INFLUXDB_USER_PASSWORD: gnmic\n        INFLUXDB_DB: telemetry\n\n    nats:\n      kind: linux\n      image: nats:latest\n      ports:\n        - 4222:4222\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - 
grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n  links:\n    - endpoints: [\"srl1:e1-1\", \"srl2:e1-1\"]\n"
  },
  {
    "path": "examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/docker-compose/docker-compose.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: '3' \n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic-collector: &gnmic\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic-collector\n    volumes:\n      - ./gnmic-collector.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n    networks:\n      - gnmic-net\n    depends_on:\n      - nats\n      - influxdb\n\n  gnmic-relay:\n    <<: *gnmic\n    container_name: gnmic-relay\n    volumes:\n      - ./gnmic-relay.yaml:/app/gnmic.yaml\n    depends_on:\n      - nats\n      - influxdb\n\n  influxdb:\n    image: influxdb:1.8.10\n    container_name: influxdb\n    networks:\n      - gnmic-net\n    ports:\n      - \"8083:8083\"\n      - \"8086:8086\"\n      - \"8090:8090\"\n    environment:\n      - INFLUXDB_DATA_ENGINE=tsm1\n      - INFLUXDB_REPORTING_DISABLED=false\n      - INFLUXDB_USER=gnmic\n      - INFLUXDB_USER_PASSWORD=gnmic\n      - INFLUXDB_DB=telemetry\n    volumes:\n      - influx-storage:/var/lib/influxdb\n  \n  nats:\n    image: 'nats:latest'\n    container_name: nats\n    networks:\n      - gnmic-net    \n    ports:\n      - \"4222:4222\"\n      - \"6222:6222\"\n      - \"8222:8222\"\n\nvolumes:\n  influx-storage:"
  },
  {
    "path": "examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/docker-compose/gnmic-collector.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nencoding: json_ietf\nlog: true\n\ntargets:\n  # Add targets configuration here\n  # eg:\n  # 192.168.1.131:57400:\n  #   username: gnmic\n  #   password: secret_password\n\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  # sub1:\n  #   paths:\n  #     - /interface/statistics\n  #   stream-mode: sample\n  #   sample-interval: 1s\n\noutputs:\n  nats-output:\n    type: nats\n    address: nats:4222\n    subject: telemetry\n"
  },
  {
    "path": "examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/docker-compose/gnmic-relay.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlog: true\n\ninputs:\n  nats-input:\n    type: nats\n    address: nats:4222\n    subject: telemetry\n    outputs:\n      - influxdb-output\n\noutputs:\n  influxdb-output:\n    type: influxdb\n    url: http://influxdb:8086\n    bucket: telemetry # db name\n    token: gnmic:gnmic # username:password\n    batch-size: 1000\n    flush-timer: 10s"
  },
  {
    "path": "examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/containerlab/gnmic-relay.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlog: true\n\ninputs:\n  nats-input:\n    type: nats\n    address: clab-lab33a-nats:4222\n    subject: telemetry\n    outputs:\n      - prom-output\n\noutputs:\n  prom-output:\n    type: prometheus\n    listen: \"clab-lab33a-gnmic-relay:9804\"\n    service-registration:\n      address: clab-lab33a-consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\napi-server:\n  enable-metrics: true\n\nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab33a\n      config:\n        outputs:\n          - nats-output\n          \nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\n# clustering config\nclustering:\n  cluster-name: cluster2\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: clab-lab33a-consul-agent:8500\n\noutputs:\n  nats-output:\n    type: nats\n    address: clab-lab33a-nats:4222\n    subject: telemetry\n\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab33a-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true"
  },
  {
    "path": "examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/containerlab/lab33a.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab33a\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      # type: ixr6\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    srl1:\n    srl2:\n\n    gnmic-collector1:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic-config.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7890:7890\n      env:\n        GNMIC_API: :7890\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab33a-gnmic-collector1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab33a-gnmic-collector1\n      cmd: '--config /app/gnmic-config.yaml subscribe'\n    \n    gnmic-collector2:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic-config.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7891:7891\n      env:\n        GNMIC_API: :7891\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab33a-gnmic-collector2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab33a-gnmic-collector2\n      cmd: '--config /app/gnmic-config.yaml subscribe'\n\n    gnmic-collector3:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic-config.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7892:7892\n      env:\n        GNMIC_API: :7892\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab33a-gnmic-collector3\n        
GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab33a-gnmic-collector3\n      cmd: '--config /app/gnmic-config.yaml subscribe'\n\n    gnmic-relay:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic-relay.yaml:/app/gnmic-config.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 9804:9804\n      cmd: '--config /app/gnmic-config.yaml subscribe'\n\n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    prometheus:\n      kind: linux\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    nats:\n      kind: linux\n      image: nats:latest\n      ports:\n        - 4222:4222\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n  links:\n    - endpoints: [\"srl1:e1-1\", \"srl2:e1-1\"]\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/containerlab/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-lab33a-consul-agent:8500\n        services:\n          - prometheus-prom-output\n          - cluster2-gnmic-api\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/docker-compose.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: '3' \n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic-collector1: &gnmic\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic-collector1\n    volumes:\n      - ./gnmic-collector.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n    environment:\n      - GNMIC_API=\":7890\"\n      - GNMIC_CLUSTERING_INSTANCE_NAME=\"gnmic-collector1\"\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=\"gnmic-collector1\"\n    networks:\n      - gnmic-net\n    ports:\n      - 7890:7890\n    depends_on:\n      - nats\n\n  gnmic-collector2:\n    <<: *gnmic\n    container_name: gnmic-collector2\n    environment:\n      - GNMIC_API=\":7891\"\n      - GNMIC_CLUSTERING_INSTANCE_NAME=\"gnmic-collector2\"\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=\"gnmic-collector2\"   \n    ports:\n      - 7891:7891\n\n  gnmic-collector3: \n    <<: *gnmic\n    container_name: gnmic-collector3\n    environment:\n      - GNMIC_API=\":7892\"\n      - GNMIC_CLUSTERING_INSTANCE_NAME=\"gnmic-collector3\"\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=\"gnmic-collector3\"   \n    ports:\n      - 7892:7892\n\n  gnmic-relay:\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic-relay\n    command: \"subscribe --config /app/gnmic.yaml\"\n    volumes:\n      - ./gnmic-relay.yaml:/app/gnmic.yaml\n    ports:\n      - 9804:9804\n    depends_on:\n      - nats\n      - consul-agent\n      \n  consul-agent:\n    image: hashicorp/consul:latest\n    container_name: consul\n    networks:\n      - 
gnmic-net\n    ports:\n      - 8500:8500\n      - 8600:8600/udp\n    command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0\n    \n  prometheus:\n    image: prom/prometheus:latest\n    container_name: prometheus\n    volumes:\n      - ./prometheus/:/etc/prometheus/\n      - prometheus-data:/prometheus\n    command:\n      - '--config.file=/etc/prometheus/prometheus.yaml'\n      - '--storage.tsdb.path=/prometheus'\n      - '--web.console.libraries=/usr/share/prometheus/console_libraries'\n      - '--web.console.templates=/usr/share/prometheus/consoles'\n      - '--log.level=debug'\n    ports:\n      - 9090:9090\n    networks:\n      - gnmic-net\n\n  nats:\n    image: 'nats:latest'\n    container_name: nats\n    networks:\n      - gnmic-net    \n    ports:\n      - \"4222:4222\"\n      - \"6222:6222\"\n      - \"8222:8222\"\n      \nvolumes:\n  prometheus-data:\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/gnmic-collector.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nencoding: json_ietf\nlog: true\n\n# clustering config\nclustering:\n  cluster-name: cluster1\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: consul-agent:8500\n\ntargets:\n  # Add targets configuration here\n  # eg:\n  # 192.168.1.131:57400:\n  #   username: gnmic\n  #   password: secret_password\n\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  # sub1:\n  #   paths:\n  #     - /interface/statistics\n  #   stream-mode: sample\n  #   sample-interval: 1s\n\n\noutputs:\n  nats-output:\n    type: nats\n    address: nats:4222\n    subject: telemetry\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/gnmic-relay.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlog: true\n\ninputs:\n  nats-input:\n    type: input\n    address: nats:4222\n    subject: telemetry\n    outputs:\n      - prom-output\n\noutputs:\n  prom-output:\n    type: prometheus\n    listen: \"gnmic-relay:9804\"\n    service-registration:\n      address: consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/containerlab/gnmic-relay.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlog: true\n\ninputs:\n  nats-input:\n    type: nats\n    address: clab-lab33b-nats:4222\n    subject: telemetry\n    outputs:\n      - prom-output\n\noutputs:\n  prom-output:\n    type: prometheus\n    listen: \"clab-lab33b-gnmic-relay:9804\"\n    service-registration:\n      address: clab-lab33b-consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/containerlab/gnmic.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\napi-server:\n  enable-metrics: true\n  \nloader:\n  type: docker\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab33b\n      config:\n        outputs:\n          - nats-output\n          \nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\n# clustering config\nclustering:\n  cluster-name: cluster2\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: clab-lab33b-consul-agent:8500\n\noutputs:\n  nats-output:\n    type: nats\n    address: clab-lab33b-nats:4222\n    subject: telemetry\n\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/containerlab/grafana/datasources/datasource.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\napiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-lab23-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true"
  },
  {
    "path": "examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/containerlab/lab33b.clab.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nname: lab33b\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      # type: ixr6\n      image: ghcr.io/nokia/srlinux\n\n  nodes:\n    srl1:\n    srl2:\n\n    gnmic-collector1:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic-config.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7890:7890\n      env:\n        GNMIC_API: :7890\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab33b-gnmic-collector1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab33b-gnmic-collector1\n      cmd: '--config /app/gnmic-config.yaml subscribe'\n    \n    gnmic-collector2:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic-config.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7891:7891\n      env:\n        GNMIC_API: :7891\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab33b-gnmic-collector2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab33b-gnmic-collector2\n      cmd: '--config /app/gnmic-config.yaml subscribe'\n\n    gnmic-collector3:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic.yaml:/app/gnmic-config.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 7892:7892\n      env:\n        GNMIC_API: :7892\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab33b-gnmic-collector3\n        
GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab33b-gnmic-collector3\n      cmd: '--config /app/gnmic-config.yaml subscribe'\n\n    gnmic-relay:\n      kind: linux\n      image: ghcr.io/openconfig/gnmic:latest\n      binds:\n        - ./gnmic-relay.yaml:/app/gnmic-config.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      ports:\n        - 9804:9804\n      cmd: '--config /app/gnmic-config.yaml subscribe'\n\n    consul-agent:\n      kind: linux\n      image: hashicorp/consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    prometheus:\n      kind: linux\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    nats:\n      kind: linux\n      image: nats:latest\n      ports:\n        - 4222:4222\n\n    grafana:\n      kind: linux\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        #- grafana/dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n\n  links:\n    - endpoints: [\"srl1:e1-1\", \"srl2:e1-1\"]\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/containerlab/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-lab33b-consul-agent:8500\n        services:\n          - prometheus-prom-output\n          - cluster2-gnmic-api\n\n\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/docker-compose.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: '3' \n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic-collector1: &gnmic\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic-collector1\n    volumes:\n      - ./gnmic-collector.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n    environment:\n      - GNMIC_API=\":7890\"\n      - GNMIC_CLUSTERING_INSTANCE_NAME=\"gnmic-collector1\"\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=\"gnmic-collector1\"\n    networks:\n      - gnmic-net\n    ports:\n      - 7890:7890\n    depends_on:\n      - nats\n\n  gnmic-collector2:\n    <<: *gnmic\n    container_name: gnmic-collector2\n    environment:\n      - GNMIC_API=\":7891\"\n      - GNMIC_CLUSTERING_INSTANCE_NAME=\"gnmic-collector2\"\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=\"gnmic-collector2\"   \n    ports:\n      - 7891:7891\n\n  gnmic-collector3: \n    <<: *gnmic\n    container_name: gnmic-collector3\n    environment:\n      - GNMIC_API=\":7892\"\n      - GNMIC_CLUSTERING_INSTANCE_NAME=\"gnmic-collector3\"\n      - GNMIC_CLUSTERING_SERVICE_ADDRESS=\"gnmic-collector3\"   \n    ports:\n      - 7892:7892\n\n  gnmic-relay1:\n    <<: *gnmic\n    container_name: gnmic-relay1\n    volumes:\n      - ./gnmic-relay.yaml:/app/gnmic.yaml\n    environment:\n      - GNMIC_OUTPUTS_OUTPUT1_LISTEN=\"gnmic-relay1:9804\"   \n    ports:\n      - 9804:9804\n    depends_on:\n      - nats\n      - consul-agent\n  \n  gnmic-relay2:\n    <<: *gnmic\n    container_name: gnmic-relay2\n    volumes:\n      - 
 ./gnmic-relay.yaml:/app/gnmic.yaml\n    environment:\n      - GNMIC_OUTPUTS_OUTPUT1_LISTEN=\"gnmic-relay2:9805\"   \n    ports:\n      - 9805:9805\n    depends_on:\n      - nats\n      - consul-agent\n  \n  gnmic-relay3:\n    <<: *gnmic\n    container_name: gnmic-relay3\n    volumes:\n      - ./gnmic-relay.yaml:/app/gnmic.yaml\n    environment:\n      - GNMIC_OUTPUTS_OUTPUT1_LISTEN=\"gnmic-relay3:9806\"   \n    ports:\n      - 9806:9806\n    depends_on:\n      - nats\n      - consul-agent\n                    \n  consul-agent:\n    image: hashicorp/consul:latest\n    container_name: consul\n    networks:\n      - gnmic-net\n    ports:\n      - 8500:8500\n      - 8600:8600/udp\n    command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0\n    \n  prometheus:\n    image: prom/prometheus:latest\n    container_name: prometheus\n    volumes:\n      - ./prometheus/:/etc/prometheus/\n      - prometheus-data:/prometheus\n    command:\n      - '--config.file=/etc/prometheus/prometheus.yaml'\n      - '--storage.tsdb.path=/prometheus'\n      - '--web.console.libraries=/usr/share/prometheus/console_libraries'\n      - '--web.console.templates=/usr/share/prometheus/consoles'\n      - '--log.level=debug'\n    ports:\n      - 9090:9090\n    networks:\n      - gnmic-net\n\n  nats:\n    image: 'nats:latest'\n    container_name: nats\n    networks:\n      - gnmic-net    \n    ports:\n      - \"4222:4222\"\n      - \"6222:6222\"\n      - \"8222:8222\"\n      \nvolumes:\n  prometheus-data:\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/gnmic-collector.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nencoding: json_ietf\nlog: true\n\napi-server:\n  enable-metrics: true\n\n# clustering config\nclustering:\n  cluster-name: cluster1\n  targets-watch-timer: 30s\n  locker:\n    type: consul\n    address: consul-agent:8500\n\ntargets:\n  # Add targets configuration here\n  # eg:\n  # 192.168.1.131:57400:\n  #   username: gnmic\n  #   password: secret_password\n\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  # sub1:\n  #   paths:\n  #     - /interface/statistics\n  #   stream-mode: sample\n  #   sample-interval: 1s\n\n\noutputs:\n  nats-output:\n    type: nats\n    address: nats:4222\n    subject: telemetry\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/gnmic-relay.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlog: true\n\ninputs:\n  nats-input:\n    type: nats\n    address: nats:4222\n    subject: telemetry\n    outputs:\n      - prom-output\n\noutputs:\n  output1:\n    type: prometheus\n    service-registration:\n      address: consul-agent:8500\n      use-lock: true\n"
  },
  {
    "path": "examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: consul-agent:8500\n        services:\n          - prometheus-prom-output\n          - cluster2-gnmic-api\n\n"
  },
  {
    "path": "examples/deployments/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/docker-compose/docker-compose.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nversion: '3' \n\nnetworks:\n  gnmic-net:\n    driver: bridge\n\nservices:\n  gnmic-collector: &gnmic\n    image: ghcr.io/openconfig/gnmic:latest\n    container_name: gnmic-collector\n    volumes:\n      - ./gnmic-collector.yaml:/app/gnmic.yaml\n    command: \"subscribe --config /app/gnmic.yaml\"\n    networks:\n      - gnmic-net\n    depends_on:\n      - nats\n\n  gnmic-relay1:\n    <<: *gnmic\n    container_name: gnmic-relay1\n    volumes:\n      - ./gnmic-relay1.yaml:/app/gnmic.yaml\n    ports:\n      - 9804:9804\n    depends_on:\n      - nats\n      - consul-agent\n\n  gnmic-relay2:\n    <<: *gnmic\n    container_name: gnmic-relay2\n    volumes:\n      - ./gnmic-relay2.yaml:/app/gnmic.yaml\n    depends_on:\n      - nats\n      - influxdb\n\n  consul-agent:\n    image: hashicorp/consul:latest\n    container_name: consul\n    networks:\n      - gnmic-net\n    ports:\n      - 8500:8500\n      - 8600:8600/udp\n    command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0\n    \n  prometheus:\n    image: prom/prometheus:latest\n    container_name: prometheus\n    volumes:\n      - ./prometheus/:/etc/prometheus/\n      - prometheus-data:/prometheus\n    command:\n      - '--config.file=/etc/prometheus/prometheus.yaml'\n      - '--storage.tsdb.path=/prometheus'\n      - '--web.console.libraries=/usr/share/prometheus/console_libraries'\n      - '--web.console.templates=/usr/share/prometheus/consoles'\n      - '--log.level=debug'\n    ports:\n      - 9090:9090\n    networks:\n      - gnmic-net\n\n 
 nats:\n    image: 'nats:latest'\n    container_name: nats\n    networks:\n      - gnmic-net    \n    ports:\n      - \"4222:4222\"\n      - \"6222:6222\"\n      - \"8222:8222\"\n\n  influxdb:\n    image: influxdb:1.8.10\n    container_name: influxdb\n    networks:\n      - gnmic-net\n    ports:\n      - \"8083:8083\"\n      - \"8086:8086\"\n      - \"8090:8090\"\n    environment:\n      - INFLUXDB_DATA_ENGINE=tsm1\n      - INFLUXDB_REPORTING_DISABLED=false\n      - INFLUXDB_USER=gnmic\n      - INFLUXDB_USER_PASSWORD=gnmic\n      - INFLUXDB_DB=telemetry\n    volumes:\n      - influx-storage:/var/lib/influxdb\n\nvolumes:\n  prometheus-data:\n  influx-storage:\n"
  },
  {
    "path": "examples/deployments/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/docker-compose/gnmic-collector.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nusername: admin\npassword: NokiaSrl1!\ninsecure: true\nencoding: json_ietf\nlog: true\n\ntargets:\n  # Add targets configuration here\n  # eg:\n  # 192.168.1.131:57400:\n  #   username: gnmic\n  #   password: secret_password\n\n\nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  # sub1:\n  #   paths:\n  #     - /interface/statistics\n  #   stream-mode: sample\n  #   sample-interval: 1s\n\n\noutputs:\n  nats-output:\n    type: nats\n    address: nats:4222\n    subject: telemetry\n"
  },
  {
    "path": "examples/deployments/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/docker-compose/gnmic-relay1.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlog: true\n\ninputs:\n  nats-input:\n    type: input\n    address: nats:4222\n    subject: telemetry\n    outputs:\n      - prom-output\n\noutputs:\n  prom-output:\n    type: prometheus\n    listen: \"gnmic-relay1:9804\"\n    service-registration:\n      address: consul-agent:8500\n"
  },
  {
    "path": "examples/deployments/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/docker-compose/gnmic-relay2.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlog: true\n\ninputs:\n  nats-input:\n    type: input\n    address: nats:4222\n    subject: telemetry\n    outputs:\n      - influxdb-output\n\noutputs:\n  influxdb-output:\n    type: influxdb\n    url: http://influxdb:8086\n    bucket: telemetry # db name\n    token: gnmic:gnmic # username:password\n    batch-size: 1000\n    flush-timer: 10s"
  },
  {
    "path": "examples/deployments/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/docker-compose/prometheus/prometheus.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nglobal:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: consul-agent:8500\n"
  },
  {
    "path": "examples/pkg/capabilities_rpc/main.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"google.golang.org/protobuf/encoding/prototext\"\n\n\t\"github.com/openconfig/gnmic/pkg/api\"\n)\n\nfunc main() {\n\t// create a target\n\ttg, err := api.NewTarget(\n\t\tapi.Name(\"srl1\"),\n\t\tapi.Address(\"10.0.0.1:57400\"),\n\t\tapi.Username(\"admin\"),\n\t\tapi.Password(\"admin\"),\n\t\tapi.SkipVerify(true),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\t// create a gNMI client\n\terr = tg.CreateGNMIClient(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer tg.Close()\n\t// send a gNMI capabilities request to the created target\n\tcapResp, err := tg.Capabilities(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(prototext.Format(capResp))\n}\n"
  },
  {
    "path": "examples/pkg/get_rpc/main.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"google.golang.org/protobuf/encoding/prototext\"\n\n\t\"github.com/openconfig/gnmic/pkg/api\"\n)\n\nfunc main() {\n\t// create a target\n\ttg, err := api.NewTarget(\n\t\tapi.Name(\"srl1\"),\n\t\tapi.Address(\"srl1:57400\"),\n\t\tapi.Username(\"admin\"),\n\t\tapi.Password(\"admin\"),\n\t\tapi.SkipVerify(true),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\t// create a gNMI client\n\terr = tg.CreateGNMIClient(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer tg.Close()\n\t// create a GetREquest\n\tgetReq, err := api.NewGetRequest(\n\t\tapi.Path(\"/system/name\"),\n\t\tapi.Encoding(\"json_ietf\"),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(prototext.Format(getReq))\n\t// send the created gNMI GetRequest to the created target\n\tgetResp, err := tg.Get(ctx, getReq)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(prototext.Format(getResp))\n}\n"
  },
  {
    "path": "examples/pkg/set_rpc/main.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"google.golang.org/protobuf/encoding/prototext\"\n\n\t\"github.com/openconfig/gnmic/pkg/api\"\n)\n\nfunc main() {\n\t// create a target\n\ttg, err := api.NewTarget(\n\t\tapi.Name(\"srl1\"),\n\t\tapi.Address(\"srl1:57400\"),\n\t\tapi.Username(\"admin\"),\n\t\tapi.Password(\"admin\"),\n\t\tapi.SkipVerify(true),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\terr = tg.CreateGNMIClient(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer tg.Close()\n\t// create a gNMI SetRequest\n\tsetReq, err := api.NewSetRequest(\n\t\tapi.Update(\n\t\t\tapi.Path(\"/interface[name=ethernet-1/1]\"),\n\t\t\tapi.Value(map[string]interface{}{\n\t\t\t\t\"admin-state\": \"enable\",\n\t\t\t}, \"json_ietf\")),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(prototext.Format(setReq))\n\t// send the created gNMI SetRequest to the created target\n\tsetResp, err := tg.Set(ctx, setReq)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(prototext.Format(setResp))\n}\n"
  },
  {
    "path": "examples/pkg/subscribe_rpc/main.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"google.golang.org/protobuf/encoding/prototext\"\n\n\t\"github.com/openconfig/gnmic/pkg/api\"\n)\n\nfunc main() {\n\t// create a target\n\ttg, err := api.NewTarget(\n\t\tapi.Name(\"srl1\"),\n\t\tapi.Address(\"srl1:57400\"),\n\t\tapi.Username(\"admin\"),\n\t\tapi.Password(\"admin\"),\n\t\tapi.SkipVerify(true),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\terr = tg.CreateGNMIClient(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer tg.Close()\n\t// create a gNMI subscribeRequest\n\tsubReq, err := api.NewSubscribeRequest(\n\t\tapi.SubscriptionListMode(\"stream\"),\n\t\tapi.Subscription(\n\t\t\tapi.Path(\"system/name\"),\n\t\t\tapi.SubscriptionMode(\"sample\"),\n\t\t\tapi.SampleInterval(10*time.Second),\n\t\t))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(prototext.Format(subReq))\n\t// start the subscription\n\tgo tg.Subscribe(ctx, subReq, \"sub1\")\n\t// start a goroutine that will stop the subscription after x seconds\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-time.After(42 * time.Second):\n\t\t\ttg.StopSubscription(\"sub1\")\n\t\t}\n\t}()\n\tsubRspChan, subErrChan := tg.ReadSubscriptions()\n\tfor {\n\t\tselect {\n\t\tcase rsp := <-subRspChan:\n\t\t\tfmt.Println(prototext.Format(rsp.Response))\n\t\tcase tgErr := <-subErrChan:\n\t\t\tlog.Fatalf(\"subscription %q stopped: %v\", 
tgErr.SubscriptionName, tgErr.Err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "examples/plugins/demo/main.go",
    "content": "package main\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com/hashicorp/go-plugin\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/formatters/event_plugin\"\n)\n\nconst (\n\tprocessorType = \"event-add-device_function\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\ntype MyEventProcessor struct {\n\tformatters.BaseProcessor\n\tDebug bool `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\ttargetsConfigs        map[string]*types.TargetConfig\n\tactionsDefinitions    map[string]map[string]interface{}\n\tprocessorsDefinitions map[string]map[string]any\n\tlogger                *log.Logger\n}\n\nfunc (p *MyEventProcessor) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tp.setupLogger()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (p *MyEventProcessor) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range event {\n\t\tif e.Tags == nil {\n\t\t\te.Tags = make(map[string]string)\n\t\t}\n\t\te.Tags[\"device_function\"] = \"CORE\"\n\n\t}\n\treturn event\n}\n\nfunc (p *MyEventProcessor) Close() error {\n\treturn nil\n}\n\nfunc (p *MyEventProcessor) WithActions(act map[string]map[string]interface{}) {\n\tp.actionsDefinitions = act\n}\n\nfunc (p *MyEventProcessor) WithTargets(tcs map[string]*types.TargetConfig) {\n\tp.targetsConfigs = tcs\n}\n\nfunc (p *MyEventProcessor) WithProcessors(procs map[string]map[string]any) {\n\tp.processorsDefinitions = procs\n}\n\nfunc (p *MyEventProcessor) WithLogger(l *log.Logger) {\n}\n\nfunc (p *MyEventProcessor) setupLogger() {\n\tif !p.Debug {\n\t\tp.logger = log.New(io.Discard, \"\", 0)\n\t}\n}\n\nfunc main() {\n\tlogger := log.New(os.Stderr, \"\", log.Flags()&^log.Ldate&^log.Ltime&^log.Lmsgprefix)\n\tlogger.Printf(\"starting plugin\")\n\tplug := &MyEventProcessor{logger: 
logger}\n\tplugin.Serve(&plugin.ServeConfig{\n\t\tHandshakeConfig: plugin.HandshakeConfig{\n\t\t\tProtocolVersion:  1,\n\t\t\tMagicCookieKey:   \"GNMIC_PLUGIN\",\n\t\t\tMagicCookieValue: \"gnmic\",\n\t\t},\n\t\tPlugins: map[string]plugin.Plugin{\n\t\t\tprocessorType: &event_plugin.EventProcessorPlugin{Impl: plug},\n\t\t},\n\t\tLogger: nil,\n\t})\n}\n"
  },
  {
    "path": "examples/plugins/event-add-hostname/README.md",
    "content": "# Add hostname processor plugin\n\n`event-add-hostname` is an event processor that gNMIc starts as a plugin. It enriches received gNMI notifications with the collector hostname as a tag.\n\n## Build\n\nTo build the plugin run:\n\n```bash\ncd examples/plugins/event-add-hostname\ngo build -o event-add-hostname\n```\n\n## Running the plugin\n\n- To run the plugin point gNMIc to the directory where the plugin binary resides. Either using the flag `--plugin-processors-path | -P`:\n\n```bash\ngnmic --config gnmic.yaml subscribe -P /path/to/plugin/bin\n```\n\nOr using the config file:\n\n```yaml\nplugins:\n  path: /path/to/plugin/bin\n  glob: \"*\"\n  start-timeout: 0s\n```\n\nThis allows gNMIc to discover the plugin executable and initialize it. Make sure the files gNMIc loads are executable.\n\n- Next configure the plugin as a processor:\n\n```yaml\nprocessors:\n  proc1:\n    event-add-hostname:\n      debug: true\n      # the tag name to add with the host hostname as a tag value.\n      hostname-tag-name: \"collector-host\"\n      # read-interval controls how often the plugin runs the hostname cmd to get the host hostanme\n      # by default it's at most every 1 minute\n      read-interval: 1m\n```\n\nThe processor type `event-add-hostname` should match the executable filename.\n\n- Then add that processor under an output just like a you would do it with a regular processor:\n\n```yaml\noutputs:\n  out1:\n    type: file\n    format: event\n    event-processors:\n      - proc1\n```\n\nThe resulting event message should have a new tag called `collector-host`\n\n```json\n[\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1704572759243640092,\n    \"tags\": {\n      \"collector-host\": \"kss\",\n      \"interface_name\": \"ethernet-1/1\",\n      \"source\": \"clab-ex-srl1\",\n      \"subscription-name\": \"sub1\"\n    },\n    \"values\": {\n      \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"4105346\"\n    }\n  }\n]\n```\n"
  },
  {
    "path": "examples/plugins/event-add-hostname/event-add-hostname.go",
    "content": "package main\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/hashicorp/go-hclog\"\n\t\"github.com/hashicorp/go-plugin\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/formatters/event_plugin\"\n)\n\nconst (\n\tprocessorType = \"event-add-hostname\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n\thostnameCmd   = \"hostname\"\n)\n\ntype addHostnameProcessor struct {\n\tformatters.BaseProcessor\n\tDebug           bool          `mapstructure:\"debug,omitempty\" yaml:\"debug,omitempty\" json:\"debug,omitempty\"`\n\tReadInterval    time.Duration `mapstructure:\"read-interval,omitempty\" yaml:\"read-interval,omitempty\" json:\"read-interval,omitempty\"`\n\tHostnameTagName string        `mapstructure:\"hostname-tag-name,omitempty\" yaml:\"hostname-tag-name,omitempty\" json:\"hostname-tag-name,omitempty\"`\n\n\tm        *sync.RWMutex\n\thostname string\n\tlastRead time.Time\n\n\ttargetsConfigs        map[string]*types.TargetConfig\n\tactionsDefinitions    map[string]map[string]interface{}\n\tprocessorsDefinitions map[string]map[string]any\n\tlogger                hclog.Logger\n}\n\nfunc (p *addHostnameProcessor) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.setupLogger()\n\tif p.ReadInterval <= 0 {\n\t\tp.ReadInterval = time.Minute\n\t}\n\tif p.HostnameTagName == \"\" {\n\t\tp.HostnameTagName = \"collector-hostname\"\n\t}\n\treturn p.readHostname()\n}\n\nfunc (p *addHostnameProcessor) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg {\n\tp.m.Lock()\n\tdefer p.m.Unlock()\n\n\terr := p.readHostname()\n\tif err != nil {\n\t\tp.logger.Error(\"failed to read hostname\", \"error\", err)\n\t}\n\tfor _, e := range event {\n\t\tif e.Tags == nil {\n\t\t\te.Tags = 
make(map[string]string)\n\t\t}\n\t\te.Tags[p.HostnameTagName] = p.hostname\n\t}\n\treturn event\n}\n\nfunc (p *addHostnameProcessor) Close() error {\n\treturn nil\n}\n\nfunc (p *addHostnameProcessor) WithActions(act map[string]map[string]interface{}) {\n\tp.actionsDefinitions = act\n}\n\nfunc (p *addHostnameProcessor) WithTargets(tcs map[string]*types.TargetConfig) {\n\tp.targetsConfigs = tcs\n}\n\nfunc (p *addHostnameProcessor) WithProcessors(procs map[string]map[string]any) {\n\tp.processorsDefinitions = procs\n}\n\nfunc (p *addHostnameProcessor) WithLogger(l *log.Logger) {\n}\n\nfunc (p *addHostnameProcessor) setupLogger() {\n\tp.logger = hclog.New(&hclog.LoggerOptions{\n\t\tOutput:     os.Stderr,\n\t\tTimeFormat: \"2006/01/02 15:04:05.999999\",\n\t})\n\tif p.Debug {\n\t\tp.logger.SetLevel(hclog.Debug)\n\t}\n}\n\nfunc (p *addHostnameProcessor) readHostname() error {\n\tnow := time.Now()\n\tif p.lastRead.After(now.Add(-p.ReadInterval)) {\n\t\treturn nil\n\t}\n\t//\n\tcmd := exec.Command(hostnameCmd)\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.hostname = string(bytes.TrimSpace(out))\n\tp.lastRead = now\n\treturn nil\n}\n\nfunc main() {\n\tlogger := hclog.New(&hclog.LoggerOptions{\n\t\tOutput:      os.Stderr,\n\t\tDisableTime: true,\n\t})\n\n\tlogger.Info(\"starting plugin processor\", \"name\", processorType)\n\n\tplug := &addHostnameProcessor{\n\t\tm: new(sync.RWMutex),\n\t}\n\tplugin.Serve(&plugin.ServeConfig{\n\t\tHandshakeConfig: plugin.HandshakeConfig{\n\t\t\tProtocolVersion:  1,\n\t\t\tMagicCookieKey:   \"GNMIC_PLUGIN\",\n\t\t\tMagicCookieValue: \"gnmic\",\n\t\t},\n\t\tPlugins: map[string]plugin.Plugin{\n\t\t\tprocessorType: &event_plugin.EventProcessorPlugin{Impl: plug},\n\t\t},\n\t\tLogger: logger,\n\t})\n}\n"
  },
  {
    "path": "examples/plugins/event-gnmi-get/README.md",
    "content": "# gNMI Get based notification enriching processor plugin\n\n`event-gnmi-get` is an event processor that gNMIc starts as a plugin. It enriches received gNMI notifications with tags retrieved using a gNMI Get RPC.\n\n## Building the plugin\n\n```bash\ncd examples/plugins/event-gnmi-get\ngo build -o event-gnmi-get\n```\n\n## Running the plugin\n\n- To run the plugin point gNMIc to the directory where the plugin binary resides. Either using the flag `--plugin-processors-path | -P`:\n\n```bash\ngnmic --config gnmic.yaml subscribe -P /path/to/plugin/bin\n```\n\nOr using the config file:\n\n```yaml\nplugins:\n  path: /path/to/plugin/bin\n  glob: \"*\"\n  start-timeout: 0s\n```\n\nThis allows gNMIc to discover the plugin executable and initialize it. Make sure the files gNMIc loads are executable.\n\n- Next configure the plugin as a processor:\n\n```yaml\nprocessors:\n  proc2:\n    event-gnmi-get:\n      debug: true\n      encoding: ascii\n      data-type: all\n      paths:\n        - path: \"platform/chassis/type\"\n          tag-name: \"chassis-type\"\n        - path: \"platform/chassis/hw-mac-address\"\n          tag-name: \"hw-mac-address\"\n        - path: \"system/name/host-name\"\n          tag-name: \"hostname\"\n```\n\nThe processor type `event-gnmi-get` should match the executable filename.\n\n- Then add that processor under an output just like a you would do it with a regular processor:\n\n```yaml\noutputs:\n  out1:\n    type: file\n    format: event\n    event-processors:\n      - proc2\n```\n\nThe resulting event message should have a set of new tags called `chassis-type`, `hw-mac-address` and `hostname`.\n\n```json\n[\n  {\n    \"name\": \"sub1\",\n    \"timestamp\": 1704573345190497607,\n    \"tags\": {\n      \"chassis-type\": \"7220 IXR-D2\",\n      \"hostname\": \"srl1\",\n      \"interface_name\": \"ethernet-1/1\",\n      \"hw-mac-address\": \"1A:F2:00:FF:00:00\",\n      \"source\": \"clab-ex-srl1\",\n      \"subscription-name\": 
\"sub1\"\n    },\n    \"values\": {\n      \"/srl_nokia-interfaces:interface/statistics/out-octets\": \"4108666\"\n    }\n  }\n]\n```\n\n## Examples\n\n### trigger get request directly to the node\n\n```yaml\nprocessors:\n  proc1:\n    event-gnmi-get:\n      debug: true\n      encoding: ascii\n      data-type: all\n      paths:\n        - path: \"platform/chassis/type\"\n          tag-name: \"chassis-type\"\n        - path: \"platform/chassis/hw-mac-address\"\n          tag-name: \"hw-mac-address\"\n        - path: \"system/name/host-name\"\n          tag-name: \"hostname\"\n```\n\n### trigger get request through gNMIc's gNMI server\n\n```yaml\n# enable gNMIc gNMI server\ngnmi-server:\n  address: :57401\n\nprocessors:\n  proc1:\n    event-gnmi-get:\n      debug: true\n      encoding: ascii\n      data-type: all\n      # set the gNMI Get target to the local gNMI server address\n      target: localhost:57401\n      # include the actual target name in the GetRequest Prefix\n      prefix-target: '{{ index .Tags \"source\" }}'\n      paths:\n        - path: \"platform/chassis/type\"\n          tag-name: \"chassis-type\"\n        - path: \"platform/chassis/hw-mac-address\"\n          tag-name: \"hw-mac-address\"\n        - path: \"system/name/host-name\"\n          tag-name: \"hostname\"\n```\n"
  },
  {
    "path": "examples/plugins/event-gnmi-get/event-gnmi-get.go",
    "content": "package main\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"github.com/AlekSi/pointer\"\n\t\"github.com/hashicorp/go-hclog\"\n\t\"github.com/hashicorp/go-plugin\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\n\t\"github.com/openconfig/gnmic/pkg/api\"\n\tgpath \"github.com/openconfig/gnmic/pkg/api/path\"\n\t\"github.com/openconfig/gnmic/pkg/api/target\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/formatters/event_plugin\"\n\tgtemplate \"github.com/openconfig/gnmic/pkg/gtemplate\"\n)\n\nconst (\n\tprocessorType = \"event-gnmi-get\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n\tdefaultTarget = `{{ index .Tags \"source\" }}`\n)\n\ntype gNMIGetProcessor struct {\n\tformatters.BaseProcessor\n\tDebug        bool          `mapstructure:\"debug,omitempty\" yaml:\"debug,omitempty\" json:\"debug,omitempty\"`\n\tReadPeriod   time.Duration `mapstructure:\"read-period,omitempty\" yaml:\"read-period,omitempty\" json:\"read-period,omitempty\"`\n\tTarget       string        `mapstructure:\"target,omitempty\" yaml:\"target,omitempty\" json:\"target,omitempty\"`\n\tPrefixTarget string        `mapstructure:\"prefix-target,omitempty\" yaml:\"prefix-target,omitempty\" json:\"prefix-target,omitempty\"`\n\tPaths        []*pathToTag  `mapstructure:\"paths,omitempty\" yaml:\"paths,omitempty\" json:\"paths,omitempty\"`\n\tType         string        `mapstructure:\"data-type,omitempty\" yaml:\"type,omitempty\" json:\"type,omitempty\"`\n\tEncoding     string        `mapstructure:\"encoding,omitempty\" yaml:\"encoding,omitempty\" json:\"encoding,omitempty\"`\n\tSkipVerify   bool          `mapstructure:\"skip-verify,omitempty\" yaml:\"skip-verify,omitempty\" json:\"skip-verify,omitempty\"`\n\n\tm               *sync.RWMutex\n\tprefixTargetTpl *template.Template\n\ttargetTpl      
 *template.Template\n\t// values read indexed by targetName\n\tvals map[string]*readValues\n\n\ttargetsConfigs        map[string]*types.TargetConfig\n\tactionsDefinitions    map[string]map[string]interface{}\n\tprocessorsDefinitions map[string]map[string]any\n\tlogger                hclog.Logger\n}\n\ntype pathToTag struct {\n\tPath    string `mapstructure:\"path,omitempty\" yaml:\"path,omitempty\" json:\"path,omitempty\"`\n\tTagName string `mapstructure:\"tag-name,omitempty\" yaml:\"tag-name,omitempty\" json:\"tag-name,omitempty\"`\n\n\tpathTpl *template.Template\n}\n\ntype readValues struct {\n\tvals     map[string]string\n\tlastRead time.Time\n}\n\nfunc (p *gNMIGetProcessor) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, o := range opts {\n\t\to(p)\n\t}\n\tp.setupLogger()\n\tp.logger.Info(\"initializing\", \"processor\", processorType, \"cfg\", cfg)\n\tif p.Target == \"\" {\n\t\tp.Target = defaultTarget\n\t}\n\tif p.ReadPeriod <= 0 {\n\t\tp.ReadPeriod = time.Minute\n\t}\n\tif p.Type == \"\" {\n\t\tp.Type = \"all\"\n\t}\n\t// init PrefixTarget if any\n\tif p.PrefixTarget != \"\" {\n\t\tp.prefixTargetTpl, err = gtemplate.CreateTemplate(fmt.Sprintf(\"%s-prefix-target\", processorType), p.PrefixTarget)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// init target template\n\tp.targetTpl, err = gtemplate.CreateTemplate(fmt.Sprintf(\"%s-target\", processorType), p.Target)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// init paths templates\n\tfor i, pd := range p.Paths {\n\t\tpd.pathTpl, err = gtemplate.CreateTemplate(fmt.Sprintf(\"path-%d\", i), pd.Path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tp.logger.Info(\"initialized\", \"processor\", processorType)\n\treturn nil\n}\n\nfunc (p *gNMIGetProcessor) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg {\n\tp.m.Lock()\n\tdefer p.m.Unlock()\n\n\tfor _, e := range event {\n\t\ttargetName, err 
:= p.readPaths(e)\n\t\tif err != nil {\n\t\t\tp.logger.Error(\"failed to read paths\", \"error\", err)\n\t\t}\n\t\tif _, ok := p.vals[targetName]; !ok {\n\t\t\tp.logger.Error(\"unknown target\", \"target\", targetName)\n\t\t\tcontinue\n\t\t}\n\t\tif e.Tags == nil {\n\t\t\te.Tags = make(map[string]string)\n\t\t}\n\t\tfor k, v := range p.vals[targetName].vals {\n\t\t\te.Tags[k] = v\n\t\t}\n\t}\n\treturn event\n}\n\nfunc (p *gNMIGetProcessor) Close() error {\n\treturn nil\n}\n\nfunc (p *gNMIGetProcessor) WithActions(act map[string]map[string]interface{}) {\n\tp.actionsDefinitions = act\n}\n\nfunc (p *gNMIGetProcessor) WithTargets(tcs map[string]*types.TargetConfig) {\n\tp.targetsConfigs = tcs\n}\n\nfunc (p *gNMIGetProcessor) WithProcessors(procs map[string]map[string]any) {\n\tp.processorsDefinitions = procs\n}\n\nfunc (p *gNMIGetProcessor) WithLogger(l *log.Logger) {\n}\n\nfunc (p *gNMIGetProcessor) setupLogger() {\n\tp.logger = hclog.New(&hclog.LoggerOptions{\n\t\tOutput:     os.Stderr,\n\t\tTimeFormat: \"2006/01/02 15:04:05.999999\",\n\t})\n\n\tif p.Debug {\n\t\tp.logger.SetLevel(hclog.Debug)\n\t}\n}\n\nfunc (p *gNMIGetProcessor) readPaths(e *formatters.EventMsg) (string, error) {\n\tnow := time.Now()\n\n\tvar err error\n\tb := new(bytes.Buffer)\n\tswitch {\n\tcase p.prefixTargetTpl != nil:\n\t\terr = p.prefixTargetTpl.Execute(b, e)\n\tcase p.targetTpl != nil:\n\t\terr = p.targetTpl.Execute(b, e)\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ttargetName := b.String()\n\t_, ok := p.vals[targetName]\n\tif !ok {\n\t\tp.vals[targetName] = &readValues{\n\t\t\tvals: map[string]string{},\n\t\t}\n\t}\n\tif p.vals[targetName].lastRead.After(now.Add(-p.ReadPeriod)) {\n\t\treturn targetName, nil\n\t}\n\t//\n\tvals, err := p.gnmiGet(targetName, e)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tp.logger.Debug(\"vals from get\", \"vals\", vals)\n\tp.vals[targetName].vals = vals\n\tp.vals[targetName].lastRead = time.Now()\n\treturn targetName, nil\n}\n\nfunc (p 
*gNMIGetProcessor) gnmiGet(targetName string, e *formatters.EventMsg) (map[string]string, error) {\n\ttc, err := p.selectTarget(targetName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt := target.NewTarget(tc)\n\treq, keyPathMapping, err := p.createGetRequest(e)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp.logger.Debug(\"keyPathMapping\", \"mapping\", keyPathMapping)\n\n\tctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)\n\tdefer cancel()\n\n\terr = t.CreateGNMIClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer t.Close()\n\n\tresp, err := t.Get(ctx, req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"target %q GetRequest failed: %v\", t.Config.Name, err)\n\t}\n\n\treturn p.extractTags(resp, keyPathMapping), nil\n}\n\nfunc (p *gNMIGetProcessor) selectTarget(tName string) (*types.TargetConfig, error) {\n\tif tName == \"\" {\n\t\treturn nil, fmt.Errorf(\"target name is empty\")\n\t}\n\tif p.prefixTargetTpl != nil {\n\t\ttc := &types.TargetConfig{\n\t\t\tName:       p.Target,\n\t\t\tAddress:    p.Target,\n\t\t\tSkipVerify: pointer.ToBool(p.SkipVerify),\n\t\t\tTimeout:    10 * time.Second,\n\t\t}\n\t\tif !p.SkipVerify {\n\t\t\ttc.Insecure = pointer.ToBool(true)\n\t\t}\n\t\treturn tc, nil\n\t}\n\tif tc, ok := p.targetsConfigs[tName]; ok {\n\t\treturn tc, nil\n\t}\n\treturn nil, fmt.Errorf(\"unknown target %s\", tName)\n}\n\nfunc (p *gNMIGetProcessor) createGetRequest(e *formatters.EventMsg) (*gnmi.GetRequest, map[string]string, error) {\n\tgnmiOpts := make([]api.GNMIOption, 0, 3)\n\tgnmiOpts = append(gnmiOpts, api.Encoding(p.Encoding))\n\tgnmiOpts = append(gnmiOpts, api.DataType(p.Type))\n\n\tvar err error\n\tb := new(bytes.Buffer)\n\tif p.prefixTargetTpl != nil {\n\t\terr = p.prefixTargetTpl.Execute(b, e)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"prefix-target parse error: %v\", err)\n\t\t}\n\t\tps := b.String()\n\t\tgnmiOpts = append(gnmiOpts, api.Target(ps))\n\t}\n\n\tpathToKey := map[string]string{}\n\tfor 
_, ptt := range p.Paths {\n\t\tb.Reset()\n\t\terr = ptt.pathTpl.Execute(b, e)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"path parse error: %v\", err)\n\t\t}\n\t\tps := b.String()\n\t\tgnmiOpts = append(gnmiOpts, api.Path(ps))\n\t\tpathToKey[ps] = ptt.TagName\n\t}\n\treq, err := api.NewGetRequest(gnmiOpts...)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn req, pathToKey, nil\n}\n\nfunc (p *gNMIGetProcessor) extractTags(rsp *gnmi.GetResponse, mapping map[string]string) map[string]string {\n\trs := map[string]string{}\n\tfor _, n := range rsp.GetNotification() {\n\t\tfor _, upd := range n.GetUpdate() {\n\n\t\t\txp := gpath.GnmiPathToXPath(upd.GetPath(), false)\n\t\t\tp.logger.Debug(\"path\", \"xp\", xp, \"v\", upd.GetVal())\n\t\t\tif k, ok := mapping[xp]; ok {\n\t\t\t\trs[k] = extractValue(upd.GetVal())\n\t\t\t}\n\t\t}\n\t}\n\treturn rs\n}\n\nfunc extractValue(tv *gnmi.TypedValue) string {\n\tswitch tv.Value.(type) {\n\tcase *gnmi.TypedValue_AsciiVal:\n\t\treturn tv.GetAsciiVal()\n\tcase *gnmi.TypedValue_BoolVal:\n\t\treturn fmt.Sprintf(\"%t\", tv.GetBoolVal())\n\tcase *gnmi.TypedValue_BytesVal:\n\t\treturn string(tv.GetBytesVal())\n\tcase *gnmi.TypedValue_DecimalVal:\n\t\t//lint:ignore SA1019 still need DecimalVal for backward compatibility\n\t\tv := tv.GetDecimalVal()\n\t\tf := float64(v.Digits) / math.Pow10(int(v.Precision))\n\t\treturn strconv.FormatFloat(f, 'e', -1, 64)\n\tcase *gnmi.TypedValue_FloatVal:\n\t\t//lint:ignore SA1019 still need GetFloatVal for backward compatibility\n\t\treturn strconv.FormatFloat(float64(tv.GetFloatVal()), 'e', -1, 64)\n\tcase *gnmi.TypedValue_DoubleVal:\n\t\treturn strconv.FormatFloat(tv.GetDoubleVal(), 'e', -1, 64)\n\tcase *gnmi.TypedValue_IntVal:\n\t\treturn strconv.Itoa(int(tv.GetIntVal()))\n\tcase *gnmi.TypedValue_StringVal:\n\t\treturn tv.GetStringVal()\n\tcase *gnmi.TypedValue_UintVal:\n\t\treturn strconv.Itoa(int(tv.GetUintVal()))\n\tcase *gnmi.TypedValue_LeaflistVal:\n\t\t// TODO:\n\tcase 
*gnmi.TypedValue_ProtoBytes:\n\t\treturn string(tv.GetProtoBytes()) // ?\n\tcase *gnmi.TypedValue_AnyVal:\n\t\treturn string(tv.GetAnyVal().GetValue()) // ?\n\tcase *gnmi.TypedValue_JsonIetfVal:\n\t\tjsondata := tv.GetJsonIetfVal()\n\t\treturn string(jsondata)\n\tcase *gnmi.TypedValue_JsonVal:\n\t\tjsondata := tv.GetJsonVal()\n\t\treturn string(jsondata)\n\t}\n\treturn \"\"\n}\n\nfunc main() {\n\tlogger := hclog.New(&hclog.LoggerOptions{\n\t\tOutput:      os.Stderr,\n\t\tDisableTime: true,\n\t})\n\n\tlogger.Info(\"starting plugin processor\", \"name\", processorType)\n\n\tplug := &gNMIGetProcessor{\n\t\tm:    new(sync.RWMutex),\n\t\tvals: make(map[string]*readValues),\n\t}\n\n\tplugin.Serve(&plugin.ServeConfig{\n\t\tHandshakeConfig: plugin.HandshakeConfig{\n\t\t\tProtocolVersion:  1,\n\t\t\tMagicCookieKey:   \"GNMIC_PLUGIN\",\n\t\t\tMagicCookieValue: \"gnmic\",\n\t\t},\n\t\tPlugins: map[string]plugin.Plugin{\n\t\t\tprocessorType: &event_plugin.EventProcessorPlugin{Impl: plug},\n\t\t},\n\t\tLogger: logger,\n\t})\n}\n"
  },
  {
    "path": "examples/plugins/go-event-plugin/event-go-plugin.go",
    "content": "package main\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"github.com/hashicorp/go-hclog\"\n\t\"github.com/hashicorp/go-plugin\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/formatters/event_plugin\"\n)\n\nconst (\n\tprocessorType = \"event-go-plugin\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\ntype goSampleProcessorPlugin struct {\n\tformatters.BaseProcessor\n\tDebug bool `mapstructure:\"debug,omitempty\" yaml:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\ttargetsConfigs        map[string]*types.TargetConfig\n\tactionsDefinitions    map[string]map[string]interface{}\n\tprocessorsDefinitions map[string]map[string]any\n\tlogger                hclog.Logger\n}\n\nfunc (p *goSampleProcessorPlugin) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, o := range opts {\n\t\to(p)\n\t}\n\t// initialize logger\n\tp.logger.Info(\"initializing\", \"processor\", processorType, \"cfg\", cfg)\n\t// initialize your processor's config and handle the options\n\t// - set default\n\t// - validate config\n\tp.logger.Info(\"initialized\", \"processor\", processorType)\n\treturn nil\n}\n\nfunc (p *goSampleProcessorPlugin) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg {\n\t// apply the processor's logic here\n\t// return the new/modified event messages\n\treturn event\n}\n\nfunc (p *goSampleProcessorPlugin) Close() error {\n\treturn nil\n}\n\nfunc (p *goSampleProcessorPlugin) WithActions(act map[string]map[string]interface{}) {\n\tp.actionsDefinitions = act\n}\n\nfunc (p *goSampleProcessorPlugin) WithTargets(tcs map[string]*types.TargetConfig) {\n\tp.targetsConfigs = tcs\n}\n\nfunc (p *goSampleProcessorPlugin) WithProcessors(procs map[string]map[string]any) {\n\tp.processorsDefinitions = procs\n}\n\nfunc (p *goSampleProcessorPlugin) WithLogger(l *log.Logger) 
{\n}\n\nfunc main() {\n\tlogger := hclog.New(&hclog.LoggerOptions{\n\t\tOutput:      os.Stderr,\n\t\tDisableTime: true,\n\t})\n\n\tlogger.Info(\"starting plugin processor\", \"name\", processorType)\n\n\tplug := &goSampleProcessorPlugin{}\n\n\tplugin.Serve(&plugin.ServeConfig{\n\t\tHandshakeConfig: plugin.HandshakeConfig{\n\t\t\tProtocolVersion:  1,\n\t\t\tMagicCookieKey:   \"GNMIC_PLUGIN\",\n\t\t\tMagicCookieValue: \"gnmic\",\n\t\t},\n\t\tPlugins: map[string]plugin.Plugin{\n\t\t\tprocessorType: &event_plugin.EventProcessorPlugin{Impl: plug},\n\t\t},\n\t\tLogger: logger,\n\t})\n}\n"
  },
  {
    "path": "examples/plugins/minimal/event-my-processor.go",
    "content": "package main\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"github.com/hashicorp/go-hclog\"\n\t\"github.com/hashicorp/go-plugin\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/formatters/event_plugin\"\n)\n\nconst (\n\t// TODO: Choose a name for your processor\n\tprocessorType = \"event-my-processor\"\n)\n\ntype myProcessor struct {\n\tformatters.BaseProcessor\n\t// TODO: Add your config struct fields here\n}\n\nfunc (p *myProcessor) Init(cfg interface{}, opts ...formatters.Option) error {\n\t// decode the plugin config\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// apply options\n\tfor _, o := range opts {\n\t\to(p)\n\t}\n\t// TODO: Other initialization steps...\n\treturn nil\n}\n\nfunc (p *myProcessor) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg {\n\t// TODO: The processor's logic is applied here\n\treturn event\n}\n\nfunc (p *myProcessor) Close() error {\n\treturn nil\n}\n\nfunc (p *myProcessor) WithActions(act map[string]map[string]interface{}) {\n}\n\nfunc (p *myProcessor) WithTargets(tcs map[string]*types.TargetConfig) {\n}\n\nfunc (p *myProcessor) WithProcessors(procs map[string]map[string]any) {\n}\n\nfunc (p *myProcessor) WithLogger(l *log.Logger) {\n}\n\nfunc main() {\n\tlogger := hclog.New(&hclog.LoggerOptions{\n\t\tOutput:      os.Stderr,\n\t\tDisableTime: true,\n\t})\n\n\tlogger.Info(\"starting plugin processor\", \"name\", processorType)\n\n\t// TODO: Create and initialize your processor's struct\n\tplug := &myProcessor{}\n\t// start it\n\tplugin.Serve(&plugin.ServeConfig{\n\t\tHandshakeConfig: plugin.HandshakeConfig{\n\t\t\tProtocolVersion:  1,\n\t\t\tMagicCookieKey:   \"GNMIC_PLUGIN\",\n\t\t\tMagicCookieValue: \"gnmic\",\n\t\t},\n\t\tPlugins: map[string]plugin.Plugin{\n\t\t\tprocessorType: &event_plugin.EventProcessorPlugin{Impl: plug},\n\t\t},\n\t\tLogger: logger,\n\t})\n}\n"
  },
  {
    "path": "examples/set-request-templates/Nokia/SRL/1.interfaces/interfaces_template.gotmpl",
    "content": "{{ $target := index .Vars .TargetName }}\nupdates:\n{{- range $interface := $target.interfaces }}\n  - path: \"/interface[name={{ $interface.name }}]\"\n    encoding: \"json_ietf\"\n    value:\n      admin-state: {{ $interface.admin_state | default \"disable\" }}\n      description: {{ $interface.description | default \"\" }}\n      {{- if $interface.mtu }}\n      mtu: {{ $interface.mtu }}\n      {{- end }}\n      {{- if $interface.vlan_tagging }}\n      vlan-tagging: {{ $interface.vlan_tagging }}\n      {{- end }}\n      {{- if $ethernet := $interface.ethernet }}\n      ethernet:\n        {{- if $ethernet.aggregate_id }}\n        aggregate-id: {{ $ethernet.aggregate_id }}\n        {{- end }}\n        {{- if $ethernet.auto_negotiate }}\n        auto-negotiate: {{ $ethernet.auto_negotiate }}\n        {{- end }}\n        {{- if $ethernet.duplex_mode }}\n        duplex-mode: {{ $ethernet.duplex_mode }}\n        {{- end }}\n        {{- if $ethernet.flow_control.receive }}\n        flow-control: \n          receive: {{ $ethernet.flow_control.receive }}\n        {{- end }}\n      {{- end }}\n      {{- if $interface.lag }}\n      lag:\n        {{- if $interface.lag.lag_type }}\n        lag-type: {{ $interface.lag.lag_type }}\n        {{- end }}\n        {{- if $interface.lag.min_links }}\n        min-links: {{ $interface.lag.min_links }}\n        {{- end }}\n        {{- if $interface.lag.member_speed }}\n        member-speed: {{ $interface.lag.member_speed }}\n        {{- end }}\n        {{- if $interface.lag.lacp_fallback_mode }}\n        lacp-fallback-mode: {{ $interface.lag.lacp_fallback_mode }}\n        {{- end }}\n        {{- if $interface.lag.lacp_fallback_timeout }}\n        lacp-fallback-timeout: {{ $interface.lag.lacp_fallback_timeout }}\n        {{- end }}\n        {{- if $interface.lag.lag_speed }}\n        lag-speedt: {{ $interface.lag.lag_speed }}\n        {{- end }}\n      {{- end }}\n{{- end }}\n"
  },
  {
    "path": "examples/set-request-templates/Nokia/SRL/1.interfaces/interfaces_template_vars.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nnode1:\n  interfaces:\n    - name: # ethernet-1/1\n      admin_state: # \"enable\" | \"disable\"\n      description:\n      vlan_tagging: # true || false\n      mtu: \n      loopback-mode: # true || false\n      ethernet:\n        aggregate_id:\n        auto_negotiate:\n        duplex_mode: \n        flow_control:\n           receive: # true || false \n      lag:\n        lag_type:\n        min_links:\n        member_speed:\n        lacp_fallback_mode:\n        lacp_fallback_timeout:\n        lag_speed:\n      subinterface:\n        - admin_state: # \"enable\" | \"disable\"\n          ipv4_address:\n          ipv6_address:\n          vlan_id:\n          untagged: {}\n          acl:\n            input:\n              ipv4_filter:\n              ipv6_filter:\n            output:\n              ipv4_filter:\n              ipv6_filter:"
  },
  {
    "path": "examples/set-request-templates/Nokia/SRL/1.interfaces/subinterfaces_template.gotmpl",
    "content": "updates:\n{{ $target := index .Vars .TargetName }}\n{{- range $interface := $target.interfaces }}\n  {{- range $idx, $subinterface := $interface.subinterface }}\n  - path: \"/interface[name={{ $interface.name }}]/subinterface[index={{ $idx }}]\"\n    encoding: \"json_ietf\"\n    value: \n      admin-state: {{ $subinterface.admin_state | default \"disable\" }}\n      {{- if $subinterface.type }}\n      type: {{ $subinterface.type }}\n      {{- end }}\n      {{- if $subinterface.description }}\n      description: {{ $subinterface.description }}\n      {{- end }}\n      {{- if $subinterface.ip_mtu }}\n      ip-mtu: {{ $subinterface.ip_mtu }}\n      {{- end }}\n      {{- if $subinterface.ipv4_address }}\n      ipv4:\n        address:\n          - ip-prefix: {{ $subinterface.ipv4_address }}\n      {{- end }}\n      {{- if $subinterface.ipv6_address }}\n      ipv6:\n        address:\n          - ip-prefix: {{ $subinterface.ipv6_address }}\n      {{- end }}\n      {{- if $subinterface.vlan_id }}\n      vlan:\n        encap:\n          single-tagged:\n            vlan-id: {{ $subinterface.vlan_id }}\n      {{- else if $subinterface.untagged }}\n      vlan:\n        encap:\n          untagged: {}\n      {{- end }}\n      {{- if $acl := $subinterface.acl }}\n      acl:\n        {{- if $input := $acl.input }}\n        input:\n          {{- if $input.ipv4_filter }}\n          ipv4-filter: {{ $input.ipv4_filter }}\n          {{- end }}\n          {{- if $input.ipv6_filter }}\n          ipv6-filter: {{ $acl.input.ipv6_filter }}\n        {{- end }}\n        {{- end }}\n        {{- if $output := $acl.output }}\n        output:\n          {{- if $output.ipv4_filter }}\n          ipv4-filter: {{ $output.ipv4_filter }}\n          {{- end }}\n          {{- if $output.ipv6_filter }}\n          ipv6-filter: {{ $output.ipv6_filter }}\n          {{- end }}\n        {{- end }}\n      {{- end }}\n    {{- end }}\n{{- end }}\n"
  },
  {
    "path": "examples/set-request-templates/Nokia/SRL/2.network-instance/network_instance_bgp_evpn_template.gotmpl",
    "content": "replaces:\n{{- range $netins := index .Vars .TargetName \"network-instances\" }}\n  {{- if $bgpevpn := index $netins \"protocols\" \"bgp-evpn\" }}\n  - path: \"/network-instance[name={{ index $netins \"name\" }}/protocols/bgp-evpn]\"\n    encoding: json_ietf\n    value: \n      bgp-instance\n        - id: 1\n          admin-state: {{ index $bgpevpn \"admin-state\" | default \"disable\" }}\n          default-admin-tag: {{ index $bgpevpn \"default-admin-tag\" | default 0 }}\n          encapsulation-type: {{ index $bgpevpn \"encapsulation-type\" | default \"vxlan\" }}\n          {{- if index $bgpevpn \"vxlan-interface\" }}\n          vxlan-interface: {{ index $bgpevpn \"vxlan-interface\" }}\n          {{- end }}\n          {{- if index $bgpevpn \"evi\" }}\n          evi: {{ index $bgpevpn \"evi\" }}\n          {{- end }}\n          ecmp: {{ index $bgpevpn \"ecmp\" | default 1 }}\n          {{- if $routes := index $bgpevpn \"routes\" }}\n          routes:\n            {{- if $routetable := index $routes \"route-table\" }}\n            route-table:\n              mac-ip:\n                advertise-gateway-mac: {{ index $routetable \"mac-ip\" \"advertise-gateway-mac\" | default false }}\n            {{- end }}\n            {{- if $bridgetable := index $routes \"bridge-table\" }}             \n            bridge-table:\n              mac-ip:\n                advertise: {{ index $bridgetable \"mac-ip\" \"advertise\" | default false }}\n              inclusive-mcast:\n                advertise: {{ index $bridgetable \"inclusive-mcast\" \"advertise\" | default true }}\n                {{- if index $bridgetable \"inclusive-mcast\" \"originating-ip\" }}\n                originating-ip: {{ index $bridgetable \"inclusive-mcast\" \"originating-ip\" }}\n                {{- end }}\n              {{- if index $bridgetable \"next-hop\" }}\n              next-hop: {{ index $bridgetable \"next-hop\" }}\n              {{- end }}\n            {{- end }}\n          {{- end 
}}\n  {{- end }}    \n{{- end }}"
  },
  {
    "path": "examples/set-request-templates/Nokia/SRL/2.network-instance/network_instance_bgp_template.gotmpl",
    "content": "replaces:\n{{- range $netins := index .Vars .TargetName \"network-instances\" }}\n  {{- if $bgp := index $netins \"protocols\" \"bgp\" }}\n  - path: \"/network-instance[name={{ index $netins \"name\" }}/protocols/bgp]\"\n    encoding: json_ietf\n    value: \n      admin-state: {{ index $bgp \"admin-state\" | default \"disable\" }}\n      \n  {{- end }}    \n{{- end }}"
  },
  {
    "path": "examples/set-request-templates/Nokia/SRL/2.network-instance/network_instance_bgp_vpn_template.gotmpl",
    "content": "replaces:\n{{- range $netins := index .Vars .TargetName \"network-instances\" }}\n  {{- if $bgpvpn := index $netins \"protocols\" \"bgp-vpn\" }}\n  - path: \"/network-instance[name={{ index $netins \"name\" }}/protocols/bgp-vpn]\"\n    encoding: json_ietf\n    value: \n      bgp-instance:\n     {{- range $idx, $bgpins := $bgpvpn}} \n      - id: {{ $idx }}\n        admin-state: {{ index $bgpvpn \"admin-state\" | default \"disable\" }}\n        default-admin-tag: {{ index $bgpvpn \"default-admin-tag\" | default 0 }}\n      {{- end }}\n  {{- end }}    \n{{- end }}"
  },
  {
    "path": "examples/set-request-templates/Nokia/SRL/2.network-instance/network_instance_template.gotmpl",
    "content": "replaces:\n{{- range $netins := index .Vars .TargetName \"network-instances\" }}\n  - path: \"/network-instance[name={{ index $netins \"name\" }}]\"\n    encoding: json_ietf\n    value: \n      admin-state: {{ index $netins \"admin-state\" | default \"disable\" }}\n      description: {{ index $netins \"description\" | default \"\" }}\n      type: {{ index $netins \"type\" | default \"default\" }}\n      {{- if index $netins \"path-mtu-disovery\" }}\n      mtu:\n        path-mtu-discovery: {{ index $netins \"path-mtu-disovery\" }}\n      {{- end }}\n      {{- if index $netins \"router-id\" }}\n      router-id: {{ index $netins \"router-id\" }}\n      {{- end }}\n      {{ $interfaces := index $netins \"interfaces\" }}\n      {{- if ne (len $interfaces) 0 }}\n      interface:\n        {{- range $interface := $interfaces }}\n        - name: {{ $interface }}\n        {{- end }}\n      {{- end }}\n      {{- if index $netins \"vxlan-interface\" }}\n      vxlan-interface: {{ index $netins \"vxlan-interface\" }} \n      {{- end }}\n      {{- if index $netins \"ip-forwarding\" }}\n      ip-forwarding:\n        receive-ipv4-check: {{ index $netins \"ip-forwarding\" \"receive-ipv4-check\" | default false }} \n        receive-ipv6-check: {{ index $netins \"ip-forwarding\" \"receive-ipv6-check\" | default false }} \n      {{- end }}\n      {{ $protocols := index $netins \"protocols\" }}\n      {{- if ne (len $protocols) 0 }}\n      protocols:\n      {{- end }}\n{{- end }}"
  },
  {
    "path": "examples/set-request-templates/Nokia/SRL/2.network-instance/network_instance_template_vars.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nnode1:57400:\n  network-intances:\n  - name: \"\"\n    admin-state: enable\n    aggregate-routes:\n      route:\n      - admin-state: enable\n        aggregator:\n          address: \"\"\n          as-number: \"\"\n        communities:\n          add: \"\"\n        generate-icmp: \"\"\n        prefix: \"\"\n        summary-only: \"false\"\n    bridge-table:\n      discard-unknown-dest-mac: \"false\"\n      mac-duplication:\n        action: stop-learning\n        admin-state: enable\n        hold-down-time: \"9\"\n        monitoring-window: \"3\"\n        num-moves: \"5\"\n      mac-learning:\n        admin-state: enable\n        aging:\n          admin-state: enable\n          age-time: \"300\"\n      mac-limit:\n        maximum-entries: \"250\"\n        warning-threshold-pct: \"95\"\n      protect-anycast-gw-mac: \"false\"\n      static-mac:\n        mac:\n        - address: \"\"\n          destination: \"\"\n    description: \"\"\n    interface:\n    - name: \"\"\n    ip-forwarding:\n      receive-ipv4-check: \"\"\n      receive-ipv6-check: \"\"\n    ip-load-balancing:\n      resilient-hash-prefix:\n      - hash-buckets-per-path: \"1\"\n        ip-prefix: \"\"\n        max-paths: \"1\"\n    mpls:\n      admin-state: disable\n      static-mpls-entry:\n      - collect-stats: \"false\"\n        next-hop-group: \"\"\n        operation: swap\n        preference: \"5\"\n        top-label: \"\"\n      ttl-propagation: \"false\"\n    mtu:\n      path-mtu-discovery: \"\"\n    next-hop-groups:\n      group:\n      - 
admin-state: enable\n        blackhole:\n          generate-icmp: \"false\"\n        name: \"\"\n        nexthop:\n        - admin-state: enable\n          failure-detection:\n            enable-bfd:\n              local-address: \"\"\n              local-discriminator: \"\"\n              remote-discriminator: \"\"\n          index: \"\"\n          ip-address: \"\"\n          pushed-mpls-label-stack: \"\"\n          resolve: \"true\"\n    protocols:\n      bgp:\n        admin-state: enable\n        as-path-options:\n          allow-own-as: \"0\"\n          remove-private-as:\n            ignore-peer-as: \"false\"\n            leading-only: \"false\"\n            mode: disabled\n        authentication:\n          keychain: \"\"\n        autonomous-system: \"\"\n        convergence:\n          min-wait-to-advertise: \"0\"\n        dynamic-neighbors:\n          accept:\n            match:\n            - allowed-peer-as: \"\"\n              peer-group: \"\"\n              prefix: \"\"\n            max-sessions: \"0\"\n        ebgp-default-policy:\n          export-reject-all: \"true\"\n          import-reject-all: \"true\"\n        evpn:\n          admin-state: disable\n          advertise-ipv6-next-hops: \"false\"\n          keep-all-routes: \"\"\n          rapid-update: \"false\"\n        export-policy: \"\"\n        failure-detection:\n          enable-bfd: \"false\"\n          fast-failover: \"true\"\n        graceful-restart:\n          admin-state: disable\n          stale-routes-time: \"360\"\n        group:\n        - admin-state: enable\n          as-path-options:\n            allow-own-as: \"\"\n            remove-private-as:\n              ignore-peer-as: \"false\"\n              leading-only: \"false\"\n              mode: \"\"\n            replace-peer-as: \"\"\n          authentication:\n            keychain: \"\"\n          description: \"\"\n          evpn:\n            admin-state: \"\"\n            advertise-ipv6-next-hops: \"\"\n            
prefix-limit:\n              max-received-routes: \"4294967295\"\n              warning-threshold-pct: \"90\"\n          export-policy: \"\"\n          failure-detection:\n            enable-bfd: \"\"\n            fast-failover: \"\"\n          graceful-restart:\n            admin-state: \"\"\n            stale-routes-time: \"\"\n          group-name: \"\"\n          import-policy: \"\"\n          ipv4-unicast:\n            admin-state: \"\"\n            advertise-ipv6-next-hops: \"\"\n            prefix-limit:\n              max-received-routes: \"4294967295\"\n              warning-threshold-pct: \"90\"\n            receive-ipv6-next-hops: \"\"\n          ipv6-unicast:\n            admin-state: \"\"\n            prefix-limit:\n              max-received-routes: \"4294967295\"\n              warning-threshold-pct: \"90\"\n          local-as:\n          - as-number: \"\"\n            prepend-global-as: \"true\"\n            prepend-local-as: \"true\"\n          local-preference: \"\"\n          next-hop-self: \"false\"\n          peer-as: \"\"\n          route-reflector:\n            client: \"\"\n            cluster-id: \"\"\n          send-community:\n            large: \"\"\n            standard: \"\"\n          send-default-route:\n            export-policy: \"\"\n            ipv4-unicast: \"false\"\n            ipv6-unicast: \"false\"\n          timers:\n            connect-retry: \"120\"\n            hold-time: \"90\"\n            keepalive-interval: \"\"\n            minimum-advertisement-interval: \"5\"\n          trace-options:\n            flag:\n            - modifier: \"\"\n              name: \"\"\n          transport:\n            local-address: \"\"\n            passive-mode: \"false\"\n            tcp-mss: \"\"\n        import-policy: \"\"\n        ipv4-unicast:\n          admin-state: enable\n          advertise-ipv6-next-hops: \"false\"\n          convergence:\n            max-wait-to-advertise: \"0\"\n          multipath:\n            
allow-multiple-as: \"true\"\n            max-paths-level-1: \"1\"\n            max-paths-level-2: \"1\"\n          receive-ipv6-next-hops: \"false\"\n        ipv6-unicast:\n          admin-state: disable\n          convergence:\n            max-wait-to-advertise: \"0\"\n          multipath:\n            allow-multiple-as: \"true\"\n            max-paths-level-1: \"1\"\n            max-paths-level-2: \"1\"\n        local-preference: \"100\"\n        neighbor:\n        - admin-state: enable\n          as-path-options:\n            allow-own-as: \"\"\n            remove-private-as:\n              ignore-peer-as: \"false\"\n              leading-only: \"false\"\n              mode: \"\"\n            replace-peer-as: \"\"\n          authentication:\n            keychain: \"\"\n          description: \"\"\n          evpn:\n            admin-state: \"\"\n            advertise-ipv6-next-hops: \"\"\n            prefix-limit:\n              max-received-routes: \"\"\n              warning-threshold-pct: \"\"\n          export-policy: \"\"\n          failure-detection:\n            enable-bfd: \"\"\n            fast-failover: \"\"\n          graceful-restart:\n            admin-state: \"\"\n            stale-routes-time: \"\"\n            warm-restart:\n              admin-state: \"\"\n          import-policy: \"\"\n          ipv4-unicast:\n            admin-state: \"\"\n            advertise-ipv6-next-hops: \"\"\n            prefix-limit:\n              max-received-routes: \"\"\n              warning-threshold-pct: \"\"\n            receive-ipv6-next-hops: \"\"\n          ipv6-unicast:\n            admin-state: \"\"\n            prefix-limit:\n              max-received-routes: \"\"\n              warning-threshold-pct: \"\"\n          local-as:\n          - as-number: \"\"\n            prepend-global-as: \"\"\n            prepend-local-as: \"\"\n          local-preference: \"\"\n          next-hop-self: \"\"\n          peer-address: \"\"\n          peer-as: \"\"\n          
peer-group: \"\"\n          route-reflector:\n            client: \"\"\n            cluster-id: \"\"\n          send-community:\n            large: \"\"\n            standard: \"\"\n          send-default-route:\n            export-policy: \"\"\n            ipv4-unicast: \"\"\n            ipv6-unicast: \"\"\n          timers:\n            connect-retry: \"\"\n            hold-time: \"\"\n            keepalive-interval: \"\"\n            minimum-advertisement-interval: \"\"\n          trace-options:\n            flag:\n            - modifier: \"\"\n              name: \"\"\n          transport:\n            local-address: \"\"\n            passive-mode: \"\"\n            tcp-mss: \"\"\n        preference:\n          ebgp: \"170\"\n          ibgp: \"170\"\n        route-advertisement:\n          rapid-withdrawal: \"false\"\n          wait-for-fib-install: \"true\"\n        route-reflector:\n          client: \"false\"\n          cluster-id: \"\"\n        router-id: \"\"\n        send-community:\n          large: \"true\"\n          standard: \"true\"\n        trace-options:\n          flag:\n          - modifier: \"\"\n            name: \"\"\n        transport:\n          tcp-mss: \"1024\"\n      bgp-evpn:\n        bgp-instance:\n        - admin-state: enable\n          default-admin-tag: \"0\"\n          ecmp: \"1\"\n          encapsulation-type: vxlan\n          evi: \"\"\n          id: \"\"\n          routes:\n            bridge-table:\n              inclusive-mcast:\n                advertise: \"true\"\n                originating-ip: \"\"\n              mac-ip:\n                advertise: \"true\"\n              next-hop: use-system-ipv4-address\n            route-table:\n              mac-ip:\n                advertise-gateway-mac: \"false\"\n          vxlan-interface: \"\"\n      bgp-vpn:\n        bgp-instance:\n        - export-policy: \"\"\n          id: \"\"\n          import-policy: \"\"\n          route-distinguisher:\n            rd: \"\"\n          
route-target:\n            export-rt: \"\"\n            import-rt: \"\"\n      directly-connected:\n        te-database-install:\n          bgp-ls:\n            bgp-ls-identifier: \"\"\n            igp-identifier: \"\"\n      isis:\n        instance:\n        - admin-state: disable\n          attached-bit:\n            ignore: \"false\"\n            suppress: \"false\"\n          authentication:\n            csnp-authentication: \"\"\n            hello-authentication: \"\"\n            keychain: \"\"\n            psnp-authentication: \"\"\n          auto-cost:\n            reference-bandwidth: \"\"\n          export-policy: \"\"\n          graceful-restart:\n            helper-mode: \"false\"\n          inter-level-propagation-policies:\n            level1-to-level2:\n              summary-address:\n              - ip-prefix: \"\"\n                route-tag: \"\"\n          interface:\n          - admin-state: enable\n            authentication:\n              hello-authentication: \"\"\n              keychain: \"\"\n            circuit-type: \"\"\n            hello-padding: disable\n            interface-name: \"\"\n            ipv4-unicast:\n              admin-state: enable\n              enable-bfd: \"false\"\n              include-bfd-tlv: \"false\"\n            ipv6-unicast:\n              admin-state: enable\n              enable-bfd: \"false\"\n              include-bfd-tlv: \"false\"\n            ldp-synchronization:\n              disable: \"\"\n              end-of-lib: \"\"\n              hold-down-timer: \"\"\n            level:\n            - authentication:\n                keychain: \"\"\n              disable: \"false\"\n              ipv6-unicast-metric: \"\"\n              level-number: \"\"\n              metric: \"\"\n              priority: \"64\"\n              timers:\n                hello-interval: \"9\"\n                hello-multiplier: \"3\"\n            passive: \"false\"\n            timers:\n              csnp-interval: \"10\"\n      
        lsp-pacing-interval: \"100\"\n            trace-options:\n              trace: \"\"\n          ipv4-unicast:\n            admin-state: enable\n          ipv6-unicast:\n            admin-state: enable\n            multi-topology: \"false\"\n          ldp-synchronization:\n            end-of-lib: \"false\"\n            hold-down-timer: \"60\"\n          level:\n          - authentication:\n              csnp-authentication: \"\"\n              hello-authentication: \"\"\n              keychain: \"\"\n              psnp-authentication: \"\"\n            bgp-ls-exclude: \"false\"\n            level-number: \"\"\n            metric-style: wide\n            route-preference:\n              external: \"\"\n              internal: \"\"\n            trace-options:\n              trace: \"\"\n          level-capability: L2\n          max-ecmp-paths: \"1\"\n          name: \"\"\n          net: \"\"\n          overload:\n            advertise-external: \"false\"\n            advertise-interlevel: \"false\"\n            immediate:\n              max-metric: \"false\"\n              set-bit: \"false\"\n            on-boot:\n              max-metric: \"\"\n              set-bit: \"\"\n              timeout: \"\"\n          poi-tlv: \"false\"\n          te-database-install:\n            bgp-ls:\n              bgp-ls-identifier: \"\"\n              igp-identifier: \"\"\n          timers:\n            lsp-generation:\n              initial-wait: \"10\"\n              max-wait: \"5000\"\n              second-wait: \"1000\"\n            lsp-lifetime: \"1200\"\n            lsp-refresh:\n              half-lifetime: \"true\"\n              interval: \"600\"\n            spf:\n              initial-wait: \"1000\"\n              max-wait: \"10000\"\n              second-wait: \"1000\"\n          trace-options:\n            trace: \"\"\n          traffic-engineering:\n            advertisement: \"false\"\n            legacy-link-attribute-advertisement: \"true\"\n          
transport:\n            lsp-mtu-size: \"1492\"\n      ldp:\n        admin-state: disable\n        discovery:\n          interfaces:\n            hello-holdtime: \"15\"\n            hello-interval: \"5\"\n            interface:\n            - admin-state: \"\"\n              hello-holdtime: \"15\"\n              hello-interval: \"5\"\n              ipv4:\n                admin-state: enable\n              name: \"\"\n        dynamic-label-block: \"\"\n        graceful-restart:\n          helper-enable: \"false\"\n          max-reconnect-time: \"120\"\n          max-recovery-time: \"120\"\n        ipv4:\n          fec-resolution:\n            longest-prefix: \"false\"\n        multipath:\n          max-paths: \"\"\n        peers:\n          peer:\n          - ipv4:\n              fec-limit: \"\"\n            label-space-id: \"\"\n            lsr-id: \"\"\n            tcp-transport:\n              authentication:\n                keychain: \"\"\n          session-keepalive-holdtime: \"180\"\n          session-keepalive-interval: \"60\"\n          tcp-transport:\n            authentication:\n              keychain: \"\"\n        trace-options:\n          interface:\n          - name: \"\"\n          peer:\n          - label-space-id: \"\"\n            lsr-id: \"\"\n      linux:\n        export-neighbors: \"true\"\n        export-routes: \"false\"\n        import-routes: \"false\"\n      ospf:\n        instance:\n        - address-family: \"\"\n          admin-state: disable\n          advertise-router-capability: \"\"\n          area:\n          - advertise-router-capability: \"true\"\n            area-id: \"\"\n            area-range:\n            - advertise: \"true\"\n              ip-prefix-mask: \"\"\n            bgp-ls-exclude: \"false\"\n            blackhole-aggregate: \"true\"\n            export-policy: \"\"\n            interface:\n            - admin-state: enable\n              advertise-router-capability: \"true\"\n              advertise-subnet: 
\"true\"\n              authentication:\n                keychain: \"\"\n              dead-interval: \"40\"\n              failure-detection:\n                enable-bfd: \"false\"\n              hello-interval: \"10\"\n              interface-name: \"\"\n              interface-type: \"\"\n              lsa-filter-out: none\n              metric: \"\"\n              mtu: \"\"\n              passive: \"\"\n              priority: \"1\"\n              retransmit-interval: \"5\"\n              trace-options:\n                trace:\n                  adjacencies: \"\"\n                  interfaces: \"\"\n                  packet:\n                    detail: \"\"\n                    modifier: \"\"\n                    type: \"\"\n              transit-delay: \"1\"\n            nssa:\n              area-range:\n              - advertise: \"true\"\n                ip-prefix-mask: \"\"\n              originate-default-route:\n                adjacency-check: \"true\"\n                type-nssa: \"false\"\n              redistribute-external: \"\"\n              summaries: \"\"\n            stub:\n              default-metric: \"1\"\n              summaries: \"\"\n          asbr:\n            trace-path: none\n          export-limit:\n            log-percent: \"\"\n            number: \"\"\n          export-policy: \"\"\n          external-db-overflow:\n            interval: \"0\"\n            limit: \"0\"\n          external-preference: \"150\"\n          graceful-restart:\n            helper-mode: \"false\"\n            strict-lsa-checking: \"false\"\n          instance-id: \"\"\n          max-ecmp-paths: \"1\"\n          name: \"\"\n          overload:\n            active: \"false\"\n            overload-include-ext-1: \"false\"\n            overload-include-ext-2: \"false\"\n            overload-include-stub: \"false\"\n            overload-on-boot:\n              timeout: \"60\"\n            rtr-adv-lsa-limit:\n              log-only: \"\"\n              
max-lsa-count: \"\"\n              overload-timeout: \"\"\n              warning-threshold: \"0\"\n          preference: \"10\"\n          reference-bandwidth: \"400000000\"\n          router-id: \"\"\n          te-database-install:\n            bgp-ls:\n              bgp-ls-identifier: \"\"\n              igp-identifier: \"\"\n          timers:\n            incremental-spf-wait: \"1000\"\n            lsa-accumulate: \"1000\"\n            lsa-arrival: \"1000\"\n            lsa-generate:\n              lsa-initial-wait: \"5000\"\n              lsa-second-wait: \"5000\"\n              max-lsa-wait: \"5000\"\n            redistribute-delay: \"1000\"\n            spf-wait:\n              spf-initial-wait: \"1000\"\n              spf-max-wait: \"10000\"\n              spf-second-wait: \"1000\"\n          trace-options:\n            trace:\n              adjacencies: \"\"\n              graceful-restart: \"\"\n              interfaces: \"\"\n              lsdb:\n                link-state-id: \"\"\n                router-id: \"\"\n                type: \"\"\n              misc: \"\"\n              packet:\n                detail: \"\"\n                modifier: \"\"\n                type: \"\"\n              routes:\n                dest-address: \"\"\n              spf:\n                dest-address: \"\"\n          traffic-engineering:\n            advertisement: \"false\"\n            legacy-link-attribute-advertisement: \"true\"\n          version: \"\"\n    router-id: \"\"\n    static-routes:\n      route:\n      - admin-state: enable\n        metric: \"1\"\n        next-hop-group: \"\"\n        preference: \"5\"\n        prefix: \"\"\n    traffic-engineering:\n      admin-groups:\n        group:\n        - bit-position: \"\"\n          name: \"\"\n      autonomous-system: \"\"\n      interface:\n      - admin-group: \"\"\n        delay:\n          static: \"\"\n        interface-name: \"\"\n        srlg-membership: \"\"\n        te-metric: \"\"\n      
ipv4-te-router-id: \"\"\n      ipv6-te-router-id: \"\"\n      shared-risk-link-groups:\n        group:\n        - cost: \"\"\n          name: \"\"\n          static-member:\n          - from-address: \"\"\n            to-address: \"\"\n          value: \"\"\n    type: default\n    vxlan-interface:\n    - name: \"\"\n"
  },
  {
    "path": "examples/set-request-templates/Nokia/SRL/3.acl/acl_template.gotmpl",
    "content": "replaces:\n  - path: \"/acl\"\n    encoding: \"json_ietf\"\n    value: {{ index .Vars \"acl\" }}\n"
  },
  {
    "path": "examples/set-request-templates/Nokia/SRL/3.acl/acl_template_vars.yaml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nacl:\n  capture-filter:\n    ipv4-filter:\n      statistics-per-entry: # true | false\n      entry:\n        - sequence-id:\n          description:\n          match:\n            source-ip:\n              prefix:\n              address:\n              mask:\n            destination-ip:\n              prefix:\n              address:\n              mask:\n            source-port:\n              operator:\n              value:\n              range:\n                start:\n                end:\n            destination-port:\n              operator:\n              value:\n              range:\n                start:\n                end:\n            icmp:\n              type:\n              code: []\n            tcp-flags:\n            protocol:\n            fragment:\n            first-fragment: \n          action:\n            accept: {}\n            copy: {}\n\n    ipv6-filter:\n      statistics-per-entry: # true | false\n      entry:\n        - sequence-id:\n          description:\n          match:\n            source-ip:\n              prefix:\n              address:\n              mask:\n            destination-ip:\n              prefix:\n              address:\n              mask:\n            icmp6:\n              type:\n              code: []\n            next-header:\n            protocol:\n            fragment:\n            first-fragment: \n          action:\n            accept: {}\n            drop: {}\n  cpm-filter:\n    ipv4-filter:\n      statistics-per-entry: # true | false\n      entry:\n        
- sequence-id:\n          description:\n          match:\n            source-ip:\n              prefix:\n              address:\n              mask:\n            destination-ip:\n              prefix:\n              address:\n              mask:\n            source-port:\n              operator:\n              value:\n              range:\n                start:\n                end:\n            destination-port:\n              operator:\n              value:\n              range:\n                start:\n                end:\n            icmp:\n              type:\n              code: []\n            tcp-flags:\n            protocol:\n            fragment:\n            first-fragment: \n          action:\n            accept:\n              log: # true | false\n              rate-limit:\n                distributed-policer: \n                system-cpu-policer:\n            drop:\n              log: # true | false\n    ipv6-filter:\n      statistics-per-entry: # true | false\n      entry:\n        - sequence-id:\n          description:\n          match:\n            source-ip:\n              prefix:\n              address:\n              mask:\n            destination-ip:\n              prefix:\n              address:\n              mask:\n            icmp6:\n              type:\n              code: []\n            next-header:\n            protocol:\n            fragment:\n            first-fragment: \n          action:\n            accept:\n              log: # true | false\n              rate-limit:\n                distributed-policer: \n                system-cpu-policer:\n            drop:\n              log: # true | false\n  \n  ipv4-filter:\n    - name: \"\"\n      description:\n      subinterface-specific:\n      statistics-per-entry: # true | false\n      entry:\n        - sequence-id:\n          description:\n          action:\n            accept:\n              log: # true | false\n            drop:\n              log: # true | false\n          
match:\n            source-ip:\n              prefix:\n              address:\n              mask:\n            destination-ip:\n              prefix:\n              address:\n              mask:\n            source-port:\n              operator:\n              value:\n              range:\n                start:\n                end:\n            destination-port:\n              operator:\n              value:\n              range:\n                start:\n                end:\n            icmp:\n              type:\n              code: []\n            tcp-flags:\n            protocol:\n            fragment:\n            first-fragment: \n  ipv6-filter:\n    - name: \"\"\n      description:\n      subinterface-specific:\n      statistics-per-entry: # true | false\n      entry:\n        - sequence-id:\n          description:\n          action:\n            accept:\n              log: # true | false\n            drop:\n              log: # true | false\n          match:\n            source-ip:\n              prefix:\n              address:\n              mask:\n            destination-ip:\n              prefix:\n              address:\n              mask:\n            source-port:\n              operator:\n              value:\n              range:\n                start:\n                end:\n            destination-port:\n              operator:\n              value:\n              range:\n                start:\n                end:\n            icmp6:\n              type:\n              code: []\n            next-header:\n            tcp-flags:\n            protocol:\n            fragment:\n            first-fragment: \n  policers:\n    policer:\n      - name: \"\"\n        entry-specific: # true | false\n        peak-rate: \n        max-burst:\n    system-cpu-policer:\n      - name: \"\"\n        entry-specific: # true | false\n        peak-packet-rate: \n        max-packet-burst:\n  system-filter:\n    ipv4-filter:\n      entry:\n        - sequence-id:\n      
    description:\n          action:\n            accept:\n            drop:\n              log: # true | false\n          match:\n            source-ip:\n              prefix:\n              address:\n              mask:\n            destination-ip:\n              prefix:\n              address:\n              mask:\n            source-port:\n              operator:\n              value:\n              range:\n                start:\n                end:\n            destination-port:\n              operator:\n              value:\n              range:\n                start:\n                end:\n            icmp:\n              type:\n              code: []\n            tcp-flags:\n            protocol:\n            fragment:\n            first-fragment: \n    ipv6-filter:\n      entry:\n        - sequence-id:\n          description:\n          action:\n            accept:\n            drop:\n              log: # true | false\n          match:\n            source-ip:\n              prefix:\n              address:\n              mask:\n            destination-ip:\n              prefix:\n              address:\n              mask:\n            source-port:\n              operator:\n              value:\n              range:\n                start:\n                end:\n            destination-port:\n              operator:\n              value:\n              range:\n                start:\n                end:\n            icmp6:\n              type:\n              code: []\n            next-header:\n            tcp-flags:\n            protocol:\n            fragment:\n            first-fragment: \n  tcam-profile: # default | ipv4-egress-scaled"
  },
  {
    "path": "go.mod",
    "content": "module github.com/openconfig/gnmic\n\ngo 1.24.12\n\nreplace github.com/openconfig/gnmic/pkg/api v0.1.11 => ./pkg/api\n\nreplace github.com/openconfig/gnmic/pkg/cache v0.1.3 => ./pkg/cache\n\nrequire (\n\tgithub.com/IBM/sarama v1.46.3\n\tgithub.com/adrg/xdg v0.5.3\n\tgithub.com/c-bata/go-prompt v0.2.6\n\tgithub.com/docker/docker v28.5.1+incompatible\n\tgithub.com/fsnotify/fsnotify v1.9.0\n\tgithub.com/fullstorydev/grpcurl v1.9.3\n\tgithub.com/go-redsync/redsync/v4 v4.13.0\n\tgithub.com/go-resty/resty/v2 v2.16.5\n\tgithub.com/google/go-cmp v0.7.0\n\tgithub.com/google/uuid v1.6.0\n\tgithub.com/gorilla/handlers v1.5.2\n\tgithub.com/gorilla/mux v1.8.1\n\tgithub.com/gosnmp/gosnmp v1.42.1\n\tgithub.com/grafana/pyroscope-go v1.2.7\n\tgithub.com/grpc-ecosystem/go-grpc-prometheus v1.2.0\n\tgithub.com/guptarohit/asciigraph v0.7.3\n\tgithub.com/hairyhenderson/gomplate/v3 v3.11.8\n\tgithub.com/hairyhenderson/yaml v0.0.0-20220618171115-2d35fca545ce\n\tgithub.com/hashicorp/consul/api v1.32.0\n\tgithub.com/hashicorp/go-plugin v1.7.0\n\tgithub.com/hashicorp/golang-lru/v2 v2.0.7\n\tgithub.com/huandu/xstrings v1.5.0\n\tgithub.com/influxdata/influxdb-client-go/v2 v2.14.0\n\tgithub.com/itchyny/gojq v0.12.14\n\tgithub.com/jellydator/ttlcache/v3 v3.4.0\n\tgithub.com/jhump/protoreflect v1.17.0\n\tgithub.com/jlaffaye/ftp v0.2.0\n\tgithub.com/karimra/go-map-flattener v0.0.1\n\tgithub.com/karimra/sros-dialout v0.0.0-20260117201857-18e893af823c\n\tgithub.com/manifoldco/promptui v0.9.0\n\tgithub.com/mitchellh/go-homedir v1.1.0\n\tgithub.com/mitchellh/mapstructure v1.5.0\n\tgithub.com/nats-io/nats.go v1.49.0\n\tgithub.com/nsf/termbox-go v1.1.1\n\tgithub.com/olekukonko/tablewriter v0.0.5\n\tgithub.com/openconfig/gnmi v0.14.1\n\tgithub.com/openconfig/gnmic/pkg/api v0.1.11\n\tgithub.com/openconfig/gnmic/pkg/cache v0.1.3\n\tgithub.com/openconfig/goyang v1.6.3\n\tgithub.com/openconfig/ygot v0.34.0\n\tgithub.com/pkg/sftp v1.13.9\n\tgithub.com/prometheus/client_golang 
v1.23.2\n\tgithub.com/prometheus/client_model v0.6.2\n\tgithub.com/prometheus/prometheus v0.306.0\n\tgithub.com/redis/go-redis/v9 v9.14.0\n\tgithub.com/spf13/cobra v1.9.1\n\tgithub.com/spf13/pflag v1.0.6\n\tgithub.com/spf13/viper v1.19.0\n\tgithub.com/stretchr/testify v1.11.1\n\tgithub.com/xdg/scram v1.0.5\n\tgithub.com/zestor-dev/zestor v0.0.2\n\tgo.opentelemetry.io/proto/otlp v1.8.0\n\tgo.starlark.net v0.0.0-20260102030733-3fee463870c9\n\tgolang.org/x/crypto v0.48.0\n\tgolang.org/x/oauth2 v0.34.0\n\tgolang.org/x/sync v0.19.0\n\tgoogle.golang.org/grpc v1.79.3\n\tgoogle.golang.org/protobuf v1.36.11\n\tgopkg.in/natefinch/lumberjack.v2 v2.2.1\n\tgopkg.in/yaml.v2 v2.4.0\n\tk8s.io/api v0.32.3\n\tk8s.io/apimachinery v0.32.3\n\tk8s.io/utils v0.0.0-20241104100929-3ea5e8cea738\n)\n\nrequire (\n\tbitbucket.org/creachadair/stringset v0.0.14 // indirect\n\tcel.dev/expr v0.25.1 // indirect\n\tcloud.google.com/go/auth v0.16.2 // indirect\n\tcloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect\n\tcloud.google.com/go/compute/metadata v0.9.0 // indirect\n\tcloud.google.com/go/iam v1.5.2 // indirect\n\tcloud.google.com/go/monitoring v1.24.2 // indirect\n\tdario.cat/mergo v1.0.0 // indirect\n\tgithub.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect\n\tgithub.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect\n\tgithub.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect\n\tgithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 // indirect\n\tgithub.com/Knetic/govaluate v3.0.0+incompatible // indirect\n\tgithub.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op // indirect\n\tgithub.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect\n\tgithub.com/apparentlymart/go-cidr v1.1.0 // indirect\n\tgithub.com/armon/go-radix v1.0.0 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 // 
indirect\n\tgithub.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.14 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/v4a v1.0.2 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.6 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.5 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/s3 v1.26.10 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect\n\tgithub.com/bcicen/bfstree v1.0.0 // indirect\n\tgithub.com/bufbuild/protocompile v0.14.1 // indirect\n\tgithub.com/cenkalti/backoff/v4 v4.3.0 // indirect\n\tgithub.com/clipperhouse/stringish v0.1.1 // indirect\n\tgithub.com/clipperhouse/uax29/v2 v2.3.0 // indirect\n\tgithub.com/cloudflare/circl v1.6.3 // indirect\n\tgithub.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect\n\tgithub.com/containerd/errdefs v1.0.0 // indirect\n\tgithub.com/containerd/errdefs/pkg v0.3.0 // indirect\n\tgithub.com/cyphar/filepath-securejoin v0.4.1 // indirect\n\tgithub.com/derekparker/trie v0.0.0-20230829180723-39f4de51ef7d // indirect\n\tgithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect\n\tgithub.com/distribution/reference v0.5.0 // indirect\n\tgithub.com/emicklei/go-restful/v3 v3.11.0 // indirect\n\tgithub.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect\n\tgithub.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect\n\tgithub.com/fxamacker/cbor/v2 v2.7.0 // indirect\n\tgithub.com/go-jose/go-jose/v4 v4.1.3 // indirect\n\tgithub.com/go-logr/logr v1.4.3 // indirect\n\tgithub.com/go-logr/stdr v1.2.2 // indirect\n\tgithub.com/go-openapi/jsonpointer v0.21.0 // indirect\n\tgithub.com/go-openapi/jsonreference v0.21.0 // indirect\n\tgithub.com/go-openapi/swag v0.23.0 // 
indirect\n\tgithub.com/go-redis/redis/v8 v8.11.5 // indirect\n\tgithub.com/gomodule/redigo v2.0.0+incompatible // indirect\n\tgithub.com/google/gnostic-models v0.6.8 // indirect\n\tgithub.com/google/go-tpm v0.9.8 // indirect\n\tgithub.com/google/gofuzz v1.2.0 // indirect\n\tgithub.com/google/s2a-go v0.1.9 // indirect\n\tgithub.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect\n\tgithub.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect\n\tgithub.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect\n\tgithub.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect\n\tgithub.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect\n\tgithub.com/hairyhenderson/go-fsimpl v0.0.0-20220529183339-9deae3e35047 // indirect\n\tgithub.com/hashicorp/go-msgpack v1.1.5 // indirect\n\tgithub.com/hashicorp/go-secure-stdlib/mlock v0.1.2 // indirect\n\tgithub.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect\n\tgithub.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect\n\tgithub.com/hashicorp/go-version v1.7.0 // indirect\n\tgithub.com/hashicorp/yamux v0.1.2 // indirect\n\tgithub.com/jcmturner/aescts/v2 v2.0.0 // indirect\n\tgithub.com/jcmturner/dnsutils/v2 v2.0.0 // indirect\n\tgithub.com/jcmturner/gokrb5/v8 v8.4.4 // indirect\n\tgithub.com/jcmturner/rpc/v2 v2.0.3 // indirect\n\tgithub.com/josharian/intern v1.0.0 // indirect\n\tgithub.com/json-iterator/go v1.1.12 // indirect\n\tgithub.com/juju/ratelimit v1.0.2 // indirect\n\tgithub.com/kylelemons/godebug v1.1.0 // indirect\n\tgithub.com/mailru/easyjson v0.7.7 // indirect\n\tgithub.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 // indirect\n\tgithub.com/mitchellh/copystructure v1.2.0 // indirect\n\tgithub.com/mitchellh/go-testing-interface v1.14.1 // indirect\n\tgithub.com/mitchellh/reflectwalk v1.0.2 // indirect\n\tgithub.com/moby/docker-image-spec v1.3.1 // indirect\n\tgithub.com/moby/sys/sequential v0.6.0 // indirect\n\tgithub.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd // indirect\n\tgithub.com/modern-go/reflect2 v1.0.2 // indirect\n\tgithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect\n\tgithub.com/nats-io/jwt/v2 v2.8.0 // indirect\n\tgithub.com/oapi-codegen/runtime v1.0.0 // indirect\n\tgithub.com/oklog/run v1.2.0 // indirect\n\tgithub.com/pelletier/go-toml/v2 v2.2.3 // indirect\n\tgithub.com/pierrec/lz4/v4 v4.1.22 // indirect\n\tgithub.com/pjbgf/sha1cd v0.3.2 // indirect\n\tgithub.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect\n\tgithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect\n\tgithub.com/sagikazarmark/locafero v0.7.0 // indirect\n\tgithub.com/sagikazarmark/slog-shim v0.1.0 // indirect\n\tgithub.com/skeema/knownhosts v1.3.1 // indirect\n\tgithub.com/sourcegraph/conc v0.3.0 // indirect\n\tgithub.com/spiffe/go-spiffe/v2 v2.6.0 // indirect\n\tgithub.com/x448/float16 v0.8.4 // indirect\n\tgithub.com/zealic/xignore v0.3.3 // indirect\n\tgo.opentelemetry.io/auto/sdk v1.2.1 // indirect\n\tgo.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect\n\tgo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect\n\tgo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect\n\tgo.opentelemetry.io/otel v1.40.0 // indirect\n\tgo.opentelemetry.io/otel/metric v1.40.0 // indirect\n\tgo.opentelemetry.io/otel/sdk v1.40.0 // indirect\n\tgo.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect\n\tgo.opentelemetry.io/otel/trace v1.40.0 // indirect\n\tgo.uber.org/atomic v1.11.0 // indirect\n\tgo.uber.org/multierr v1.11.0 // indirect\n\tgo.yaml.in/yaml/v2 v2.4.2 // indirect\n\tgolang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa // indirect\n\tgolang.org/x/term v0.40.0 // indirect\n\tgoogle.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect\n\tgoogle.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // 
indirect\n\tgopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect\n\tgopkg.in/inf.v0 v0.9.1 // indirect\n\tk8s.io/klog/v2 v2.130.1 // indirect\n\tk8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect\n\tsigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect\n\tsigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect\n\tsigs.k8s.io/yaml v1.4.0 // indirect\n)\n\nrequire (\n\tcloud.google.com/go v0.120.0 // indirect\n\tcloud.google.com/go/storage v1.50.0 // indirect\n\tgithub.com/AlekSi/pointer v1.2.0\n\tgithub.com/Masterminds/goutils v1.1.1 // indirect\n\tgithub.com/Microsoft/go-winio v0.6.2 // indirect\n\tgithub.com/ProtonMail/go-crypto v1.1.6 // indirect\n\tgithub.com/Shopify/ejson v1.3.3 // indirect\n\tgithub.com/armon/go-metrics v0.4.1 // indirect\n\tgithub.com/aws/aws-sdk-go v1.55.7 // indirect\n\tgithub.com/aws/aws-sdk-go-v2 v1.36.3 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/config v1.29.14 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect\n\tgithub.com/aws/smithy-go v1.22.2 // indirect\n\tgithub.com/bcicen/go-units v1.0.3\n\tgithub.com/beorn7/perks v1.0.1 // indirect\n\tgithub.com/cenkalti/backoff/v3 v3.2.2 // indirect\n\tgithub.com/cespare/xxhash/v2 v2.3.0 // indirect\n\tgithub.com/chzyer/readline v1.5.1 // indirect\n\tgithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect\n\tgithub.com/docker/go-connections v0.4.0 // indirect\n\tgithub.com/docker/go-units v0.5.0 // indirect\n\tgithub.com/docker/libkv v0.2.2-0.20180912205406-458977154600 // indirect\n\tgithub.com/dustin/gojson v0.0.0-20160307161227-2e71ec9dd5ad // 
indirect\n\tgithub.com/eapache/go-resiliency v1.7.0 // indirect\n\tgithub.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect\n\tgithub.com/eapache/queue v1.1.0 // indirect\n\tgithub.com/emirpasic/gods v1.18.1 // indirect\n\tgithub.com/fatih/color v1.16.0 // indirect\n\tgithub.com/felixge/httpsnoop v1.0.4 // indirect\n\tgithub.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect\n\tgithub.com/go-git/go-billy/v5 v5.6.2 // indirect\n\tgithub.com/go-git/go-git/v5 v5.16.5 // indirect\n\tgithub.com/gogo/protobuf v1.3.2\n\tgithub.com/golang/glog v1.2.5 // indirect\n\tgithub.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect\n\tgithub.com/golang/protobuf v1.5.4 // indirect\n\tgithub.com/golang/snappy v1.0.0\n\tgithub.com/google/wire v0.5.0 // indirect\n\tgithub.com/googleapis/gax-go/v2 v2.14.2 // indirect\n\tgithub.com/gosimple/slug v1.12.0 // indirect\n\tgithub.com/gosimple/unidecode v1.0.1 // indirect\n\tgithub.com/hairyhenderson/toml v0.4.2-0.20210923231440-40456b8e66cf // indirect\n\tgithub.com/hashicorp/errwrap v1.1.0 // indirect\n\tgithub.com/hashicorp/go-cleanhttp v0.5.2 // indirect\n\tgithub.com/hashicorp/go-hclog v1.6.3\n\tgithub.com/hashicorp/go-immutable-radix v1.3.1 // indirect\n\tgithub.com/hashicorp/go-multierror v1.1.1 // indirect\n\tgithub.com/hashicorp/go-retryablehttp v0.7.7 // indirect\n\tgithub.com/hashicorp/go-rootcerts v1.0.2 // indirect\n\tgithub.com/hashicorp/go-sockaddr v1.0.2 // indirect\n\tgithub.com/hashicorp/go-uuid v1.0.3 // indirect\n\tgithub.com/hashicorp/golang-lru v1.0.2 // indirect\n\tgithub.com/hashicorp/hcl v1.0.0 // indirect\n\tgithub.com/hashicorp/serf v0.10.1 // indirect\n\tgithub.com/hashicorp/vault/api v1.6.0 // indirect\n\tgithub.com/hashicorp/vault/sdk v0.5.0 // indirect\n\tgithub.com/inconshreveable/mousetrap v1.1.0 // indirect\n\tgithub.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect\n\tgithub.com/itchyny/timefmt-go v0.1.7 // 
indirect\n\tgithub.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect\n\tgithub.com/jcmturner/gofork v1.7.6 // indirect\n\tgithub.com/jmespath/go-jmespath v0.4.0 // indirect\n\tgithub.com/joho/godotenv v1.4.0 // indirect\n\tgithub.com/kevinburke/ssh_config v1.2.0 // indirect\n\tgithub.com/klauspost/compress v1.18.3 // indirect\n\tgithub.com/kr/fs v0.1.0 // indirect\n\tgithub.com/magiconair/properties v1.8.9 // indirect\n\tgithub.com/mattn/go-colorable v0.1.13 // indirect\n\tgithub.com/mattn/go-isatty v0.0.20 // indirect\n\tgithub.com/mattn/go-runewidth v0.0.19 // indirect\n\tgithub.com/mattn/go-tty v0.0.4 // indirect\n\tgithub.com/nats-io/nats-server/v2 v2.12.4 // indirect\n\tgithub.com/nats-io/nkeys v0.4.15 // indirect\n\tgithub.com/nats-io/nuid v1.0.1 // indirect\n\tgithub.com/openconfig/grpctunnel v0.1.0\n\tgithub.com/opencontainers/go-digest v1.0.0 // indirect\n\tgithub.com/opencontainers/image-spec v1.0.2 // indirect\n\tgithub.com/pierrec/lz4 v2.6.1+incompatible // indirect\n\tgithub.com/pkg/errors v0.9.1 // indirect\n\tgithub.com/pkg/term v1.2.0-beta.2 // indirect\n\tgithub.com/prometheus/common v0.66.1\n\tgithub.com/prometheus/procfs v0.16.1 // indirect\n\tgithub.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect\n\tgithub.com/rs/zerolog v1.29.0 // indirect\n\tgithub.com/ryanuber/go-glob v1.0.0 // indirect\n\tgithub.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect\n\tgithub.com/spf13/afero v1.12.0 // indirect\n\tgithub.com/spf13/cast v1.7.1 // indirect\n\tgithub.com/subosito/gotenv v1.6.0 // indirect\n\tgithub.com/ugorji/go/codec v1.2.11 // indirect\n\tgithub.com/xanzy/ssh-agent v0.3.3 // indirect\n\tgithub.com/xdg/stringprep v1.0.0 // indirect\n\tgo.etcd.io/bbolt v1.3.10 // indirect\n\tgo.opencensus.io v0.24.0 // indirect\n\tgo4.org/intern v0.0.0-20230205224052-192e9f60865c // indirect\n\tgo4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect\n\tgocloud.dev 
v0.25.1-0.20220408200107-09b10f7359f7 // indirect\n\tgolang.org/x/net v0.50.0 // indirect\n\tgolang.org/x/sys v0.41.0 // indirect\n\tgolang.org/x/text v0.34.0\n\tgolang.org/x/time v0.14.0 // indirect\n\tgolang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect\n\tgoogle.golang.org/api v0.239.0 // indirect\n\tgoogle.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect\n\tgopkg.in/ini.v1 v1.67.0 // indirect\n\tgopkg.in/square/go-jose.v2 v2.6.0 // indirect\n\tgopkg.in/warnings.v0 v0.1.2 // indirect\n\tgopkg.in/yaml.v3 v3.0.1 // indirect\n\tinet.af/netaddr v0.0.0-20230525184311-b8eac61e914a // indirect\n\tk8s.io/client-go v0.32.3\n)\n"
  },
  {
    "path": "go.sum",
    "content": "bitbucket.org/creachadair/stringset v0.0.14 h1:t1ejQyf8utS4GZV/4fM+1gvYucggZkfhb+tMobDxYOE=\nbitbucket.org/creachadair/stringset v0.0.14/go.mod h1:Ej8fsr6rQvmeMDf6CCWMWGb14H9mz8kmDgPPTdiVT0w=\ncel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4=\ncel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=\ncloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=\ncloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=\ncloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=\ncloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=\ncloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=\ncloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=\ncloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=\ncloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=\ncloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=\ncloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=\ncloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=\ncloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=\ncloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=\ncloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=\ncloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=\ncloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=\ncloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=\ncloud.google.com/go v0.81.0/go.mod 
h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=\ncloud.google.com/go v0.82.0/go.mod h1:vlKccHJGuFBFufnAnuB08dfEH9Y3H7dzDzRECFdC2TA=\ncloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=\ncloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=\ncloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=\ncloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=\ncloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=\ncloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=\ncloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=\ncloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=\ncloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=\ncloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=\ncloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA=\ncloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q=\ncloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4=\ncloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA=\ncloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=\ncloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=\ncloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=\ncloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=\ncloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=\ncloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=\ncloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=\ncloud.google.com/go/bigquery 
v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=\ncloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=\ncloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw=\ncloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=\ncloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=\ncloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=\ncloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=\ncloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=\ncloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=\ncloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=\ncloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=\ncloud.google.com/go/iam v0.1.1/go.mod h1:CKqrcnI/suGpybEHxZ7BMehL0oA4LpdyJdUlTl9jVMw=\ncloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=\ncloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8=\ncloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE=\ncloud.google.com/go/kms v1.1.0/go.mod h1:WdbppnCDMDpOvoYBMn1+gNmOeEoZYqAv+HeuKARGCXI=\ncloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=\ncloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=\ncloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=\ncloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=\ncloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=\ncloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4=\ncloud.google.com/go/monitoring v1.4.0/go.mod 
h1:y6xnxfwI3hTFWOdkOaD7nfJVlwuC3/mS/5kvtT131p4=\ncloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM=\ncloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=\ncloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=\ncloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=\ncloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=\ncloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=\ncloud.google.com/go/pubsub v1.19.0/go.mod h1:/O9kmSe9bb9KRnIAWkzmqhPjHo6LtzGOBYd/kr06XSs=\ncloud.google.com/go/secretmanager v1.3.0/go.mod h1:+oLTkouyiYiabAQNugCeTS3PAArGiMJuBqvJnJsyH+U=\ncloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=\ncloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=\ncloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=\ncloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=\ncloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=\ncloud.google.com/go/storage v1.21.0/go.mod h1:XmRlxkgPjlBONznT2dDUU/5XlpU2OjMnKuqnZI01LAA=\ncloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs=\ncloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY=\ncloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A=\ncloud.google.com/go/trace v1.2.0/go.mod h1:Wc8y/uYyOhPy12KEnXG9XGrvfMz5F5SrYecQlbW1rwM=\ncloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=\ncloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=\ncontrib.go.opencensus.io/exporter/aws v0.0.0-20200617204711-c478e41e60e9/go.mod 
h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=\ncontrib.go.opencensus.io/exporter/stackdriver v0.13.10/go.mod h1:I5htMbyta491eUxufwwZPQdcKvvgzMB4O9ni41YnIM8=\ncontrib.go.opencensus.io/integrations/ocsql v0.1.7/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=\ndario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=\ndario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=\ndmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=\ngithub.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w=\ngithub.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0=\ngithub.com/Azure/azure-amqp-common-go/v3 v3.2.1/go.mod h1:O6X1iYHP7s2x7NjUKsXVhkwWrQhxrd+d8/3rRadj4CI=\ngithub.com/Azure/azure-amqp-common-go/v3 v3.2.2/go.mod h1:O6X1iYHP7s2x7NjUKsXVhkwWrQhxrd+d8/3rRadj4CI=\ngithub.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=\ngithub.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=\ngithub.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=\ngithub.com/Azure/azure-sdk-for-go v59.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=\ngithub.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=\ngithub.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0=\ngithub.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=\ngithub.com/Azure/azure-service-bus-go v0.11.5/go.mod h1:MI6ge2CuQWBVq+ly456MY7XqNLJip5LO1iSFodbNLbU=\ngithub.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM=\ngithub.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=\ngithub.com/Azure/go-amqp 
v0.16.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg=\ngithub.com/Azure/go-amqp v0.16.4/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg=\ngithub.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=\ngithub.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=\ngithub.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=\ngithub.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=\ngithub.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=\ngithub.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=\ngithub.com/Azure/go-autorest/autorest/azure/auth v0.5.9/go.mod h1:hg3/1yw0Bq87O3KvvnJoAh34/0zbP7SFizX/qN5JvjU=\ngithub.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM=\ngithub.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=\ngithub.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=\ngithub.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=\ngithub.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=\ngithub.com/Azure/go-autorest/logger v0.2.1/go.mod 
h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=\ngithub.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=\ngithub.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=\ngithub.com/GoogleCloudPlatform/cloudsql-proxy v1.29.0/go.mod h1:spvB9eLJH9dutlbPSRmHvSXXHOwGRyeXh1jVdquA2G8=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0/go.mod h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0 h1:nNMpRpnkWDAaqcpxMJvxa/Ud98gjbYwayJY4/9bdjiU=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=\ngithub.com/IBM/sarama v1.46.3 h1:njRsX6jNlnR+ClJ8XmkO+CM4unbrNr/2vB5KK6UA+IE=\ngithub.com/IBM/sarama v1.46.3/go.mod h1:GTUYiF9DMOZVe3FwyGT+dtSPceGFIgA+sPc5u6CBwko=\ngithub.com/Knetic/govaluate v3.0.0+incompatible h1:7o6+MAPhYTCF0+fdvoz1xDedhRb4f6s9Tn1Tt7/WTEg=\ngithub.com/Knetic/govaluate v3.0.0+incompatible/go.mod 
h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=\ngithub.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=\ngithub.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=\ngithub.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=\ngithub.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=\ngithub.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=\ngithub.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=\ngithub.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=\ngithub.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw=\ngithub.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=\ngithub.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=\ngithub.com/Shopify/ejson v1.3.3 h1:dPzgmvFhUPTJIzwdF5DaqbwW1dWaoR8ADKRdSTy6Mss=\ngithub.com/Shopify/ejson v1.3.3/go.mod h1:VZMUtDzvBW/PAXRUF5fzp1ffb1ucT8MztrZXXLYZurw=\ngithub.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=\ngithub.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=\ngithub.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=\ngithub.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=\ngithub.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=\ngithub.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=\ngithub.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=\ngithub.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod 
h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=\ngithub.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=\ngithub.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op h1:Ucf+QxEKMbPogRO5guBNe5cgd9uZgfoJLOYs8WWhtjM=\ngithub.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E=\ngithub.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=\ngithub.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=\ngithub.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU=\ngithub.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=\ngithub.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=\ngithub.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=\ngithub.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=\ngithub.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=\ngithub.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=\ngithub.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=\ngithub.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=\ngithub.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=\ngithub.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=\ngithub.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=\ngithub.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=\ngithub.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=\ngithub.com/aws/aws-sdk-go v1.43.31/go.mod 
h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=\ngithub.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=\ngithub.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=\ngithub.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=\ngithub.com/aws/aws-sdk-go-v2 v1.16.4/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=\ngithub.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=\ngithub.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=\ngithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 h1:SdK4Ppk5IzLs64ZMvr6MrSficMtjY2oS0WOORXTlxwU=\ngithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM=\ngithub.com/aws/aws-sdk-go-v2/config v1.15.3/go.mod h1:9YL3v07Xc/ohTsxFXzan9ZpFpdTOFl4X65BAKYaz8jg=\ngithub.com/aws/aws-sdk-go-v2/config v1.15.9/go.mod h1:rv/l/TbZo67kp99v/3Kb0qV6Fm1KEtKyruEV2GvVfgs=\ngithub.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM=\ngithub.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.11.2/go.mod h1:j8YsY9TXTm31k4eFhspiQicfXPLZ0gYXA50i4gxPE8g=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.12.4/go.mod h1:7g+GGSp7xtR823o1jedxKmqRZGqLdoHQfI4eFasKKxs=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3/go.mod h1:uk1vhHHERfSVCUnqSqz8O48LBYDSC+k6brng09jcMOk=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.5/go.mod h1:WAPnuhG5IQ/i6DETFl5NmX3kKqCzw7aau9NHAGcm4QE=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds 
v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=\ngithub.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.3/go.mod h1:0dHuD2HZZSiwfJSy1FO5bX1hQ1TxVV1QXXjpn3XUE44=\ngithub.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.14 h1:qpJmFbypCfwPok5PGTSnQy1NKbv4Hn8xGsee9l4xOPE=\ngithub.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.14/go.mod h1:IOYB+xOZik8YgdTlnDSwbvKmCkikA3nVue8/Qnfzs0c=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9/go.mod h1:AnVH5pvai0pAF4lXRq0bmhbes1u9R8wTE+g+183bZNM=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11/go.mod h1:tmUB6jakq5DFNcXsXOA/ZQ7/C8VnSKYkx58OI7Fh79g=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3/go.mod h1:ssOhaLpRlh88H3UmEcsBoVKq309quMvm3Ds8e9d4eJM=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5/go.mod h1:fV1AaS2gFc1tM0RCb015FJ0pvWVUfJZANzjwoO4YakM=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.3.10/go.mod h1:8DcYQcz0+ZJaSxANlHIsbbi6S+zMwjwdDqwW3r9AzaE=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.3.12/go.mod h1:00c7+ALdPh4YeEUPXJzyU0Yy01nPGOq2+9rUaz05z9g=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=\ngithub.com/aws/aws-sdk-go-v2/internal/v4a v1.0.2 h1:1fs9WkbFcMawQjxEI0B5L0SqvBhJZebxWM6Z3x/qHWY=\ngithub.com/aws/aws-sdk-go-v2/internal/v4a v1.0.2/go.mod h1:0jDVeWUFPbI3sOfsXXAsIdiawXcn7VBLx/IlFVTRP64=\ngithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1/go.mod 
h1:GeUru+8VzrTXV/83XyMJ80KpH8xO89VPoUileyNQ+tc=\ngithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=\ngithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=\ngithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.3/go.mod h1:Seb8KNmD6kVTjwRjVEgOT5hPin6sq+v4C2ycJQDwuH8=\ngithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.6 h1:9mvDAsMiN+07wcfGM+hJ1J3dOKZ2YOpDiPZ6ufRJcgw=\ngithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.6/go.mod h1:Eus+Z2iBIEfhOvhSdMTcscNOMy6n3X9/BJV0Zgax98w=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3/go.mod h1:wlY6SVjuwvh3TVRpTqdy4I1JpBFLX4UGeKZdWntaocw=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.5/go.mod h1:ZbkttHXaVn3bBo/wpJbQGiiIWR90eTBUVBrEHUEQlho=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=\ngithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.3/go.mod h1:Bm/v2IaN6rZ+Op7zX+bOUMdL4fsrYZiD0dsjLhNKwZc=\ngithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.5 h1:DyPYkrH4R2zn+Pdu6hM3VTuPsQYAE6x2WB24X85Sgw0=\ngithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.5/go.mod h1:XtL92YWo0Yq80iN3AgYRERJqohg4TozrqRlxYhHGJ7g=\ngithub.com/aws/aws-sdk-go-v2/service/kms v1.16.3/go.mod h1:QuiHPBqlOFCi4LqdSskYYAWpQlx3PKmohy+rE2F+o5g=\ngithub.com/aws/aws-sdk-go-v2/service/s3 v1.26.3/go.mod h1:g1qvDuRsJY+XghsV6zg00Z4KJ7DtFFCx8fJD2a491Ak=\ngithub.com/aws/aws-sdk-go-v2/service/s3 v1.26.10 h1:GWdLZK0r1AK5sKb8rhB9bEXqXCK8WNuyv4TBAD6ZviQ=\ngithub.com/aws/aws-sdk-go-v2/service/s3 v1.26.10/go.mod h1:+O7qJxF8nLorAhuIVhYTHse6okjHJJm4EwhhzvpnkT0=\ngithub.com/aws/aws-sdk-go-v2/service/secretsmanager v1.15.4/go.mod 
h1:PJc8s+lxyU8rrre0/4a0pn2wgwiDvOEzoOjcJUBr67o=\ngithub.com/aws/aws-sdk-go-v2/service/sns v1.17.4/go.mod h1:kElt+uCcXxcqFyc+bQqZPFD9DME/eC6oHBXvFzQ9Bcw=\ngithub.com/aws/aws-sdk-go-v2/service/sqs v1.18.3/go.mod h1:skmQo0UPvsjsuYYSYMVmrPc1HWCbHUJyrCEp+ZaLzqM=\ngithub.com/aws/aws-sdk-go-v2/service/ssm v1.24.1/go.mod h1:NR/xoKjdbRJ+qx0pMR4mI+N/H1I1ynHwXnO6FowXJc0=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.11.3/go.mod h1:7UQ/e69kU7LDPtY40OyoHYgRmgfGM4mgsLYtcObdveU=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.11.7/go.mod h1:TFVe6Rr2joVLsYQ1ABACXgOC6lXip/qpX2x5jWg/A9w=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=\ngithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=\ngithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.16.3/go.mod h1:bfBj0iVmsUyUg4weDB4NxktD9rDGeKSVWnjTnwbx9b8=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.16.6/go.mod h1:rP1rEOKAGZoXp4iGDxSXFvODAtXpm34Egf0lL0eshaQ=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=\ngithub.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=\ngithub.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=\ngithub.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=\ngithub.com/bcicen/bfstree v1.0.0 h1:Fx9vcyXYspj2GIJqAvd1lwCNI+cQF/r2JJqxHHmsAO0=\ngithub.com/bcicen/bfstree v1.0.0/go.mod h1:u//juIip96SNFkG4iMn9z0KzqLSeFSpBKoBo5ceq1uE=\ngithub.com/bcicen/go-units v1.0.3 h1:REknRsBTdM2+ihTw1DiOsviGQSX7I6jQaPCWTWerBl4=\ngithub.com/bcicen/go-units v1.0.3/go.mod 
h1:c7/sSz9cc6XvnrjsyNwoKHqN6KDDf8LME5vSf+U5Y08=\ngithub.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=\ngithub.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=\ngithub.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=\ngithub.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=\ngithub.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=\ngithub.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=\ngithub.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=\ngithub.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=\ngithub.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=\ngithub.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw=\ngithub.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c=\ngithub.com/c-bata/go-prompt v0.2.6 h1:POP+nrHE+DfLYx370bedwNhsqmpCUynWPxuHi0C5vZI=\ngithub.com/c-bata/go-prompt v0.2.6/go.mod h1:/LMAke8wD2FsNu9EXNdHxNLbd9MedkPnCdfpU9wwHfY=\ngithub.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=\ngithub.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=\ngithub.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=\ngithub.com/cenkalti/backoff/v4 v4.0.0/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=\ngithub.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=\ngithub.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=\ngithub.com/cenkalti/backoff/v5 v5.0.2 
h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=\ngithub.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=\ngithub.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=\ngithub.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=\ngithub.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=\ngithub.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=\ngithub.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=\ngithub.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=\ngithub.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=\ngithub.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=\ngithub.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=\ngithub.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=\ngithub.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=\ngithub.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=\ngithub.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=\ngithub.com/clipperhouse/stringish 
v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=\ngithub.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4=\ngithub.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=\ngithub.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=\ngithub.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=\ngithub.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=\ngithub.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=\ngithub.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=\ngithub.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=\ngithub.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=\ngithub.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=\ngithub.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=\ngithub.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=\ngithub.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w=\ngithub.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI=\ngithub.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=\ngithub.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=\ngithub.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=\ngithub.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=\ngithub.com/containerd/errdefs/pkg v0.3.0/go.mod 
h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=\ngithub.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=\ngithub.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=\ngithub.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=\ngithub.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=\ngithub.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=\ngithub.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=\ngithub.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=\ngithub.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=\ngithub.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=\ngithub.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU=\ngithub.com/derekparker/trie v0.0.0-20230829180723-39f4de51ef7d h1:hUWoLdw5kvo2xCsqlsIBMvWUc1QCSsCYD2J2+Fg6YoU=\ngithub.com/derekparker/trie v0.0.0-20230829180723-39f4de51ef7d/go.mod h1:C7Es+DLenIpPc9J6IYw4jrK0h7S9bKj4DNl8+KxGEXU=\ngithub.com/devigned/tab v0.1.1/go.mod 
h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=\ngithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=\ngithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=\ngithub.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=\ngithub.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=\ngithub.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=\ngithub.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=\ngithub.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=\ngithub.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM=\ngithub.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=\ngithub.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=\ngithub.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=\ngithub.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=\ngithub.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=\ngithub.com/docker/libkv v0.2.2-0.20180912205406-458977154600 h1:x0AMRhackzbivKKiEeSMzH6gZmbALPXCBG0ecBmRlco=\ngithub.com/docker/libkv v0.2.2-0.20180912205406-458977154600/go.mod h1:r5hEwHwW8dr0TFBYGCarMNbrQOiwL1xoqDYZ/JqoTK0=\ngithub.com/dustin/gojson v0.0.0-20160307161227-2e71ec9dd5ad h1:Qk76DOWdOp+GlyDKBAG3Klr9cn7N+LcYc82AZ2S7+cA=\ngithub.com/dustin/gojson v0.0.0-20160307161227-2e71ec9dd5ad/go.mod h1:mPKfmRa823oBIgl2r20LeMSpTAteW5j7FLkc0vjmzyQ=\ngithub.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=\ngithub.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA=\ngithub.com/eapache/go-resiliency 
v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=\ngithub.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws=\ngithub.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0=\ngithub.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=\ngithub.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=\ngithub.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=\ngithub.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=\ngithub.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=\ngithub.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=\ngithub.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=\ngithub.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=\ngithub.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=\ngithub.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=\ngithub.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=\ngithub.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=\ngithub.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=\ngithub.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod 
h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=\ngithub.com/envoyproxy/go-control-plane v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA=\ngithub.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU=\ngithub.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g=\ngithub.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98=\ngithub.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=\ngithub.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=\ngithub.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4=\ngithub.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA=\ngithub.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=\ngithub.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=\ngithub.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=\ngithub.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=\ngithub.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=\ngithub.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=\ngithub.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=\ngithub.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=\ngithub.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=\ngithub.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=\ngithub.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=\ngithub.com/fortytw2/leaktest v1.3.0 
h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=\ngithub.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=\ngithub.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=\ngithub.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=\ngithub.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=\ngithub.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=\ngithub.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=\ngithub.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=\ngithub.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=\ngithub.com/fullstorydev/grpcurl v1.9.3 h1:PC1Xi3w+JAvEE2Tg2Gf2RfVgPbf9+tbuQr1ZkyVU3jk=\ngithub.com/fullstorydev/grpcurl v1.9.3/go.mod h1:/b4Wxe8bG6ndAjlfSUjwseQReUDUvBJiFEB7UllOlUE=\ngithub.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=\ngithub.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=\ngithub.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=\ngithub.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=\ngithub.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=\ngithub.com/gin-gonic/gin v1.7.3/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY=\ngithub.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=\ngithub.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=\ngithub.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=\ngithub.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=\ngithub.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod 
h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=\ngithub.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM=\ngithub.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=\ngithub.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=\ngithub.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=\ngithub.com/go-git/go-git/v5 v5.16.5 h1:mdkuqblwr57kVfXri5TTH+nMFLNUxIj9Z7F5ykFbw5s=\ngithub.com/go-git/go-git/v5 v5.16.5/go.mod h1:QOMLpNf1qxuSY4StA/ArOdfFR2TrKEjJiye2kel2m+M=\ngithub.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=\ngithub.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=\ngithub.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=\ngithub.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=\ngithub.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=\ngithub.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=\ngithub.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=\ngithub.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=\ngithub.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=\ngithub.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q=\ngithub.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=\ngithub.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=\ngithub.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=\ngithub.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=\ngithub.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=\ngithub.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=\ngithub.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=\ngithub.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=\ngithub.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=\ngithub.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=\ngithub.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=\ngithub.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=\ngithub.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=\ngithub.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=\ngithub.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=\ngithub.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=\ngithub.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=\ngithub.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=\ngithub.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=\ngithub.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=\ngithub.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=\ngithub.com/go-redis/redis/v7 v7.4.1 h1:PASvf36gyUpr2zdOUS/9Zqc80GbM+9BDyiJSJDDOrTI=\ngithub.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=\ngithub.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=\ngithub.com/go-redis/redis/v8 v8.11.5/go.mod 
h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=\ngithub.com/go-redsync/redsync/v4 v4.13.0 h1:49X6GJfnbLGaIpBBREM/zA4uIMDXKAh1NDkvQ1EkZKA=\ngithub.com/go-redsync/redsync/v4 v4.13.0/go.mod h1:HMW4Q224GZQz6x1Xc7040Yfgacukdzu7ifTDAKiyErQ=\ngithub.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=\ngithub.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=\ngithub.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=\ngithub.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=\ngithub.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=\ngithub.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=\ngithub.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=\ngithub.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=\ngithub.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=\ngithub.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=\ngithub.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=\ngithub.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=\ngithub.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=\ngithub.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=\ngithub.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=\ngithub.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=\ngithub.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=\ngithub.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=\ngithub.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod 
h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=\ngithub.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I=\ngithub.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=\ngithub.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=\ngithub.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=\ngithub.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=\ngithub.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=\ngithub.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=\ngithub.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=\ngithub.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=\ngithub.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=\ngithub.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=\ngithub.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=\ngithub.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=\ngithub.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=\ngithub.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=\ngithub.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=\ngithub.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=\ngithub.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=\ngithub.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=\ngithub.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=\ngithub.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=\ngithub.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=\ngithub.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=\ngithub.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=\ngithub.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=\ngithub.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=\ngithub.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=\ngithub.com/golang/snappy v1.0.0 
h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=\ngithub.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=\ngithub.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=\ngithub.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=\ngithub.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=\ngithub.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=\ngithub.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=\ngithub.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=\ngithub.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.7/go.mod 
h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=\ngithub.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=\ngithub.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=\ngithub.com/google/go-replayers/grpcreplay v1.1.0 h1:S5+I3zYyZ+GQz68OfbURDdt/+cSMqCK1wrvNx7WBzTE=\ngithub.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk=\ngithub.com/google/go-replayers/httpreplay v1.1.1 h1:H91sIMlt1NZzN7R+/ASswyouLJfW0WLW7fhyUFvDEkY=\ngithub.com/google/go-replayers/httpreplay v1.1.1/go.mod h1:gN9GeLIs7l6NUoVaSSnv2RiqK1NiwAmD0MrKeC9IIks=\ngithub.com/google/go-tpm v0.9.8 h1:slArAR9Ft+1ybZu0lBwpSmpwhRXaa85hWtMinMyRAWo=\ngithub.com/google/go-tpm v0.9.8/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=\ngithub.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=\ngithub.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=\ngithub.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE=\ngithub.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=\ngithub.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=\ngithub.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=\ngithub.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=\ngithub.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=\ngithub.com/google/martian/v3 v3.3.3 
h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=\ngithub.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=\ngithub.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18=\ngithub.com/google/pprof 
v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=\ngithub.com/google/protobuf v3.11.4+incompatible/go.mod h1:lUQ9D1ePzbH2PrIS7ob/bjm9HXyH5WHB0Akwh7URreM=\ngithub.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=\ngithub.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=\ngithub.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=\ngithub.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=\ngithub.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/wire v0.5.0 h1:I7ELFeVBr3yfPIcc8+MWvrjk+3VjbcSzoXm3JVa+jD8=\ngithub.com/google/wire v0.5.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=\ngithub.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=\ngithub.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=\ngithub.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=\ngithub.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=\ngithub.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=\ngithub.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=\ngithub.com/googleapis/gax-go/v2 v2.14.2/go.mod 
h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=\ngithub.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0=\ngithub.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=\ngithub.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=\ngithub.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=\ngithub.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=\ngithub.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=\ngithub.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=\ngithub.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=\ngithub.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=\ngithub.com/gosimple/slug v1.12.0 h1:xzuhj7G7cGtd34NXnW/yF0l+AGNfWqwgh/IXgFy7dnc=\ngithub.com/gosimple/slug v1.12.0/go.mod h1:UiRaFH+GEilHstLUmcBgWcI42viBN7mAb818JrYOeFQ=\ngithub.com/gosimple/unidecode v1.0.1 h1:hZzFTMMqSswvf0LBJZCZgThIZrpDHFXux9KeGmn6T/o=\ngithub.com/gosimple/unidecode v1.0.1/go.mod h1:CP0Cr1Y1kogOtx0bJblKzsVWrqYaqfNOnHzpgWw4Awc=\ngithub.com/gosnmp/gosnmp v1.42.1 h1:MEJxhpC5v1coL3tFRix08PYmky9nyb1TLRRgJAmXm8A=\ngithub.com/gosnmp/gosnmp v1.42.1/go.mod h1:CxVS6bXqmWZlafUj9pZUnQX5e4fAltqPcijxWpCitDo=\ngithub.com/grafana/pyroscope-go v1.2.7 h1:VWBBlqxjyR0Cwk2W6UrE8CdcdD80GOFNutj0Kb1T8ac=\ngithub.com/grafana/pyroscope-go v1.2.7/go.mod h1:o/bpSLiJYYP6HQtvcoVKiE9s5RiNgjYTj1DhiddP2Pc=\ngithub.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og=\ngithub.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU=\ngithub.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=\ngithub.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod 
h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=\ngithub.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=\ngithub.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=\ngithub.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=\ngithub.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=\ngithub.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=\ngithub.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=\ngithub.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=\ngithub.com/guptarohit/asciigraph v0.7.3 h1:p05XDDn7cBTWiBqWb30mrwxd6oU0claAjqeytllnsPY=\ngithub.com/guptarohit/asciigraph v0.7.3/go.mod h1:dYl5wwK4gNsnFf9Zp+l06rFiDZ5YtXM6x7SRWZ3KGag=\ngithub.com/hairyhenderson/go-fsimpl v0.0.0-20220529183339-9deae3e35047 h1:nSSfN9G8O8XXDqB3aDEHJ8K+0llYYToNlTcWOe1Pti8=\ngithub.com/hairyhenderson/go-fsimpl v0.0.0-20220529183339-9deae3e35047/go.mod h1:30RY4Ey+bg+BGKBufZE2IEmxk7hok9U9mjdgZYomwN4=\ngithub.com/hairyhenderson/gomplate/v3 v3.11.8 h1:T63wLRk+Y9C601ChYa/+FZ30XT/UEWydMDZhOOJM3K0=\ngithub.com/hairyhenderson/gomplate/v3 v3.11.8/go.mod h1:xs1LnI1NftnB6o0Zvy1aLgDMSGUvGjz4uCQAZSIMP04=\ngithub.com/hairyhenderson/toml v0.4.2-0.20210923231440-40456b8e66cf h1:I1sbT4ZbIt9i+hB1zfKw2mE8C12TuGxPiW7YmtLbPa4=\ngithub.com/hairyhenderson/toml v0.4.2-0.20210923231440-40456b8e66cf/go.mod h1:jDHmWDKZY6MIIYltYYfW4Rs7hQ50oS4qf/6spSiZAxY=\ngithub.com/hairyhenderson/yaml v0.0.0-20220618171115-2d35fca545ce h1:cVkYhlWAxwuS2/Yp6qPtcl0fGpcWxuZNonywHZ6/I+s=\ngithub.com/hairyhenderson/yaml v0.0.0-20220618171115-2d35fca545ce/go.mod h1:7TyiGlHI+IO+iJbqRZ82QbFtvgj/AIcFm5qc9DLn7Kc=\ngithub.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok=\ngithub.com/hanwen/go-fuse/v2 
v2.1.0/go.mod h1:oRyA5eK+pvJyv5otpO/DgccS8y/RvYMaO00GgRLGryc=\ngithub.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg=\ngithub.com/hashicorp/consul/api v1.32.0/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=\ngithub.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=\ngithub.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=\ngithub.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=\ngithub.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=\ngithub.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=\ngithub.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=\ngithub.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=\ngithub.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=\ngithub.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=\ngithub.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=\ngithub.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=\ngithub.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=\ngithub.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=\ngithub.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=\ngithub.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=\ngithub.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=\ngithub.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=\ngithub.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g=\ngithub.com/hashicorp/go-msgpack v0.5.3/go.mod 
h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=\ngithub.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs=\ngithub.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4=\ngithub.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=\ngithub.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=\ngithub.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=\ngithub.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=\ngithub.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=\ngithub.com/hashicorp/go-plugin v1.7.0 h1:YghfQH/0QmPNc/AZMTFE3ac8fipZyZECHdDPshfk+mA=\ngithub.com/hashicorp/go-plugin v1.7.0/go.mod h1:BExt6KEaIYx804z8k4gRzRLEvxKVb+kn0NMcihqOqb8=\ngithub.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=\ngithub.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=\ngithub.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=\ngithub.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=\ngithub.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=\ngithub.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=\ngithub.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=\ngithub.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=\ngithub.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ=\ngithub.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=\ngithub.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod 
h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=\ngithub.com/hashicorp/go-secure-stdlib/parseutil v0.1.5/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=\ngithub.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=\ngithub.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=\ngithub.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo=\ngithub.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=\ngithub.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=\ngithub.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=\ngithub.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs=\ngithub.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=\ngithub.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=\ngithub.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=\ngithub.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=\ngithub.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=\ngithub.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=\ngithub.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=\ngithub.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=\ngithub.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=\ngithub.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=\ngithub.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=\ngithub.com/hashicorp/go-version v1.7.0/go.mod 
h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=\ngithub.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=\ngithub.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=\ngithub.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=\ngithub.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=\ngithub.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=\ngithub.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=\ngithub.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=\ngithub.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=\ngithub.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=\ngithub.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=\ngithub.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=\ngithub.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=\ngithub.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=\ngithub.com/hashicorp/vault/api v1.6.0 h1:B8UUYod1y1OoiGHq9GtpiqSnGOUEWHaA26AY8RQEDY4=\ngithub.com/hashicorp/vault/api v1.6.0/go.mod h1:h1K70EO2DgnBaTz5IsL6D5ERsNt5Pce93ueVS2+t0Xc=\ngithub.com/hashicorp/vault/sdk v0.5.0 h1:EED7p0OCU3OY5SAqJwSANofY1YKMytm+jDHDQ2EzGVQ=\ngithub.com/hashicorp/vault/sdk v0.5.0/go.mod h1:UJZHlfwj7qUJG8g22CuxUgkdJouFrBNvBHCyx8XAPdo=\ngithub.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=\ngithub.com/hashicorp/yamux v0.1.2 
h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8=\ngithub.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns=\ngithub.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=\ngithub.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=\ngithub.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=\ngithub.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=\ngithub.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=\ngithub.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=\ngithub.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4=\ngithub.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI=\ngithub.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU=\ngithub.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=\ngithub.com/itchyny/gojq v0.12.14 h1:6k8vVtsrhQSYgSGg827AD+PVVaB1NLXEdX+dda2oZCc=\ngithub.com/itchyny/gojq v0.12.14/go.mod h1:y1G7oO7XkcR1LPZO59KyoCRy08T3j9vDYRV0GgYSS+s=\ngithub.com/itchyny/timefmt-go v0.1.7 h1:xyftit9Tbw+Dc/huSSPJaEmX1TVL8lw5vxjJLK4GMMA=\ngithub.com/itchyny/timefmt-go v0.1.7/go.mod h1:5E46Q+zj7vbTgWY8o5YkMeYb4I6GeWLFnetPy5oBrAI=\ngithub.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=\ngithub.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=\ngithub.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=\ngithub.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=\ngithub.com/jackc/pgconn 
v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=\ngithub.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=\ngithub.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=\ngithub.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=\ngithub.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=\ngithub.com/jackc/pgconn v1.11.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=\ngithub.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=\ngithub.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=\ngithub.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=\ngithub.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=\ngithub.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=\ngithub.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=\ngithub.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=\ngithub.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=\ngithub.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=\ngithub.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=\ngithub.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=\ngithub.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=\ngithub.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=\ngithub.com/jackc/pgservicefile 
v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=\ngithub.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=\ngithub.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=\ngithub.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=\ngithub.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=\ngithub.com/jackc/pgtype v1.10.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=\ngithub.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=\ngithub.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=\ngithub.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=\ngithub.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=\ngithub.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw=\ngithub.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=\ngithub.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=\ngithub.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=\ngithub.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=\ngithub.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=\ngithub.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=\ngithub.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=\ngithub.com/jcmturner/aescts/v2 v2.0.0/go.mod 
h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=\ngithub.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=\ngithub.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=\ngithub.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=\ngithub.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=\ngithub.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=\ngithub.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=\ngithub.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=\ngithub.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=\ngithub.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=\ngithub.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=\ngithub.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY=\ngithub.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4=\ngithub.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=\ngithub.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=\ngithub.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94=\ngithub.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8=\ngithub.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=\ngithub.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=\ngithub.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=\ngithub.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=\ngithub.com/jmespath/go-jmespath v0.4.0/go.mod 
h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=\ngithub.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=\ngithub.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=\ngithub.com/johannesboyne/gofakes3 v0.0.0-20220517215058-83a58ec253b6 h1:Twy/cqAmdLarn9QEiRvyX5eUyuKFxqMEiy5GQGIqwjo=\ngithub.com/johannesboyne/gofakes3 v0.0.0-20220517215058-83a58ec253b6/go.mod h1:LIAXxPvcUXwOcTIj9LSNSUpE9/eMHalTWxsP/kmWxQI=\ngithub.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=\ngithub.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg=\ngithub.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=\ngithub.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=\ngithub.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=\ngithub.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=\ngithub.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=\ngithub.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=\ngithub.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=\ngithub.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=\ngithub.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=\ngithub.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=\ngithub.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=\ngithub.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI=\ngithub.com/juju/ratelimit v1.0.2/go.mod 
h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=\ngithub.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=\ngithub.com/karimra/go-map-flattener v0.0.1 h1:hkNYOZxHKdRHPwP5pM1glOPoL12U7Cpmbp7OcEH2BUc=\ngithub.com/karimra/go-map-flattener v0.0.1/go.mod h1:qwSIH4cR7eD1dkmjx0S/rqsO33C6VYaTHLrdfntJQkM=\ngithub.com/karimra/sros-dialout v0.0.0-20260117201857-18e893af823c h1:dlqPOgewPbpD8HhckpNqNKZRRXIEJLcVadzJIZT4RNM=\ngithub.com/karimra/sros-dialout v0.0.0-20260117201857-18e893af823c/go.mod h1:KcjPi49Pbs+EF8Ykob5AzLcze653Qb4HFz+i2aFEEJU=\ngithub.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=\ngithub.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=\ngithub.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=\ngithub.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=\ngithub.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=\ngithub.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=\ngithub.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=\ngithub.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=\ngithub.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=\ngithub.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=\ngithub.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=\ngithub.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=\ngithub.com/kr/pretty 
v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=\ngithub.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=\ngithub.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=\ngithub.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=\ngithub.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=\ngithub.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=\ngithub.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=\ngithub.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=\ngithub.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=\ngithub.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM=\ngithub.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=\ngithub.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=\ngithub.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=\ngithub.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=\ngithub.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=\ngithub.com/mattn/go-colorable 
v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=\ngithub.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=\ngithub.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=\ngithub.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=\ngithub.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=\ngithub.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=\ngithub.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=\ngithub.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=\ngithub.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=\ngithub.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=\ngithub.com/mattn/go-ieproxy v0.0.6 h1:tVDlituRyeHMMkHpGpUu8CJG+hxPMwbYCkIUK2PUCbo=\ngithub.com/mattn/go-ieproxy v0.0.6/go.mod h1:6ZpRmhBaYuBX1U2za+9rC9iCGLsSp2tftelZne7CPko=\ngithub.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=\ngithub.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=\ngithub.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=\ngithub.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=\ngithub.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=\ngithub.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=\ngithub.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=\ngithub.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=\ngithub.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=\ngithub.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=\ngithub.com/mattn/go-isatty 
v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=\ngithub.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=\ngithub.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=\ngithub.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=\ngithub.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=\ngithub.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=\ngithub.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0=\ngithub.com/mattn/go-tty v0.0.4 h1:NVikla9X8MN0SQAqCYzpGyXv0jY7MNl3HOWD2dkle7E=\ngithub.com/mattn/go-tty v0.0.4/go.mod h1:u5GGXBtZU6RQoKV8gY5W6UhMudbR5vXnUe7j3pxse28=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=\ngithub.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=\ngithub.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=\ngithub.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=\ngithub.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=\ngithub.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 h1:KGuD/pM2JpL9FAYvBrnBBeENKZNh6eNtjqytV6TYjnk=\ngithub.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=\ngithub.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=\ngithub.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=\ngithub.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=\ngithub.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=\ngithub.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=\ngithub.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=\ngithub.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=\ngithub.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=\ngithub.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=\ngithub.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=\ngithub.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=\ngithub.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=\ngithub.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=\ngithub.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=\ngithub.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=\ngithub.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=\ngithub.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=\ngithub.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=\ngithub.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=\ngithub.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=\ngithub.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=\ngithub.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=\ngithub.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=\ngithub.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=\ngithub.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=\ngithub.com/moby/sys/sequential v0.6.0 
h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=\ngithub.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=\ngithub.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=\ngithub.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=\ngithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=\ngithub.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=\ngithub.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=\ngithub.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=\ngithub.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=\ngithub.com/nats-io/jwt/v2 v2.8.0 h1:K7uzyz50+yGZDO5o772eRE7atlcSEENpL7P+b74JV1g=\ngithub.com/nats-io/jwt/v2 v2.8.0/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA=\ngithub.com/nats-io/nats-server/v2 v2.12.4 
h1:ZnT10v2LU2Xcoiy8ek9X6Se4YG8EuMfIfvAEuFVx1Ts=\ngithub.com/nats-io/nats-server/v2 v2.12.4/go.mod h1:5MCp/pqm5SEfsvVZ31ll1088ZTwEUdvRX1Hmh/mTTDg=\ngithub.com/nats-io/nats.go v1.49.0 h1:yh/WvY59gXqYpgl33ZI+XoVPKyut/IcEaqtsiuTJpoE=\ngithub.com/nats-io/nats.go v1.49.0/go.mod h1:fDCn3mN5cY8HooHwE2ukiLb4p4G4ImmzvXyJt+tGwdw=\ngithub.com/nats-io/nkeys v0.4.15 h1:JACV5jRVO9V856KOapQ7x+EY8Jo3qw1vJt/9Jpwzkk4=\ngithub.com/nats-io/nkeys v0.4.15/go.mod h1:CpMchTXC9fxA5zrMo4KpySxNjiDVvr8ANOSZdiNfUrs=\ngithub.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=\ngithub.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=\ngithub.com/nsf/termbox-go v1.1.1 h1:nksUPLCb73Q++DwbYUBEglYBRPZyoXJdrj5L+TkjyZY=\ngithub.com/nsf/termbox-go v1.1.1/go.mod h1:T0cTdVuOwf7pHQNtfhnEbzHbcNyCEcVU4YPpouCbVxo=\ngithub.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=\ngithub.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=\ngithub.com/oapi-codegen/runtime v1.0.0 h1:P4rqFX5fMFWqRzY9M/3YF9+aPSPPB06IzP2P7oOxrWo=\ngithub.com/oapi-codegen/runtime v1.0.0/go.mod h1:LmCUMQuPB4M/nLXilQXhHw+BLZdDb18B34OO356yJ/A=\ngithub.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=\ngithub.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E=\ngithub.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk=\ngithub.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=\ngithub.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=\ngithub.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=\ngithub.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=\ngithub.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=\ngithub.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=\ngithub.com/onsi/gomega v1.35.1 
h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=\ngithub.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=\ngithub.com/openconfig/gnmi v0.0.0-20200508230933-d19cebf5e7be/go.mod h1:M/EcuapNQgvzxo1DDXHK4tx3QpYM/uG4l591v33jG2A=\ngithub.com/openconfig/gnmi v0.14.1 h1:qKMuFvhIRR2/xxCOsStPQ25aKpbMDdWr3kI+nP9bhMs=\ngithub.com/openconfig/gnmi v0.14.1/go.mod h1:whr6zVq9PCU8mV1D0K9v7Ajd3+swoN6Yam9n8OH3eT0=\ngithub.com/openconfig/goyang v0.0.0-20200115183954-d0a48929f0ea/go.mod h1:dhXaV0JgHJzdrHi2l+w0fZrwArtXL7jEFoiqLEdmkvU=\ngithub.com/openconfig/goyang v1.6.3 h1:9nWXBwd6b4+nZr8ni7O4zUXVhrVMXCLFz8os5YWFuo4=\ngithub.com/openconfig/goyang v1.6.3/go.mod h1:5WolITjek1NF8yrNERyVZ7jqjOClJTpO8p/+OwmETM4=\ngithub.com/openconfig/grpctunnel v0.1.0 h1:EN99qtlExZczgQgp5ANnHRC/Rs62cAG+Tz2BQ5m/maM=\ngithub.com/openconfig/grpctunnel v0.1.0/go.mod h1:G04Pdu0pml98tdvXrvLaU+EBo3PxYfI9MYqpvdaEHLo=\ngithub.com/openconfig/ygot v0.6.0/go.mod h1:o30svNf7O0xK+R35tlx95odkDmZWS9JyWWQSmIhqwAs=\ngithub.com/openconfig/ygot v0.34.0 h1:9OkVjy3SGi4mbvAZc4HTQBU9u4MT6k4j5DdX+hgRiC4=\ngithub.com/openconfig/ygot v0.34.0/go.mod h1:eMNQHrJpanet+pQoBw/P3ua4sLY/tRTXyJ7ALkWCvl4=\ngithub.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=\ngithub.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=\ngithub.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=\ngithub.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=\ngithub.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=\ngithub.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=\ngithub.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=\ngithub.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=\ngithub.com/pelletier/go-toml/v2 v2.2.3 
h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=\ngithub.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=\ngithub.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=\ngithub.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=\ngithub.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=\ngithub.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=\ngithub.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=\ngithub.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4=\ngithub.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A=\ngithub.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=\ngithub.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/sftp v1.13.9 h1:4NGkvGudBL7GteO3m6qnaQ4pC0Kvf0onSVc9gR3EWBw=\ngithub.com/pkg/sftp v1.13.9/go.mod h1:OBN7bVXdstkFFN/gdnHPUb5TE8eb8G1Rp9wCItqjkkA=\ngithub.com/pkg/term v1.2.0-beta.2 h1:L3y/h2jkuBVFdWiJvNfYfKmzcCnILw7mJWm2JQuMppw=\ngithub.com/pkg/term v1.2.0-beta.2/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw=\ngithub.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=\ngithub.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 
h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=\ngithub.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=\ngithub.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=\ngithub.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=\ngithub.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=\ngithub.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=\ngithub.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=\ngithub.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=\ngithub.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=\ngithub.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=\ngithub.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=\ngithub.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=\ngithub.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=\ngithub.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=\ngithub.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=\ngithub.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=\ngithub.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=\ngithub.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=\ngithub.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=\ngithub.com/prometheus/prometheus v0.306.0 h1:Q0Pvz/ZKS6vVWCa1VSgNyNJlEe8hxdRlKklFg7SRhNw=\ngithub.com/prometheus/prometheus v0.306.0/go.mod h1:7hMSGyZHt0dcmZ5r4kFPJ/vxPQU99N5/BGwSPDxeZrQ=\ngithub.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg=\ngithub.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=\ngithub.com/redis/go-redis/v9 v9.14.0 h1:u4tNCjXOyzfgeLN+vAZaW1xUooqWDqVEsZN0U01jfAE=\ngithub.com/redis/go-redis/v9 v9.14.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=\ngithub.com/redis/rueidis v1.0.19 h1:s65oWtotzlIFN8eMPhyYwxlwLR1lUdhza2KtWprKYSo=\ngithub.com/redis/rueidis v1.0.19/go.mod h1:8B+r5wdnjwK3lTFml5VtxjzGOQAC+5UmujoD12pDrEo=\ngithub.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=\ngithub.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=\ngithub.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=\ngithub.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=\ngithub.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=\ngithub.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=\ngithub.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=\ngithub.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=\ngithub.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w=\ngithub.com/rs/zerolog 
v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0=\ngithub.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=\ngithub.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=\ngithub.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=\ngithub.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=\ngithub.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=\ngithub.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=\ngithub.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=\ngithub.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=\ngithub.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=\ngithub.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=\ngithub.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=\ngithub.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=\ngithub.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=\ngithub.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=\ngithub.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=\ngithub.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=\ngithub.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 h1:WnNuhiq+FOY3jNj6JXFT+eLN3CQ/oPIsDPRanvwsmbI=\ngithub.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0=\ngithub.com/shopspring/decimal 
v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=\ngithub.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=\ngithub.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=\ngithub.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=\ngithub.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=\ngithub.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=\ngithub.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=\ngithub.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=\ngithub.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8=\ngithub.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY=\ngithub.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=\ngithub.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=\ngithub.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=\ngithub.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=\ngithub.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=\ngithub.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=\ngithub.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=\ngithub.com/spf13/afero v1.2.0/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=\ngithub.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=\ngithub.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=\ngithub.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=\ngithub.com/spf13/cast v1.7.1/go.mod 
h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=\ngithub.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=\ngithub.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=\ngithub.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=\ngithub.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=\ngithub.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=\ngithub.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo=\ngithub.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=\ngithub.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=\ngithub.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=\ngithub.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=\ngithub.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=\ngithub.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=\ngithub.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=\ngithub.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=\ngithub.com/stretchr/testify v1.6.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=\ngithub.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=\ngithub.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=\ngithub.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=\ngithub.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 h1:QVqDTf3h2WHt08YuiTGPZLls0Wq99X9bWd0Q5ZSBesM=\ngithub.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8=\ngithub.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=\ngithub.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=\ngithub.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=\ngithub.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=\ngithub.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=\ngithub.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=\ngithub.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=\ngithub.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=\ngithub.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=\ngithub.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=\ngithub.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=\ngithub.com/xdg/scram v1.0.5 h1:TuS0RFmt5Is5qm9Tm2SoD89OPqe4IRiFtyFY4iwWXsw=\ngithub.com/xdg/scram v1.0.5/go.mod 
h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=\ngithub.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=\ngithub.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=\ngithub.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=\ngithub.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=\ngithub.com/zealic/xignore v0.3.3 h1:EpLXUgZY/JEzFkTc+Y/VYypzXtNz+MSOMVCGW5Q4CKQ=\ngithub.com/zealic/xignore v0.3.3/go.mod h1:lhS8V7fuSOtJOKsvKI7WfsZE276/7AYEqokv3UiqEAU=\ngithub.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=\ngithub.com/zestor-dev/zestor v0.0.2 h1:UyM7G7QPwDRThgCWxm3DUCaAkKte6s7fVhOWBVvDH3Y=\ngithub.com/zestor-dev/zestor v0.0.2/go.mod h1:gffTEDJU8OE+V1gewC+yL1x6MpZOeNsys5jMQPv0S3k=\ngo.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=\ngo.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=\ngo.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=\ngo.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=\ngo.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=\ngo.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=\ngo.opencensus.io v0.23.0/go.mod 
h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=\ngo.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=\ngo.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=\ngo.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=\ngo.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=\ngo.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE=\ngo.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk=\ngo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ=\ngo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=\ngo.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=\ngo.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=\ngo.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc=\ngo.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod 
h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I=\ngo.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=\ngo.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=\ngo.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=\ngo.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=\ngo.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=\ngo.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=\ngo.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=\ngo.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=\ngo.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=\ngo.opentelemetry.io/proto/otlp v1.8.0 h1:fRAZQDcAFHySxpJ1TwlA1cJ4tvcrw7nXl9xWWC8N5CE=\ngo.opentelemetry.io/proto/otlp v1.8.0/go.mod h1:tIeYOeNBU4cvmPqpaji1P+KbB4Oloai8wN4rWzRrFF0=\ngo.starlark.net v0.0.0-20260102030733-3fee463870c9 h1:nV1OyvU+0CYrp5eKfQ3rD03TpFYYhH08z31NK1HmtTk=\ngo.starlark.net v0.0.0-20260102030733-3fee463870c9/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8=\ngo.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=\ngo.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=\ngo.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=\ngo.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=\ngo.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=\ngo.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=\ngo.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=\ngo.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=\ngo.uber.org/goleak v1.1.10/go.mod 
h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=\ngo.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=\ngo.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=\ngo.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=\ngo.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=\ngo.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=\ngo.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=\ngo.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=\ngo.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=\ngo.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=\ngo.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=\ngo.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=\ngo.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=\ngo.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=\ngo.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=\ngo.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=\ngo.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=\ngo.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=\ngo.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=\ngo4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA=\ngo4.org/intern v0.0.0-20230205224052-192e9f60865c h1:b8WZ7Ja8nKegYxfwDLLwT00ZKv4lXAQrw8LYPK+cHSI=\ngo4.org/intern v0.0.0-20230205224052-192e9f60865c/go.mod h1:RJ0SVrOMpxLhgb5noIV+09zI1RsRlMsbUcSxpWHqbrE=\ngo4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod 
h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=\ngo4.org/unsafe/assume-no-moving-gc v0.0.0-20230204201903-c31fa085b70e/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=\ngo4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=\ngo4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 h1:lGdhQUN/cnWdSH3291CUuxSEqc+AsGTiDxPP3r2J0l4=\ngo4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=\ngocloud.dev v0.25.1-0.20220408200107-09b10f7359f7 h1:esuNxgk6HkmcadSJQCFnGOfyufN1GW1gtFJDwUbmYOw=\ngocloud.dev v0.25.1-0.20220408200107-09b10f7359f7/go.mod h1:mkUgejbnbLotorqDyvedJO20XcZNTynmSeVSQS9btVg=\ngolang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=\ngolang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=\ngolang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto 
v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=\ngolang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20211115234514-b4de73f9ece8/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=\ngolang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=\ngolang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=\ngolang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=\ngolang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=\ngolang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=\ngolang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=\ngolang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=\ngolang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=\ngolang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=\ngolang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod 
h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=\ngolang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=\ngolang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=\ngolang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa h1:t2QcU6V556bFjYgu4L6C+6VrCPyJZ+eyRsABUPs1mz4=\ngolang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=\ngolang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=\ngolang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=\ngolang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=\ngolang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=\ngolang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=\ngolang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=\ngolang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=\ngolang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=\ngolang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=\ngolang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=\ngolang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=\ngolang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=\ngolang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=\ngolang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=\ngolang.org/x/mod v0.32.0 
h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=\ngolang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=\ngolang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=\ngolang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net 
v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=\ngolang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=\ngolang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=\ngolang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=\ngolang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=\ngolang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=\ngolang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=\ngolang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=\ngolang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=\ngolang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=\ngolang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=\ngolang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=\ngolang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=\ngolang.org/x/net v0.25.0/go.mod 
h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=\ngolang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=\ngolang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 
v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=\ngolang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=\ngolang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=\ngolang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync 
v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=\ngolang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=\ngolang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=\ngolang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=\ngolang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=\ngolang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=\ngolang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys 
v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=\ngolang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=\ngolang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=\ngolang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=\ngolang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=\ngolang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=\ngolang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=\ngolang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=\ngolang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=\ngolang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\ngolang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=\ngolang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=\ngolang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=\ngolang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=\ngolang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=\ngolang.org/x/term v0.27.0/go.mod 
h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=\ngolang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=\ngolang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=\ngolang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=\ngolang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=\ngolang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=\ngolang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=\ngolang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=\ngolang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=\ngolang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=\ngolang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=\ngolang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=\ngolang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time 
v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=\ngolang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=\ngolang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=\ngolang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=\ngolang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=\ngolang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=\ngolang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=\ngolang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=\ngolang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=\ngolang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=\ngolang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=\ngolang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=\ngolang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=\ngolang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=\ngonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=\ngonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=\ngoogle.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=\ngoogle.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=\ngoogle.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=\ngoogle.golang.org/api v0.28.0/go.mod 
h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=\ngoogle.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=\ngoogle.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=\ngoogle.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=\ngoogle.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=\ngoogle.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=\ngoogle.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=\ngoogle.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=\ngoogle.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I=\ngoogle.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=\ngoogle.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=\ngoogle.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=\ngoogle.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=\ngoogle.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=\ngoogle.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=\ngoogle.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=\ngoogle.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=\ngoogle.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E=\ngoogle.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=\ngoogle.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=\ngoogle.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=\ngoogle.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM=\ngoogle.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M=\ngoogle.golang.org/api v0.67.0/go.mod 
h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=\ngoogle.golang.org/api v0.68.0/go.mod h1:sOM8pTpwgflXRhz+oC8H2Dr+UcbMqkPPWNJo88Q7TH8=\ngoogle.golang.org/api v0.69.0/go.mod h1:boanBiw+h5c3s+tBPgEzLDRHfFLWV0qXxRHz3ws7C80=\ngoogle.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=\ngoogle.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=\ngoogle.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=\ngoogle.golang.org/api v0.239.0 h1:2hZKUnFZEy81eugPs4e2XzIJ5SOwQg0G82bpXD65Puo=\ngoogle.golang.org/api v0.239.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=\ngoogle.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=\ngoogle.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=\ngoogle.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=\ngoogle.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=\ngoogle.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=\ngoogle.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto 
v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=\ngoogle.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=\ngoogle.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=\ngoogle.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=\ngoogle.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=\ngoogle.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=\ngoogle.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=\ngoogle.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=\ngoogle.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=\ngoogle.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=\ngoogle.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=\ngoogle.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=\ngoogle.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=\ngoogle.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=\ngoogle.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=\ngoogle.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=\ngoogle.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=\ngoogle.golang.org/genproto 
v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=\ngoogle.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=\ngoogle.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=\ngoogle.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=\ngoogle.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=\ngoogle.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=\ngoogle.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=\ngoogle.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=\ngoogle.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=\ngoogle.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=\ngoogle.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=\ngoogle.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=\ngoogle.golang.org/genproto v0.0.0-20220401170504-314d38edb7de/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=\ngoogle.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78=\ngoogle.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=\ngoogle.golang.org/genproto/googleapis/rpc 
v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=\ngoogle.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=\ngoogle.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=\ngoogle.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=\ngoogle.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=\ngoogle.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=\ngoogle.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=\ngoogle.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=\ngoogle.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=\ngoogle.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=\ngoogle.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=\ngoogle.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=\ngoogle.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=\ngoogle.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=\ngoogle.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=\ngoogle.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=\ngoogle.golang.org/grpc v1.37.0/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=\ngoogle.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=\ngoogle.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=\ngoogle.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=\ngoogle.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=\ngoogle.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=\ngoogle.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=\ngoogle.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=\ngoogle.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=\ngoogle.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=\ngoogle.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE=\ngoogle.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=\ngoogle.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=\ngoogle.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=\ngoogle.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=\ngoogle.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=\ngoogle.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=\ngoogle.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=\ngoogle.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=\ngoogle.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=\ngoogle.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=\ngoogle.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=\ngoogle.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=\ngoogle.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=\ngoogle.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=\ngoogle.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=\ngopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=\ngopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=\ngopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=\ngopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=\ngopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=\ngopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=\ngopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=\ngopkg.in/ini.v1 v1.67.0 
h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=\ngopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=\ngopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=\ngopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=\ngopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=\ngopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=\ngopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=\ngopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=\ngopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=\ngopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=\ngopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngotest.tools/v3 v3.5.1 
h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=\ngotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=\nhonnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=\nhonnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=\nhonnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=\ninet.af/netaddr v0.0.0-20230525184311-b8eac61e914a h1:1XCVEdxrvL6c0TGOhecLuB7U9zYNdxZEjvOqJreKZiM=\ninet.af/netaddr v0.0.0-20230525184311-b8eac61e914a/go.mod h1:e83i32mAQOW1LAqEIweALsuK2Uw4mhQadA5r7b0Wobo=\nk8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=\nk8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=\nk8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=\nk8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=\nk8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=\nk8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=\nk8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=\nk8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=\nk8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=\nk8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=\nk8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=\nk8s.io/utils 
v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=\nnhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0=\nrsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=\nrsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=\nrsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=\nsigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=\nsigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=\nsigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=\nsigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=\nsigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=\nsigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=\n"
  },
  {
    "path": "goreleaser-alpine.dockerfile",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nFROM alpine\n\nLABEL maintainer=\"Karim Radhouani <medkarimrdi@gmail.com>, Roman Dodin <dodin.roman@gmail.com>\"\nLABEL documentation=\"https://gnmic.openconfig.net\"\nLABEL repo=\"https://github.com/openconfig/gnmic\"\n\nCOPY gnmic /app/gnmic\nENTRYPOINT [ \"/app/gnmic\" ]\nCMD [ \"help\" ]\n"
  },
  {
    "path": "goreleaser-scratch.dockerfile",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nFROM scratch\n\nLABEL maintainer=\"Karim Radhouani <medkarimrdi@gmail.com>, Roman Dodin <dodin.roman@gmail.com>\"\nLABEL documentation=\"https://gnmic.openconfig.net\"\nLABEL repo=\"https://github.com/openconfig/gnmic\"\n\nCOPY gnmic /app/gnmic\nENTRYPOINT [ \"/app/gnmic\" ]\nCMD [ \"help\" ]\n"
  },
  {
    "path": "install.sh",
    "content": "#!/usr/bin/env bash\n# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n\n# The install script is based off of the Apache 2.0 script from Helm,\n# https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3\n\n: ${BINARY_NAME:=\"gnmic\"}\n: ${PROJECT_NAME:=\"gnmic\"} # if project name does not match binary name\n: ${USE_SUDO:=\"true\"}\n: ${USE_PKG:=\"false\"} # default --use-pkg flag value. will use package installation by default unless the default is changed to false\n: ${VERIFY_CHECKSUM:=\"false\"}\n: ${BIN_INSTALL_DIR:=\"/usr/local/bin\"}\n: ${REPO_NAME:=\"openconfig/gnmic\"}\n: ${REPO_URL:=\"https://github.com/$REPO_NAME\"}\n: ${PROJECT_URL:=\"https://gnmic.openconfig.net\"}\n: ${LATEST_URL:=\"https://api.github.com/repos/$REPO_NAME/releases/latest\"}\n# detectArch discovers the architecture for this system.\ndetectArch() {\n    ARCH=$(uname -m)\n    # case $ARCH in\n    # armv5*) ARCH=\"armv5\" ;;\n    # armv6*) ARCH=\"armv6\" ;;\n    # armv7*) ARCH=\"arm\" ;;\n    # aarch64) ARCH=\"arm64\" ;;\n    # x86) ARCH=\"386\" ;;\n    # x86_64) ARCH=\"amd64\" ;;\n    # i686) ARCH=\"386\" ;;\n    # i386) ARCH=\"386\" ;;\n    # esac\n}\n\n# detectOS discovers the operating system for this system and its package format\ndetectOS() {\n    OS=$(echo $(uname) | tr '[:upper:]' '[:lower:]')\n\n    case \"$OS\" in\n    # Minimalist GNU for Windows\n    mingw*) OS='windows' ;;\n    esac\n\n    if type \"rpm\" &>/dev/null; then\n        PKG_FORMAT=\"rpm\"\n    elif type \"dpkg\" &>/dev/null; then\n        PKG_FORMAT=\"deb\"\n    fi\n}\n\n# runs the given command 
as root (detects if we are root already)\nrunAsRoot() {\n    local CMD=\"$*\"\n\n    if [ $EUID -ne 0 -a $USE_SUDO = \"true\" ]; then\n        CMD=\"sudo $CMD\"\n    fi\n\n    $CMD\n}\n\n# verifySupported checks that the os/arch combination is supported\nverifySupported() {\n    local supported=\"darwin-x86_64\\ndarwin-aarch64\\nlinux-i386\\nlinux-x86_64\\nlinux-armv7\\nlinux-aarch64\"\n    # change ARCH to \"aarch64\" if OS=\"darwin\" and ARCH=\"arm64\"\n    if [ ${OS} == \"darwin\" ] && [ ${ARCH} == \"arm64\" ]; then\n        ARCH=\"aarch64\"\n    fi\n    if ! echo \"${supported}\" | grep -q \"${OS}-${ARCH}\"; then\n        echo \"No prebuilt binary for ${OS}-${ARCH}.\"\n        echo \"To build from source, go to ${REPO_URL}\"\n        exit 1\n    fi\n\n    if ! type \"curl\" &>/dev/null && ! type \"wget\" &>/dev/null; then\n        echo \"Either curl or wget is required\"\n        exit 1\n    fi\n}\n\n# verifyOpenssl checks if openssl is installed to perform checksum operation\nverifyOpenssl() {\n    if [ $VERIFY_CHECKSUM == \"true\" ]; then\n        if ! type \"openssl\" &>/dev/null; then\n            echo \"openssl is not found. 
It is used to verify checksum of the downloaded file.\"\n            exit 1\n        fi\n    fi\n}\n\n# setDesiredVersion sets the desired version either to an explicit version provided by a user\n# or to the latest release available on github releases\nsetDesiredVersion() {\n    if [ \"x$DESIRED_VERSION\" == \"x\" ]; then\n        # when desired version is not provided\n        # get latest tag from the gh releases\n        local cmd=\"\"\n        if type \"curl\" &>/dev/null; then\n            cmd=\"curl -s \"\n        elif type \"wget\" &>/dev/null; then\n            cmd=\"wget -q -O- \"\n        else\n            echo \"Missing curl or wget utility to download the installation package\"\n            exit 1\n        fi\n        local latest_release_url=\"\"\n        # use jq to filter the api response if available\n        if type \"jq\" &>/dev/null; then\n            latest_release_url=$($cmd $LATEST_URL | jq -r .html_url)\n        # else use grep and cut\n        else\n            latest_release_url=$($cmd $LATEST_URL | grep \"html_url.*releases/tag\" | cut -d '\"' -f 4)\n        fi\n        # check for empty response or null (GitHub API may return null on rate limit or errors)\n        if [ \"x$latest_release_url\" == \"x\" ] || [ \"$latest_release_url\" == \"null\" ]; then\n            echo \"Could not determine the latest release from GitHub API.\"\n            echo \"This may be due to GitHub API rate limiting. Please try again later or specify a version with --version flag.\"\n            exit 1\n        fi\n        TAG=$(echo $latest_release_url | cut -d '\"' -f 2 | awk -F \"/\" '{print $NF}')\n        # tag with stripped `v` prefix\n        TAG_WO_VER=$(echo \"${TAG}\" | cut -c 2-)\n        # validate that TAG looks like a version (should start with 'v')\n        if [[ ! \"$TAG\" =~ ^v[0-9] ]]; then\n            echo \"Error: Invalid version tag '$TAG' retrieved from GitHub API.\"\n            echo \"Expected a version starting with 'v' (e.g., v0.1.0). 
Please try again later or specify a version with --version flag.\"\n            exit 1\n        fi\n    else\n        TAG=$DESIRED_VERSION\n        TAG_WO_VER=$(echo \"${TAG}\" | cut -c 2-)\n    fi\n}\n\n# checkInstalledVersion checks which version is installed and\n# if it needs to be changed.\ncheckInstalledVersion() {\n    if [[ -f \"${BIN_INSTALL_DIR}/${BINARY_NAME}\" ]]; then\n        local version=$(\"${BIN_INSTALL_DIR}/${BINARY_NAME}\" version | grep version | awk '{print $NF}')\n        if [[ \"v$version\" == \"$TAG\" ]]; then\n            echo \"${BINARY_NAME} is already at ${DESIRED_VERSION:-latest ($version)}\" version\n            return 0\n        else\n            echo \"${BINARY_NAME} ${TAG_WO_VER} is available. Changing from version ${version}.\"\n            return 1\n        fi\n    else\n        return 1\n    fi\n}\n\n# createTempDir creates temporary directory where we downloaded files\ncreateTempDir() {\n    TMP_ROOT=\"$(mktemp -d)\"\n    TMP_BIN=\"$TMP_ROOT/$BINARY_NAME\"\n}\n\n# downloadFile downloads the latest binary archive, the checksum file and performs the sum check\ndownloadFile() {\n    EXT=\"tar.gz\" # download file extension\n    if [ $USE_PKG == \"true\" ]; then\n        if [ -z $PKG_FORMAT ]; then\n            echo \"Package for $OS-$ARCH is not available\"\n            cleanup\n            exit 1\n        fi\n        EXT=$PKG_FORMAT\n    fi\n    ARCHIVE=\"${PROJECT_NAME}_${TAG_WO_VER}_${OS}_${ARCH}.${EXT}\"\n    DOWNLOAD_URL=\"${REPO_URL}/releases/download/${TAG}/${ARCHIVE}\"\n    CHECKSUM_URL=\"${REPO_URL}/releases/download/${TAG}/checksums.txt\"\n    TMP_FILE=\"$TMP_ROOT/$ARCHIVE\"\n    SUM_FILE=\"$TMP_ROOT/checksums.txt\"\n    echo \"Downloading $DOWNLOAD_URL\"\n    if type \"curl\" &>/dev/null; then\n        curl -SsL \"$CHECKSUM_URL\" -o \"$SUM_FILE\"\n        curl -SsL \"$DOWNLOAD_URL\" -o \"$TMP_FILE\"\n    elif type \"wget\" &>/dev/null; then\n        wget -q -O \"$SUM_FILE\" \"$CHECKSUM_URL\"\n        wget -q -O 
\"$TMP_FILE\" \"$DOWNLOAD_URL\"\n    fi\n\n    # verify downloaded file\n    if [ $VERIFY_CHECKSUM == \"true\" ]; then\n        local sum=$(openssl sha1 -sha256 ${TMP_FILE} | awk '{print $2}')\n        local expected_sum=$(cat ${SUM_FILE} | grep -i $ARCHIVE | awk '{print $1}')\n        if [ \"$sum\" != \"$expected_sum\" ]; then\n            echo \"SHA sum of ${TMP_FILE} does not match. Aborting.\"\n            exit 1\n        fi\n        echo \"Checksum verified\"\n    fi\n}\n\n# installFile verifies the SHA256 for the file, then unpacks and\n# installs it. By default, the installation is done from .tar.gz archive, that can be overriden with --use-pkg flag\ninstallFile() {\n    tar xf \"$TMP_FILE\" -C \"$TMP_ROOT\"\n    echo \"Preparing to install $BINARY_NAME ${TAG_WO_VER} into ${BIN_INSTALL_DIR}\"\n    runAsRoot cp -f \"$TMP_ROOT/$BINARY_NAME\" \"$BIN_INSTALL_DIR/$BINARY_NAME\"\n    runAsRoot chmod 755 \"$BIN_INSTALL_DIR/$BINARY_NAME\"\n    echo \"$BINARY_NAME installed into $BIN_INSTALL_DIR/$BINARY_NAME\"\n}\n\n# installPkg installs the downloaded version of a package in a deb or rpm format\ninstallPkg() {\n    echo \"Preparing to install $BINARY_NAME ${TAG_WO_VER} from package\"\n    if [ $PKG_FORMAT == \"deb\" ]; then\n        runAsRoot dpkg -i $TMP_FILE\n    elif [ $PKG_FORMAT == \"rpm\" ]; then\n        runAsRoot rpm -U $TMP_FILE\n    fi\n}\n\n# fail_trap is executed if an error occurs.\nfail_trap() {\n    result=$?\n    if [ \"$result\" != \"0\" ]; then\n        if [[ -n \"$INPUT_ARGUMENTS\" ]]; then\n            echo \"Failed to install $BINARY_NAME with the arguments provided: $INPUT_ARGUMENTS\"\n            help\n        else\n            echo \"Failed to install $BINARY_NAME\"\n        fi\n        echo -e \"\\tFor support, go to $REPO_URL/issues\"\n    fi\n    cleanup\n    exit $result\n}\n\n# testVersion tests the installed client to make sure it is working.\ntestVersion() {\n    set +e\n    $BIN_INSTALL_DIR/$BINARY_NAME version\n    if [ \"$?\" = 
\"1\" ]; then\n        echo \"$BINARY_NAME not found. Is $BIN_INSTALL_DIR in your \"'$PATH?'\n        exit 1\n    fi\n    set -e\n}\n\n# help provides possible cli installation arguments\nhelp() {\n    echo \"Accepted cli arguments are:\"\n    echo -e \"\\t[--help|-h ] ->> prints this help\"\n    echo -e \"\\t[--version|-v <desired_version>] . When not defined it fetches the latest release from GitHub\"\n    echo -e \"\\te.g. --version v0.1.1\"\n    echo -e \"\\t[--use-pkg]  ->> install from deb/rpm packages\"\n    echo -e \"\\t[--no-sudo]  ->> install without sudo\"\n    echo -e \"\\t[--verify-checksum]  ->> verify checksum of the downloaded file\"\n}\n\n# removes temporary directory used to download artefacts\ncleanup() {\n    if [[ -d \"${TMP_ROOT:-}\" ]]; then\n        rm -rf \"$TMP_ROOT\"\n    fi\n}\n\n# Execution\n\n#Stop execution on any error\ntrap \"fail_trap\" EXIT\nset -e\n\n# Parsing input arguments (if any)\nexport INPUT_ARGUMENTS=\"${@}\"\nset -u\nwhile [[ $# -gt 0 ]]; do\n    case $1 in\n    '--version' | -v)\n        shift\n        if [[ $# -ne 0 ]]; then\n            export DESIRED_VERSION=\"v${1}\"\n        else\n            echo -e \"Please provide the desired version. e.g. --version 0.1.1\"\n            exit 0\n        fi\n        ;;\n    '--no-sudo')\n        USE_SUDO=\"false\"\n        ;;\n    '--verify-checksum')\n        VERIFY_CHECKSUM=\"true\"\n        ;;\n    '--use-pkg')\n        USE_PKG=\"true\"\n        ;;\n    '--help' | -h)\n        help\n        exit 0\n        ;;\n    *)\n        exit 1\n        ;;\n    esac\n    shift\ndone\nset +u\n\ndetectArch\ndetectOS\nverifySupported\nsetDesiredVersion\nif ! checkInstalledVersion; then\n    createTempDir\n    verifyOpenssl\n    downloadFile\n    if [ $USE_PKG == \"true\" ]; then\n        installPkg\n    else\n        installFile\n    fi\n    testVersion\n    cleanup\nfi\n"
  },
  {
    "path": "main.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage main\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/cmd\"\n)\n\nfunc main() {\n\tcmd.Execute()\n}\n"
  },
  {
    "path": "mkdocs.yml",
    "content": "# © 2022 Nokia.\n#\n# This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n# No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n# This code is provided on an “as is” basis without any warranties of any kind.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nsite_name: gNMIc\nnav:\n  - Home: index.md\n\n  - Getting started:\n      - Installation: install.md\n      - Basic usage: basic_usage.md\n      - User guide: user_guide/configuration_intro.md\n      - Command reference: cmd/capabilities.md\n      - Deployment examples: deployments/deployments_intro.md\n      - Changelog: changelog.md\n\n  - User guide:\n      - Configuration:\n        - Introduction: user_guide/configuration_intro.md\n        - Flags: \n            - user_guide/configuration_flags.md\n            - global_flags.md\n        - Environment variables: user_guide/configuration_env.md\n        - File configuration: user_guide/configuration_file.md\n      \n      - Targets: \n          - Configuration: user_guide/targets/targets.md\n          - Session Security: user_guide/targets/targets_session_sec.md\n          - Discovery:\n            - Introduction: user_guide/targets/target_discovery/discovery_intro.md\n            - File Discovery: user_guide/targets/target_discovery/file_discovery.md\n            - Consul Discovery: user_guide/targets/target_discovery/consul_discovery.md\n            - Docker Discovery: user_guide/targets/target_discovery/docker_discovery.md\n            - HTTP Discovery: user_guide/targets/target_discovery/http_discovery.md\n      \n      - Subscriptions: user_guide/subscriptions.md\n\n      - Prompt mode: user_guide/prompt_suggestions.md\n    \n      - gNMI Server: user_guide/gnmi_server.md\n\n      - Tunnel Server: user_guide/tunnel_server.md\n\n      - Inputs:\n        - 
Introduction: user_guide/inputs/input_intro.md\n        - NATS: user_guide/inputs/nats_input.md\n        - Jetstream: user_guide/inputs/jetstream_input.md\n        - STAN: user_guide/inputs/stan_input.md\n        - Kafka: user_guide/inputs/kafka_input.md\n\n      - Outputs:\n          - Introduction: user_guide/outputs/output_intro.md\n          - File: user_guide/outputs/file_output.md\n          - NATS:\n            - NATS: user_guide/outputs/nats_output.md\n            - STAN: user_guide/outputs/stan_output.md\n            - Jetstream: user_guide/outputs/jetstream_output.md\n          - Kafka: user_guide/outputs/kafka_output.md\n          - InfluxDB: user_guide/outputs/influxdb_output.md\n          - Prometheus:  \n            - Scrape Based (Pull): user_guide/outputs/prometheus_output.md\n            - Remote Write (Push): user_guide/outputs/prometheus_write_output.md\n          - OpenTelemetry: user_guide/outputs/otlp_output.md\n          - gNMI Server: user_guide/outputs/gnmi_output.md\n          - TCP: user_guide/outputs/tcp_output.md\n          - UDP: user_guide/outputs/udp_output.md\n          - SNMP: user_guide/outputs/snmp_output.md\n          - ASCII Graph: user_guide/outputs/asciigraph_output.md\n          \n      - Processors: \n          - Introduction: user_guide/event_processors/intro.md\n          - Add Tag: user_guide/event_processors/event_add_tag.md\n          - Allow: user_guide/event_processors/event_allow.md\n          - Combine: user_guide/event_processors/event_combine.md\n          - Convert: user_guide/event_processors/event_convert.md\n          - Data Convert: user_guide/event_processors/event_data_convert.md\n          - Date string: user_guide/event_processors/event_date_string.md\n          - Delete: user_guide/event_processors/event_delete.md\n          - Drop: user_guide/event_processors/event_drop.md\n          - Duration Convert: user_guide/event_processors/event_duration_convert.md\n          - Extract Tags: 
user_guide/event_processors/event_extract_tags.md\n          - Group by: user_guide/event_processors/event_group_by.md\n          - IEEE Float32: user_guide/event_processors/event_ieeefloat32.md\n          - JQ: user_guide/event_processors/event_jq.md\n          - Merge: user_guide/event_processors/event_merge.md\n          - Override TS: user_guide/event_processors/event_override_ts.md\n          - Plugin: user_guide/event_processors/event_plugin.md\n          - Rate Limit: user_guide/event_processors/event_rate_limit.md\n          - Starlark: user_guide/event_processors/event_starlark.md\n          - Strings: user_guide/event_processors/event_strings.md\n          - Time Epoch: user_guide/event_processors/event_time_epoch.md\n          - To Tag: user_guide/event_processors/event_to_tag.md\n          - Trigger: user_guide/event_processors/event_trigger.md\n          - Value Tag: user_guide/event_processors/event_value_tag.md\n          - Write: user_guide/event_processors/event_write.md\n\n      - Actions: user_guide/actions/actions.md\n\n      - Caching: user_guide/caching.md\n\n      - Clustering: user_guide/HA.md\n\n      - REST API: \n          - Introduction: user_guide/api/api_intro.md\n          - Configuration: user_guide/api/configuration.md\n          - Targets: user_guide/api/targets.md\n          - Cluster: user_guide/api/cluster.md\n          - Other: user_guide/api/other.md\n\n      - Golang Package:\n          - Introduction: user_guide/golang_package/intro.md\n          - Target Options: user_guide/golang_package/target_options.md\n          - gNMI Options: user_guide/golang_package/gnmi_options.md\n          - Examples:\n              - Capabilities: user_guide/golang_package/examples/capabilities.md\n              - Get: user_guide/golang_package/examples/get.md\n              - Set: user_guide/golang_package/examples/set.md \n              - Subcribe: user_guide/golang_package/examples/subscribe.md\n      \n      - Collector Mode:\n          - 
Introduction: user_guide/collector/collector_intro.md\n          - Configuration: user_guide/collector/collector_configuration.md\n          - REST API: user_guide/collector/collector_api.md\n          \n  - Command reference:\n      - Capabilities: cmd/capabilities.md\n      - Get: cmd/get.md\n      - Set: cmd/set.md\n      - GetSet: cmd/getset.md\n      - Subscribe: cmd/subscribe.md\n      - Diff:\n        - Diff: cmd/diff/diff.md\n        - Diff Setrequest: cmd/diff/diff_setrequest.md\n        - Diff Set-To-Notifs: cmd/diff/diff_set_to_notifs.md\n      - Listen: cmd/listen.md\n      - Path: cmd/path.md\n      - Prompt: cmd/prompt.md\n      - Generate: \n        - Generate: 'cmd/generate.md'\n        - Generate Path: cmd/generate/generate_path.md\n        - Generate Set-Request: cmd/generate/generate_set_request.md\n      - Processor: cmd/processor.md\n      - Proxy: cmd/proxy.md\n      - Collector: cmd/collector.md\n    \n  - Deployment examples:\n      - Deployments: deployments/deployments_intro.md\n      \n      - gNMIc Single Instance:\n        - NATS Output: \n          - Containerlab: deployments/single-instance/containerlab/nats-output.md\n          - Docker Compose: deployments/single-instance/docker-compose/nats-output.md\n        - Kafka output: \n          - Containerlab: deployments/single-instance/containerlab/kafka-output.md\n          - Docker Compose: deployments/single-instance/docker-compose/kafka-output.md\n        - InfluxDB output: \n          - Containerlab: deployments/single-instance/containerlab/influxdb-output.md\n          - Docker Compose: deployments/single-instance/docker-compose/influxdb-output.md\n        - Prometheus output: \n          - Containerlab: deployments/single-instance/containerlab/prometheus-output.md\n          - Docker Compose: deployments/single-instance/docker-compose/prometheus-output.md\n        - Prometheus Remote Write output:\n          - Containerlab: 
deployments/single-instance/containerlab/prometheus-remote-write-output.md\n        - Multiple outputs: \n          - Containerlab: deployments/single-instance/containerlab/multiple-outputs.md\n          - Docker Compose: deployments/single-instance/docker-compose/multiple-outputs.md\n\n      - gNMIc Cluster:\n        - InfluxDB output: \n          - Containerlab: deployments/clusters/containerlab/cluster_with_influxdb_output.md\n          - Docker Compose: deployments/clusters/docker-compose/cluster_with_influxdb_output.md\n        - Prometheus output:\n          - Containerlab: deployments/clusters/containerlab/cluster_with_prometheus_output.md\n          - Docker Compose: deployments/clusters/docker-compose/cluster_with_prometheus_output.md\n          - Kubernetes: deployments/clusters/kubernetes/cluster_with_prometheus_output.md\n        - Prometheus output with data replication:\n          - Containerlab: deployments/clusters/containerlab/cluster_with_nats_input_and_prometheus_output.md\n          - Docker Compose: deployments/clusters/docker-compose/cluster_with_nats_input_and_prometheus_output.md     \n        - gNMI Server Cluster:\n          - Containerlab: deployments/clusters/containerlab/cluster_with_gnmi_server_and_prometheus_output.md\n\n      - gNMIc Pipeline:\n        - NATS to Prometheus: \n          - Docker Compose: deployments/pipelines/docker-compose/nats_prometheus.md\n        - NATS to InfluxDB: \n          - Docker Compose: deployments/pipelines/docker-compose/nats_influxdb.md\n        - Clustered pipeline: \n          - Docker Compose: deployments/pipelines/docker-compose/gnmic_cluster_nats_prometheus.md\n        - Forked pipeline: \n          - Docker Compose: deployments/pipelines/docker-compose/forked_pipeline.md \n  \n  - Changelog: changelog.md\n  # - Blog: blog/index.md\n\nsite_author: Karim Radhouani\nsite_description: >-\n  gnmi client and collector command line interface\n# Repository\nrepo_name: openconfig/gnmic\nrepo_url: 
https://github.com/openconfig/gnmic\nedit_uri: \"\"\ntheme:\n  name: material\n\n  features:\n    - navigation.tabs\n    #- navigation.expand\n    - navigation.top\n    #- navigation.sections\n\n  # 404 page\n  static_templates:\n    - 404.html\n\n  # Don't include MkDocs' JavaScript\n  include_search_page: false\n  search_index_only: true\n\n  # Default values, taken from mkdocs_theme.yml\n  language: en\n  palette:\n      # Light mode\n    - media: \"(prefers-color-scheme: light)\"\n      scheme: default\n      primary: blue\n      accent: indigo\n      toggle:\n        icon: material/toggle-switch-off-outline\n        name: Switch to dark mode\n    # Dark mode\n    - media: \"(prefers-color-scheme: dark)\"\n      scheme: slate\n      primary: black\n      accent: cyan\n      toggle:\n        icon: material/toggle-switch\n        name: Switch to light mode\n\n  font:\n    text: Manrope\n    code: Fira Mono\n  icon:\n    logo: octicons/pulse-24\n  favicon: images/pulse.svg\n\nextra_css:\n  - stylesheets/extra.css\n\n# Plugins\nplugins:\n  - search\n  - minify:\n      minify_html: true\n\n# Customization\nextra:\n  social:\n    - icon: fontawesome/brands/github\n      link: https://github.com/karimra\n  analytics:\n    provider: google\n    property: UA-177206500-1\n\n# Extensions\nmarkdown_extensions:\n  - markdown.extensions.admonition\n  - markdown.extensions.attr_list\n  - markdown.extensions.codehilite:\n      guess_lang: false\n  - markdown.extensions.def_list\n  - markdown.extensions.footnotes\n  - markdown.extensions.meta\n  - markdown.extensions.toc:\n      permalink: \"#\"\n  - pymdownx.arithmatex\n  - pymdownx.betterem:\n      smart_enable: all\n  - pymdownx.caret\n  - pymdownx.critic\n  - pymdownx.details\n  - pymdownx.emoji:\n      emoji_index: !!python/name:materialx.emoji.twemoji\n      emoji_generator: !!python/name:materialx.emoji.to_svg\n  - pymdownx.highlight:\n      linenums_style: pymdownx-inline\n  - pymdownx.inlinehilite\n  - pymdownx.keys\n  
- pymdownx.magiclink:\n      repo_url_shorthand: true\n      user: squidfunk\n      repo: mkdocs-material\n  - pymdownx.mark\n  - pymdownx.smartsymbols\n  - pymdownx.snippets:\n      check_paths: true\n  - pymdownx.superfences\n  - pymdownx.tabbed:\n      alternate_style: true \n  - pymdownx.tasklist:\n      custom_checkbox: true\n  - pymdownx.tilde\n"
  },
  {
    "path": "pkg/actions/action.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage actions\n\nimport (\n\t\"context\"\n\t\"log\"\n\n\t\"github.com/mitchellh/mapstructure\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\ntype Action interface {\n\t// Init initializes an Action given its configuration and a list of options\n\tInit(cfg map[string]interface{}, opts ...Option) error\n\t// Run, well runs the action.\n\t// it takes an action Context which is made of:\n\t//  - `Input`  : an interface{} event message, target name added/deleted,...\n\t//  - `Env`    : a map[string]interface{} containing the output of previous actions\n\t//  - `Vars`   : a map[string]interface{} containing variables passed to the action\n\t//  - `Targets`: a map[string]*types.TargetConfig containing (if the action is ran by a loader)\n\t//               the currently known targets configurations\n\tRun(ctx context.Context, aCtx *Context) (interface{}, error)\n\t// NName returns the configured action name\n\tNName() string\n\t// WithTargets passes the known configured targets to the action when initialized\n\tWithTargets(map[string]*types.TargetConfig)\n\t// WithLogger passes the configured logger to the action\n\tWithLogger(*log.Logger)\n}\n\n// Context defines an action execution context\ntype Context struct {\n\t// Input event message, target name added/deleted,...\n\tInput interface{} `json:\"Input,omitempty\"`\n\t// Env used to store the output of a sequence of actions\n\tEnv map[string]interface{} `json:\"Env,omitempty\"`\n\t// Vars contains the variables passed to the action\n\tVars 
map[string]interface{} `json:\"Vars,omitempty\"`\n\t// a map of known targets configurations\n\tTargets map[string]*types.TargetConfig `json:\"Targets,omitempty\"`\n}\n\nvar ActionTypes = []string{\n\t\"gnmi\",\n\t\"http\",\n\t\"script\",\n\t\"template\",\n}\n\ntype Option func(Action)\n\nvar Actions = map[string]Initializer{}\n\ntype Initializer func() Action\n\nfunc Register(name string, initFn Initializer) {\n\tActions[name] = initFn\n}\n\nfunc DecodeConfig(src, dst interface{}) error {\n\tdecoder, err := mapstructure.NewDecoder(\n\t\t&mapstructure.DecoderConfig{\n\t\t\tDecodeHook: mapstructure.StringToTimeDurationHookFunc(),\n\t\t\tResult:     dst,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn decoder.Decode(src)\n}\n\nfunc WithTargets(tcs map[string]*types.TargetConfig) Option {\n\treturn func(a Action) {\n\t\ta.WithTargets(tcs)\n\t}\n}\n\nfunc WithLogger(l *log.Logger) Option {\n\treturn func(a Action) {\n\t\ta.WithLogger(l)\n\t}\n}\n"
  },
  {
    "path": "pkg/actions/all/all.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage all\n\nimport (\n\t_ \"github.com/openconfig/gnmic/pkg/actions/gnmi_action\"\n\t_ \"github.com/openconfig/gnmic/pkg/actions/http_action\"\n\t_ \"github.com/openconfig/gnmic/pkg/actions/script_action\"\n\t_ \"github.com/openconfig/gnmic/pkg/actions/template_action\"\n)\n"
  },
  {
    "path": "pkg/actions/gnmi_action/gnmi_action.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage gnmi_action\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"strings\"\n\t\"sync\"\n\t\"text/template\"\n\n\t\"gopkg.in/yaml.v2\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\n\t\"github.com/openconfig/gnmic/pkg/actions\"\n\t\"github.com/openconfig/gnmic/pkg/api\"\n\t\"github.com/openconfig/gnmic/pkg/api/target\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n)\n\nconst (\n\tdefaultRPC      = \"get\"\n\tloggingPrefix   = \"[gnmi_action] \"\n\tactionType      = \"gnmi\"\n\tdefaultDataType = \"ALL\"\n\tdefaultTarget   = `{{ index .Input.Tags \"source\" }}`\n\tdefaultEncoding = \"JSON\"\n\tdefaultFormat   = \"json\"\n)\n\nconst (\n\trpcGet        = \"get\"\n\trpcSet        = \"set\"\n\trpcSetUpdate  = \"set-update\"\n\trpcSetReplace = \"set-replace\"\n\trpcSetDelete  = \"set-delete\"\n\trpcDelete     = \"delete\"\n\trpcSub        = \"sub\"\n\trpcSubscribe  = \"subscribe\"\n)\n\nfunc init() {\n\tactions.Register(actionType, func() actions.Action {\n\t\treturn &gnmiAction{\n\t\t\tlogger:         log.New(io.Discard, \"\", 0),\n\t\t\tm:              new(sync.RWMutex),\n\t\t\ttargetsConfigs: make(map[string]*types.TargetConfig),\n\t\t}\n\t})\n}\n\ntype gnmiAction struct {\n\t// action name\n\tName string `mapstructure:\"name,omitempty\"`\n\t// target of the gNMI RPC, it can be a Go 
template\n\tTarget string `mapstructure:\"target,omitempty\"`\n\t// gNMI RPC, possible values `get`, `set`, `set-update`,\n\t// `set-replace`, `sub`, `subscribe`\n\tRPC string `mapstructure:\"rpc,omitempty\"`\n\t// gNMI Path Prefix, can be a Go template\n\tPrefix string `mapstructure:\"prefix,omitempty\"`\n\t// list of gNMI Paths, each one can be a Go template\n\tPaths []string `mapstructure:\"paths,omitempty\"`\n\t// gNMI data type in case RPC is `get`,\n\t// possible values: `config`, `state`, `operational`\n\tType string `mapstructure:\"data-type,omitempty\"`\n\t// list of gNMI values, used in case RPC=`set*`\n\tValues []string `mapstructure:\"values,omitempty\"`\n\t// gNMI encoding\n\tEncoding string `mapstructure:\"encoding,omitempty\"`\n\t// Debug\n\tDebug bool `mapstructure:\"debug,omitempty\"`\n\t// Ignore ENV proxy\n\tNoEnvProxy bool `mapstructure:\"no-env-proxy,omitempty\"`\n\t// Response format,\n\t// possible values: `json`, `event`, `prototext`, `protojson`\n\tFormat string `mapstructure:\"format,omitempty\"`\n\n\ttarget *template.Template\n\tprefix *template.Template\n\tpaths  []*template.Template\n\tvalues []*template.Template\n\n\tlogger *log.Logger\n\n\tm              *sync.RWMutex\n\ttargetsConfigs map[string]*types.TargetConfig\n}\n\nfunc (g *gnmiAction) Init(cfg map[string]interface{}, opts ...actions.Option) error {\n\terr := actions.DecodeConfig(cfg, g)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(g)\n\t}\n\tif g.Name == \"\" {\n\t\treturn fmt.Errorf(\"action type %q missing name field\", actionType)\n\t}\n\tg.setDefaults()\n\terr = g.parseTemplates()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = g.validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.logger.Printf(\"action name %q of type %q initialized: %v\", g.Name, actionType, g)\n\treturn nil\n}\n\nfunc (g *gnmiAction) Run(ctx context.Context, aCtx *actions.Context) (interface{}, error) {\n\tg.m.Lock()\n\tfor n, tc := range aCtx.Targets 
{\n\t\tg.targetsConfigs[n] = tc\n\t}\n\tin := &actions.Context{\n\t\tInput:   aCtx.Input,\n\t\tEnv:     aCtx.Env,\n\t\tVars:    aCtx.Vars,\n\t\tTargets: aCtx.Targets,\n\t}\n\tg.m.Unlock()\n\tb := new(bytes.Buffer)\n\terr := g.target.Execute(b, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttName := b.String()\n\ttargetsConfigs, err := g.selectTargets(tName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tresult := make(map[string]interface{})\n\tresCh := make(chan *gnmiResponse)\n\terrCh := make(chan error)\n\twg := new(sync.WaitGroup)\n\twg.Add(len(targetsConfigs))\n\tfor _, tc := range targetsConfigs {\n\t\tgo func(tc *types.TargetConfig) {\n\t\t\tdefer wg.Done()\n\t\t\t// create new actions.Context to be used by each target\n\t\t\t// run RPC\n\t\t\trb, err := g.runRPC(ctx, tc, &actions.Context{\n\t\t\t\tInput: in.Input,\n\t\t\t\tEnv:   in.Env,\n\t\t\t\tVars:  in.Vars,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\terrCh <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresCh <- &gnmiResponse{name: tc.Name, data: rb}\n\t\t}(tc)\n\t}\n\n\terrs := make([]error, 0)\n\tdoneCh := make(chan struct{})\n\tgo func() {\n\t\tdefer close(doneCh)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase resp, ok := <-resCh:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tvar res interface{}\n\t\t\t\t// using yaml.Unmarshal instead of json.Unmarshal to avoid\n\t\t\t\t// treating integers as floats\n\t\t\t\terr = yaml.Unmarshal(resp.data, &res)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t}\n\t\t\t\tresult[resp.name] = res\n\t\t\tcase err := <-errCh:\n\t\t\t\tg.logger.Printf(\"gnmi action error: %v\", err)\n\t\t\t\terrs = append(errs, err)\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\twg.Wait()\n\tclose(resCh) // close result channel\n\t<-doneCh     // wait for the result map to be set\n\tif len(errs) > 0 {\n\t\t// return only the first errors\n\t\treturn nil, errs[0]\n\t}\n\treturn result, 
nil\n}\n\nfunc (g *gnmiAction) NName() string { return g.Name }\n\nfunc (g *gnmiAction) setDefaults() {\n\tif g.Type == \"\" {\n\t\tg.Type = defaultDataType\n\t}\n\tif g.Encoding == \"\" {\n\t\tg.Encoding = defaultEncoding\n\t}\n\tswitch g.RPC {\n\tcase \"\":\n\t\tg.RPC = defaultRPC\n\tcase rpcSet:\n\t\tg.RPC = rpcSetUpdate\n\tcase rpcDelete:\n\t\tg.RPC = rpcSetDelete\n\tcase rpcSub:\n\t\tg.RPC = rpcSubscribe\n\t}\n\tif g.Target == \"\" {\n\t\tg.Target = defaultTarget\n\t}\n\tif g.Format == \"\" {\n\t\tg.Format = defaultFormat\n\t}\n}\n\nfunc (g *gnmiAction) validate() error {\n\tnumPaths := len(g.Paths)\n\tif numPaths == 0 {\n\t\treturn errors.New(\"paths field is required\")\n\t}\n\tswitch g.RPC {\n\tcase rpcGet, rpcSetDelete, rpcDelete:\n\tcase rpcSetUpdate, rpcSetReplace:\n\t\tnumValues := len(g.values)\n\t\tif numValues == 0 {\n\t\t\treturn errors.New(\"values field is required when RPC is set\")\n\t\t}\n\t\tif numPaths != len(g.values) {\n\t\t\treturn errors.New(\"number of paths and values do not match\")\n\t\t}\n\tcase rpcSub, rpcSubscribe:\n\t\tif strings.ToLower(g.Format) != \"json\" &&\n\t\t\tstrings.ToLower(g.Format) != \"protojson\" &&\n\t\t\tstrings.ToLower(g.Format) != \"event\" {\n\t\t\treturn fmt.Errorf(\"unsupported format %q\", g.Format)\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown gnmi RPC %q\", g.RPC)\n\t}\n\treturn nil\n}\n\nfunc (g *gnmiAction) parseTemplates() error {\n\tvar err error\n\tg.target, err = gtemplate.CreateTemplate(fmt.Sprintf(\"%s-target\", g.Name), g.Target)\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.prefix, err = gtemplate.CreateTemplate(fmt.Sprintf(\"%s-prefix\", g.Name), g.Prefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.paths, err = g.createTemplates(\"path\", g.Paths)\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.values, err = g.createTemplates(\"value\", g.Values)\n\treturn err\n}\n\nfunc (g *gnmiAction) createTemplates(n string, s []string) ([]*template.Template, error) {\n\ttpls := 
make([]*template.Template, 0, len(s))\n\tfor i, p := range s {\n\t\ttpl, err := gtemplate.CreateTemplate(fmt.Sprintf(\"%s-%s-%d\", g.Name, n, i), p)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttpls = append(tpls, tpl)\n\t}\n\treturn tpls, nil\n}\n\nfunc (g *gnmiAction) createGetRequest(in *actions.Context) (*gnmi.GetRequest, error) {\n\tgnmiOpts := make([]api.GNMIOption, 0, 3)\n\tgnmiOpts = append(gnmiOpts, api.Encoding(g.Encoding))\n\tgnmiOpts = append(gnmiOpts, api.DataType(g.Type))\n\n\tvar err error\n\tb := new(bytes.Buffer)\n\tif g.Prefix != \"\" {\n\t\terr = g.prefix.Execute(b, in)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"prefix parse error: %v\", err)\n\t\t}\n\t\tgnmiOpts = append(gnmiOpts, api.Prefix(b.String()))\n\t}\n\n\tfor _, p := range g.paths {\n\t\tb.Reset()\n\t\terr = p.Execute(b, in)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"path parse error: %v\", err)\n\t\t}\n\t\tgnmiOpts = append(gnmiOpts, api.Path(b.String()))\n\t}\n\n\treturn api.NewGetRequest(gnmiOpts...)\n}\n\nfunc (g *gnmiAction) createSetRequest(in *actions.Context) (*gnmi.SetRequest, error) {\n\tgnmiOpts := make([]api.GNMIOption, 0, len(g.paths))\n\tvar err error\n\tb := new(bytes.Buffer)\n\tif g.Prefix != \"\" {\n\t\terr = g.prefix.Execute(b, in)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"prefix parse error: %v\", err)\n\t\t}\n\t\tgnmiOpts = append(gnmiOpts, api.Prefix(b.String()))\n\t}\n\tfor i, p := range g.paths {\n\t\tb.Reset()\n\t\terr = p.Execute(b, in)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"path parse error: %v\", err)\n\t\t}\n\t\tsPath := b.String()\n\t\tswitch g.RPC {\n\t\tcase rpcSetDelete:\n\t\t\tgnmiOpts = append(gnmiOpts, api.Delete(sPath))\n\t\tcase rpcSetUpdate:\n\t\t\tb.Reset()\n\t\t\terr = g.values[i].Execute(b, in)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"value %d parse error: %v\", i, err)\n\t\t\t}\n\t\t\tgnmiOpts = append(gnmiOpts, 
api.Update(\n\t\t\t\tapi.Path(sPath),\n\t\t\t\tapi.Value(b.String(), g.Encoding),\n\t\t\t))\n\t\tcase rpcSetReplace:\n\t\t\tb.Reset()\n\t\t\terr = g.values[i].Execute(b, in)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"value %d parse error: %v\", i, err)\n\t\t\t}\n\t\t\tgnmiOpts = append(gnmiOpts,\n\t\t\t\tapi.Replace(\n\t\t\t\t\tapi.Path(sPath),\n\t\t\t\t\tapi.Value(b.String(), g.Encoding),\n\t\t\t\t))\n\t\t}\n\t}\n\treturn api.NewSetRequest(gnmiOpts...)\n}\n\nfunc (g *gnmiAction) createSubscribeRequest(in *actions.Context) (*gnmi.SubscribeRequest, error) {\n\tgnmiOpts := make([]api.GNMIOption, 0, 2+len(g.paths))\n\tgnmiOpts = append(gnmiOpts,\n\t\tapi.Encoding(g.Encoding),\n\t\tapi.SubscriptionListModeONCE(),\n\t)\n\t//\n\tvar err error\n\tb := new(bytes.Buffer)\n\tif g.Prefix != \"\" {\n\t\terr = g.prefix.Execute(b, in)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"prefix template exec error: %v\", err)\n\t\t}\n\t\tgnmiOpts = append(gnmiOpts, api.Prefix(b.String()))\n\t}\n\tfor _, p := range g.paths {\n\t\tb.Reset()\n\t\terr = p.Execute(b, in)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"path template exec error: %v\", err)\n\t\t}\n\t\tgnmiOpts = append(gnmiOpts, api.Subscription(\n\t\t\tapi.Path(b.String())))\n\t}\n\treturn api.NewSubscribeRequest(gnmiOpts...)\n}\n\nfunc (g *gnmiAction) selectTargets(tName string) ([]*types.TargetConfig, error) {\n\tif tName == \"\" {\n\t\treturn nil, nil\n\t}\n\n\ttargets := make([]*types.TargetConfig, 0, len(g.targetsConfigs))\n\tg.m.RLock()\n\tdefer g.m.RUnlock()\n\t// select all targets\n\tif tName == \"all\" {\n\t\tfor _, tc := range g.targetsConfigs {\n\t\t\ttargets = append(targets, tc)\n\t\t}\n\t\treturn targets, nil\n\t}\n\t// select a few targets\n\ttNames := strings.Split(tName, \",\")\n\tfor _, name := range tNames {\n\t\tif tc, ok := g.targetsConfigs[name]; ok {\n\t\t\ttargets = append(targets, tc)\n\t\t}\n\t}\n\treturn targets, nil\n}\n\nfunc (g *gnmiAction) runRPC(ctx context.Context, 
tc *types.TargetConfig, in *actions.Context) ([]byte, error) {\n\tswitch g.RPC {\n\tcase rpcGet:\n\t\treturn g.runGet(ctx, tc, in)\n\tcase rpcSetUpdate, rpcSetReplace, rpcSetDelete:\n\t\treturn g.runSet(ctx, tc, in)\n\tcase rpcSubscribe: // once\n\t\treturn g.runSubscribe(ctx, tc, in)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown RPC %q\", g.RPC)\n\t}\n}\n\nfunc (g *gnmiAction) runGet(ctx context.Context, tc *types.TargetConfig, in *actions.Context) ([]byte, error) {\n\tt := target.NewTarget(tc)\n\treq, err := g.createGetRequest(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = t.CreateGNMIClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer t.Close()\n\tresp, err := t.Get(ctx, req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"target %q GetRequest failed: %v\", t.Config.Name, err)\n\t}\n\tmo := &formatters.MarshalOptions{Format: g.Format}\n\treturn mo.Marshal(resp, nil)\n}\n\nfunc (g *gnmiAction) runSet(ctx context.Context, tc *types.TargetConfig, in *actions.Context) ([]byte, error) {\n\tt := target.NewTarget(tc)\n\treq, err := g.createSetRequest(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = t.CreateGNMIClient(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"target %q SetRequest failed: %v\", t.Config.Name, err)\n\t}\n\tdefer t.Close()\n\tresp, err := t.Set(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmo := &formatters.MarshalOptions{Format: g.Format}\n\treturn mo.Marshal(resp, nil)\n}\n\nfunc (g *gnmiAction) runSubscribe(ctx context.Context, tc *types.TargetConfig, in *actions.Context) ([]byte, error) {\n\tt := target.NewTarget(tc)\n\treq, err := g.createSubscribeRequest(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = t.CreateGNMIClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer t.Close()\n\tresponses, err := t.SubscribeOnce(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmo := &formatters.MarshalOptions{Format: g.Format}\n\tformattedResponse := make([]interface{}, 0, 
len(responses))\n\tm := map[string]string{\n\t\t\"source\": tc.Name,\n\t}\n\tfor _, r := range responses {\n\t\tmsgb, err := mo.Marshal(r, m)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar v interface{}\n\t\terr = json.Unmarshal(msgb, &v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tformattedResponse = append(formattedResponse, utils.Convert(v))\n\t}\n\treturn json.Marshal(formattedResponse)\n}\n\ntype gnmiResponse struct {\n\tname string\n\tdata []byte\n}\n"
  },
  {
    "path": "pkg/actions/gnmi_action/gnmi_action_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage gnmi_action\n\nimport (\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\n\t\"github.com/openconfig/gnmic/pkg/actions\"\n\t\"github.com/openconfig/gnmic/pkg/api/testutils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype getRequestTestItem struct {\n\tinput  *formatters.EventMsg\n\toutput *gnmi.GetRequest\n}\n\ntype setRequestTestItem struct {\n\tinput  *formatters.EventMsg\n\toutput *gnmi.SetRequest\n}\n\nvar getRequestTestSet = map[string]struct {\n\tactionType string\n\taction     map[string]interface{}\n\ttests      []getRequestTestItem\n}{\n\t\"get_no_templates\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":  \"gnmi\",\n\t\t\t\"name\":  \"act1\",\n\t\t\t\"paths\": []string{\"/path\"},\n\t\t\t\"debug\": true,\n\t\t\t\"vars\":  nil,\n\t\t},\n\t\ttests: []getRequestTestItem{\n\t\t\t{\n\t\t\t\tinput: nil,\n\t\t\t\toutput: &gnmi.GetRequest{\n\t\t\t\t\tPath: []*gnmi.Path{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"path\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tEncoding: gnmi.Encoding_JSON,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"get_with_templates_in_path\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":  \"gnmi\",\n\t\t\t\"name\":  \"act1\",\n\t\t\t\"paths\": []string{`/{{.Input.Name}}`},\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: []getRequestTestItem{\n\t\t\t{\n\t\t\t\tinput: 
&formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t},\n\t\t\t\toutput: &gnmi.GetRequest{\n\t\t\t\t\tPath: []*gnmi.Path{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tEncoding: gnmi.Encoding_JSON,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"get_with_templates_in_prefix\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":   \"gnmi\",\n\t\t\t\"name\":   \"act1\",\n\t\t\t\"prefix\": `/{{.Input.Name}}`,\n\t\t\t\"paths\":  []string{`/{{.Input.Name}}`},\n\t\t\t\"debug\":  true,\n\t\t},\n\t\ttests: []getRequestTestItem{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t},\n\t\t\t\toutput: &gnmi.GetRequest{\n\t\t\t\t\tPrefix: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tPath: []*gnmi.Path{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tEncoding: gnmi.Encoding_JSON,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nvar setRequestTestSet = map[string]struct {\n\tactionType string\n\taction     map[string]interface{}\n\ttests      []setRequestTestItem\n}{\n\t\"set_no_templates\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":   \"gnmi\",\n\t\t\t\"name\":   \"act1\",\n\t\t\t\"rpc\":    \"set\",\n\t\t\t\"paths\":  []string{\"/path\"},\n\t\t\t\"values\": []string{\"value1\"},\n\t\t\t\"debug\":  true,\n\t\t},\n\t\ttests: []setRequestTestItem{\n\t\t\t{\n\t\t\t\tinput: nil,\n\t\t\t\toutput: &gnmi.SetRequest{\n\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: 
\"path\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value1\\\"\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"set_with_templates_in_path\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":   \"gnmi\",\n\t\t\t\"name\":   \"act1\",\n\t\t\t\"rpc\":    \"set\",\n\t\t\t\"paths\":  []string{\"/{{.Input.Name}}\"},\n\t\t\t\"values\": []string{\"value1\"},\n\t\t\t\"debug\":  true,\n\t\t},\n\t\ttests: []setRequestTestItem{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t},\n\t\t\t\toutput: &gnmi.SetRequest{\n\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value1\\\"\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// changing a value via set update\n\t\"set_with_template_in_values\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":   \"gnmi\",\n\t\t\t\"name\":   \"act1\",\n\t\t\t\"rpc\":    \"set\",\n\t\t\t\"paths\":  []string{`{{ range $k, $v := .Input.Values }}{{if eq $k \"path1\" }}{{$k}}{{end}}{{end}}`},\n\t\t\t\"values\": []string{`{{ range $k, $v := .Input.Values }}{{if eq $k \"path1\" }}value2{{end}}{{end}}`},\n\t\t\t\"debug\":  true,\n\t\t},\n\t\ttests: []setRequestTestItem{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"path1\": \"value1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: 
&gnmi.SetRequest{\n\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"path1\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value2\\\"\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// changing multiple values via set update\n\t\"set_with_multiple_templates_in_values\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\": \"gnmi\",\n\t\t\t\"name\": \"act1\",\n\t\t\t\"rpc\":  \"set\",\n\t\t\t\"paths\": []string{\n\t\t\t\t`{{ range $k, $v := .Input.Values }}{{if eq $k \"path1\" }}{{$k}}{{end}}{{end}}`,\n\t\t\t\t`{{ range $k, $v := .Input.Values }}{{if eq $k \"path2\" }}{{$k}}{{end}}{{end}}`,\n\t\t\t},\n\t\t\t\"values\": []string{\n\t\t\t\t`{{ range $k, $v := .Input.Values }}{{if eq $k \"path1\" }}value11{{end}}{{end}}`,\n\t\t\t\t`{{ range $k, $v := .Input.Values }}{{if eq $k \"path2\" }}value22{{end}}{{end}}`,\n\t\t\t},\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: []setRequestTestItem{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"path1\": \"value1\",\n\t\t\t\t\t\t\"path2\": \"value2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: &gnmi.SetRequest{\n\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"path1\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value11\\\"\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: 
&gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"path2\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value22\\\"\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// changing a value via set replace\n\t\"set_replace_with_template_in_values\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":   \"gnmi\",\n\t\t\t\"name\":   \"act1\",\n\t\t\t\"rpc\":    \"set-replace\",\n\t\t\t\"paths\":  []string{`{{ range $k, $v := .Input.Values }}{{if and (eq $k \"path1\") (eq $v \"value1\")}}{{$k}}{{end}}{{end}}`},\n\t\t\t\"values\": []string{`{{ range $k, $v := .Input.Values }}{{if and (eq $k \"path1\") (eq $v \"value1\")}}value2{{end}}{{end}}`},\n\t\t\t\"debug\":  true,\n\t\t},\n\t\ttests: []setRequestTestItem{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"path1\": \"value1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: &gnmi.SetRequest{\n\t\t\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"path1\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value2\\\"\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// changing multiple values via set update replace\n\t\"set_replace_with_multiple_templates_in_values\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\": \"gnmi\",\n\t\t\t\"name\": \"act1\",\n\t\t\t\"rpc\":  \"set-replace\",\n\t\t\t\"paths\": 
[]string{\n\t\t\t\t`{{ range $k, $v := .Input.Values }}{{if and (eq $k \"path1\") (eq $v \"value1\")}}{{$k}}{{end}}{{end}}`,\n\t\t\t\t`{{ range $k, $v := .Input.Values }}{{if and (eq $k \"path2\") (eq $v \"value2\")}}{{$k}}{{end}}{{end}}`,\n\t\t\t},\n\t\t\t\"values\": []string{\n\t\t\t\t`{{ range $k, $v := .Input.Values }}{{if and (eq $k \"path1\") (eq $v \"value1\")}}value11{{end}}{{end}}`,\n\t\t\t\t`{{ range $k, $v := .Input.Values }}{{if and (eq $k \"path2\") (eq $v \"value2\")}}value22{{end}}{{end}}`,\n\t\t\t},\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: []setRequestTestItem{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"path1\": \"value1\",\n\t\t\t\t\t\t\"path2\": \"value2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: &gnmi.SetRequest{\n\t\t\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"path1\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value11\\\"\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"path2\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value22\\\"\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestGnmiGetRequest(t *testing.T) {\n\tfor name, ts := range getRequestTestSet {\n\t\tif ai, ok := actions.Actions[ts.actionType]; ok {\n\t\t\tt.Log(\"found action\")\n\t\t\ta := ai()\n\t\t\terr := a.Init(ts.action)\n\t\t\tif err != nil 
{\n\t\t\t\tt.Errorf(\"failed to initialize action: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"action: %+v\", a)\n\t\t\tga := a.(*gnmiAction)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\tgReq, err := ga.createGetRequest(&actions.Context{Input: item.input})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Logf(\"failed: %v\", err)\n\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t}\n\t\t\t\t\tif !testutils.GetRequestsEqual(gReq, item.output) {\n\t\t\t\t\t\tt.Errorf(\"failed at %s item %d, expected %+v, got: %+v\", name, i, item.output, gReq)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"action %q not found\", ts.actionType)\n\t\t}\n\t}\n}\n\nfunc TestGnmiSetRequest(t *testing.T) {\n\tfor name, ts := range setRequestTestSet {\n\t\tif ai, ok := actions.Actions[ts.actionType]; ok {\n\t\t\tt.Log(\"found action\")\n\t\t\ta := ai()\n\t\t\terr := a.Init(ts.action)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize action: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"action: %+v\", a)\n\t\t\tga := a.(*gnmiAction)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\tgReq, err := ga.createSetRequest(&actions.Context{Input: item.input})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Logf(\"failed: %v\", err)\n\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t}\n\t\t\t\t\tif !testutils.SetRequestsEqual(gReq, item.output) {\n\t\t\t\t\t\tt.Errorf(\"failed at %s item %d, expected %+v, got: %+v\", name, i, item.output, gReq)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"action %q not found\", ts.actionType)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/actions/gnmi_action/options.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage gnmi_action\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n)\n\nfunc (g *gnmiAction) WithTargets(tcs map[string]*types.TargetConfig) {\n\tif tcs == nil {\n\t\treturn\n\t}\n\tg.targetsConfigs = tcs\n}\n\nfunc (g *gnmiAction) WithLogger(logger *log.Logger) {\n\tif g.Debug && logger != nil {\n\t\tg.logger = log.New(logger.Writer(), loggingPrefix, logger.Flags())\n\t} else if g.Debug {\n\t\tg.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n"
  },
  {
    "path": "pkg/actions/http_action/http_action.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage http_action\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/actions\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tdefaultMethod       = \"GET\"\n\tdefaultTimeout      = 5 * time.Second\n\tloggingPrefix       = \"[http_action] \"\n\tactionType          = \"http\"\n\tdefaultBodyTemplate = \"{{ json . 
}}\"\n)\n\nfunc init() {\n\tactions.Register(actionType, func() actions.Action {\n\t\treturn &httpAction{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\ntype httpAction struct {\n\tName    string            `mapstructure:\"name,omitempty\"`\n\tMethod  string            `mapstructure:\"method,omitempty\"`\n\tURL     string            `mapstructure:\"url,omitempty\"`\n\tHeaders map[string]string `mapstructure:\"headers,omitempty\"`\n\tTimeout time.Duration     `mapstructure:\"timeout,omitempty\"`\n\tBody    string            `mapstructure:\"body,omitempty\"`\n\tDebug   bool              `mapstructure:\"debug,omitempty\"`\n\n\turl    *template.Template\n\tbody   *template.Template\n\tlogger *log.Logger\n}\n\nfunc (h *httpAction) Init(cfg map[string]interface{}, opts ...actions.Option) error {\n\terr := actions.DecodeConfig(cfg, h)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(h)\n\t}\n\tif h.Name == \"\" {\n\t\treturn fmt.Errorf(\"action type %q missing name field\", actionType)\n\t}\n\terr = h.setDefaults()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\th.body, err = template.New(\"body\").Funcs(funcMap).Parse(h.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\th.url, err = template.New(\"url\").Funcs(funcMap).Parse(h.URL)\n\treturn err\n}\n\nfunc (h *httpAction) Run(ctx context.Context, aCtx *actions.Context) (interface{}, error) {\n\tif h.url == nil {\n\t\treturn nil, errors.New(\"missing url template\")\n\t}\n\tif h.body == nil {\n\t\treturn nil, errors.New(\"missing body template\")\n\t}\n\tin := &actions.Context{\n\t\tInput:   aCtx.Input,\n\t\tEnv:     aCtx.Env,\n\t\tVars:    aCtx.Vars,\n\t\tTargets: aCtx.Targets,\n\t}\n\tb := new(bytes.Buffer)\n\terr := json.NewEncoder(b).Encode(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb.Reset()\n\terr = h.body.Execute(b, in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\turl := new(bytes.Buffer)\n\terr = h.url.Execute(url, in)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\th.logger.Printf(\"url: %s\", url.String())\n\th.logger.Printf(\"body: %s\", b.String())\n\n\treq, err := http.NewRequest(h.Method, url.String(), b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor k, v := range h.Headers {\n\t\treq.Header.Add(k, v)\n\t}\n\tclient := &http.Client{\n\t\tTimeout: h.Timeout,\n\t}\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tresp, err := client.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusOK {\n\t\tbodyBytes, err := io.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn bodyBytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"status code=%d\", resp.StatusCode)\n}\n\nfunc (h *httpAction) NName() string { return h.Name }\n\nfunc (h *httpAction) setDefaults() error {\n\t// if !strings.HasPrefix(h.URL, \"http\") {\n\t// \th.URL = \"http://\" + h.URL\n\t// }\n\t// if _, err := url.Parse(h.URL); err != nil {\n\t// \treturn err\n\t// }\n\tif h.Method == \"\" {\n\t\th.Method = defaultMethod\n\t}\n\th.Method = strings.ToUpper(h.Method)\n\tswitch h.Method {\n\tcase http.MethodConnect:\n\t\tbreak\n\tcase http.MethodDelete:\n\t\tbreak\n\tcase http.MethodGet:\n\t\tbreak\n\tcase http.MethodHead:\n\t\tbreak\n\tcase http.MethodOptions:\n\t\tbreak\n\tcase http.MethodPatch:\n\t\tbreak\n\tcase http.MethodPost:\n\t\tbreak\n\tcase http.MethodPut:\n\t\tbreak\n\tdefault:\n\t\treturn fmt.Errorf(\"method %q not allowed\", h.Method)\n\t}\n\tif h.Timeout <= 0 {\n\t\th.Timeout = defaultTimeout\n\t}\n\tif h.Body == \"\" {\n\t\th.Body = defaultBodyTemplate\n\t}\n\treturn nil\n}\n\nfunc (h *httpAction) WithTargets(map[string]*types.TargetConfig) {}\n\nfunc (h *httpAction) WithLogger(logger *log.Logger) {\n\tif h.Debug && logger != nil {\n\t\th.logger = log.New(logger.Writer(), loggingPrefix, logger.Flags())\n\t} else if h.Debug {\n\t\th.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nvar funcMap 
= template.FuncMap{\n\t\"json\": func(v interface{}) string {\n\t\ta, _ := json.Marshal(v)\n\t\treturn string(a)\n\t},\n\t\"name\": func(v interface{}) string {\n\t\tvar result interface{}\n\t\tswitch v := v.(type) {\n\t\tcase *formatters.EventMsg:\n\t\t\tresult = v.Name\n\t\tdefault:\n\t\t\treturn \"\"\n\t\t}\n\t\ta, _ := json.Marshal(result)\n\t\treturn string(a)\n\t},\n\t\"withTags\": func(v interface{}, keys ...string) string {\n\t\tswitch v := v.(type) {\n\t\tcase *formatters.EventMsg:\n\t\t\ttags := v.Tags\n\t\t\tv.Tags = make(map[string]string)\n\t\t\tfor _, k := range keys {\n\t\t\t\tif vv, ok := tags[k]; ok {\n\t\t\t\t\tv.Tags[k] = vv\n\t\t\t\t}\n\t\t\t}\n\t\t\ta, _ := json.Marshal(v)\n\t\t\treturn string(a)\n\t\tcase string:\n\t\t\tmsg := make(map[string]interface{})\n\t\t\tjson.Unmarshal([]byte(v), &msg)\n\t\t\ttags := msg[\"tags\"]\n\t\t\tif tags == nil {\n\t\t\t\ta, _ := json.Marshal(msg)\n\t\t\t\treturn string(a)\n\t\t\t}\n\t\t\ttagsMap, ok := tags.(map[string]interface{})\n\t\t\tif !ok {\n\t\t\t\ta, _ := json.Marshal(msg)\n\t\t\t\treturn string(a)\n\t\t\t}\n\t\t\tnewTags := make(map[string]interface{})\n\t\t\tfor _, k := range keys {\n\t\t\t\tif vv, ok := tagsMap[k]; ok {\n\t\t\t\t\tnewTags[k] = vv\n\t\t\t\t}\n\t\t\t}\n\t\t\tdelete(msg, \"tags\")\n\t\t\tif len(newTags) > 0 {\n\t\t\t\tmsg[\"tags\"] = newTags\n\t\t\t}\n\t\t\ta, _ := json.Marshal(msg)\n\t\t\treturn string(a)\n\t\t}\n\t\treturn \"\"\n\t},\n\t\"withValues\": func(v interface{}, keys ...string) string {\n\t\tswitch v := v.(type) {\n\t\tcase *formatters.EventMsg:\n\t\t\tvalues := v.Values\n\t\t\tv.Values = make(map[string]interface{})\n\t\t\tfor _, k := range keys {\n\t\t\t\tif vv, ok := values[k]; ok {\n\t\t\t\t\tv.Values[k] = vv\n\t\t\t\t}\n\t\t\t}\n\t\t\ta, _ := json.Marshal(v)\n\t\t\treturn string(a)\n\t\tcase string:\n\t\t\tmsg := make(map[string]interface{})\n\t\t\tjson.Unmarshal([]byte(v), &msg)\n\t\t\tvalues := msg[\"values\"]\n\t\t\tif values == nil {\n\t\t\t\ta, _ := 
json.Marshal(msg)\n\t\t\t\treturn string(a)\n\t\t\t}\n\t\t\tvaluesMap, ok := values.(map[string]interface{})\n\t\t\tif !ok {\n\t\t\t\ta, _ := json.Marshal(msg)\n\t\t\t\treturn string(a)\n\t\t\t}\n\t\t\tnewValues := make(map[string]interface{})\n\t\t\tfor _, k := range keys {\n\t\t\t\tif vv, ok := valuesMap[k]; ok {\n\t\t\t\t\tnewValues[k] = vv\n\t\t\t\t}\n\t\t\t}\n\t\t\tdelete(msg, \"values\")\n\t\t\tif len(newValues) > 0 {\n\t\t\t\tmsg[\"values\"] = newValues\n\t\t\t}\n\t\t\ta, _ := json.Marshal(msg)\n\t\t\treturn string(a)\n\t\t}\n\n\t\treturn \"\"\n\t},\n\t\"withoutTags\": func(v interface{}, keys ...string) string {\n\t\tswitch v := v.(type) {\n\t\tcase *formatters.EventMsg:\n\t\t\tfor _, k := range keys {\n\t\t\t\tdelete(v.Tags, k)\n\t\t\t}\n\t\t\ta, _ := json.Marshal(v)\n\t\t\treturn string(a)\n\t\tcase string:\n\t\t\tmsg := make(map[string]interface{})\n\t\t\tjson.Unmarshal([]byte(v), &msg)\n\t\t\ttags := msg[\"tags\"]\n\t\t\tif tags == nil {\n\t\t\t\ta, _ := json.Marshal(msg)\n\t\t\t\treturn string(a)\n\t\t\t}\n\t\t\tswitch tags := msg[\"tags\"].(type) {\n\t\t\tcase map[string]interface{}:\n\t\t\t\tfor _, k := range keys {\n\t\t\t\t\tdelete(tags, k)\n\t\t\t\t}\n\t\t\t\tmsg[\"tags\"] = tags\n\t\t\t}\n\t\t\ta, _ := json.Marshal(msg)\n\t\t\treturn string(a)\n\t\t}\n\t\treturn \"\"\n\t},\n\t\"withoutValues\": func(v interface{}, keys ...string) string {\n\t\tswitch v := v.(type) {\n\t\tcase *formatters.EventMsg:\n\t\t\tfor _, k := range keys {\n\t\t\t\tdelete(v.Values, k)\n\t\t\t}\n\t\t\ta, _ := json.Marshal(v)\n\t\t\treturn string(a)\n\t\tcase string:\n\t\t\tmsg := make(map[string]interface{})\n\t\t\tjson.Unmarshal([]byte(v), &msg)\n\t\t\tif msg[\"values\"] == nil {\n\t\t\t\ta, _ := json.Marshal(msg)\n\t\t\t\treturn string(a)\n\t\t\t}\n\t\t\tswitch values := msg[\"values\"].(type) {\n\t\t\tcase map[string]interface{}:\n\t\t\t\tfor _, k := range keys {\n\t\t\t\t\tdelete(values, k)\n\t\t\t\t}\n\t\t\t\tmsg[\"values\"] = values\n\t\t\t}\n\t\t\ta, _ := 
json.Marshal(msg)\n\t\t\treturn string(a)\n\t\t}\n\t\treturn \"\"\n\t},\n}\n"
  },
  {
    "path": "pkg/actions/http_action/http_action_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage http_action\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/actions\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  *formatters.EventMsg\n\toutput interface{}\n}\n\nvar testset = map[string]struct {\n\tactionType string\n\taction     map[string]interface{}\n\ttests      []item\n}{\n\t\"default_values\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":  \"http\",\n\t\t\t\"name\":  \"act1\",\n\t\t\t\"URL\":   \"http://localhost:8080\",\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"Input\": map[string]interface{}{\n\t\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"with_simple_template\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":  \"http\",\n\t\t\t\"name\":  \"act1\",\n\t\t\t\"url\":   \"http://localhost:8080\",\n\t\t\t\"body\":  `{{ name .Input }}`,\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: 
[]item{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: \"sub1\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub2\",\n\t\t\t\t},\n\t\t\t\toutput: \"sub2\",\n\t\t\t},\n\t\t},\n\t},\n\t\"remove_all_tags\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":  \"http\",\n\t\t\t\"name\":  \"act1\",\n\t\t\t\"url\":   \"http://localhost:8080\",\n\t\t\t\"body\":  `{{ withTags .Input }}`,\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"remove_some_tags\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":  \"http\",\n\t\t\t\"name\":  \"act1\",\n\t\t\t\"url\":   \"http://localhost:8080\",\n\t\t\t\"body\":  `{{ withoutTags .Input \"tag1\" }}`,\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: 
map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"select_some_tags\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":  \"http\",\n\t\t\t\"name\":  \"act1\",\n\t\t\t\"url\":   \"http://localhost:8080\",\n\t\t\t\"body\":  `{{ withTags .Input \"tag1\" }}`,\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t\"tag3\": \"3\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"remove_all_values\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":  \"http\",\n\t\t\t\"name\":  \"act1\",\n\t\t\t\"url\":   \"http://localhost:8080\",\n\t\t\t\"body\":  `{{ withValues .Input }}`,\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: 
map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"remove_some_values\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":  \"http\",\n\t\t\t\"name\":  \"act1\",\n\t\t\t\"url\":   \"http://localhost:8080\",\n\t\t\t\"body\":  `{{ withoutValues .Input \"val1\"}}`,\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": \"1\",\n\t\t\t\t\t\t\"val2\": 
\"2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t\t\"values\": map[string]interface{}{\n\t\t\t\t\t\t\"val2\": \"2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"select_some_values\": {\n\t\tactionType: actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":  \"http\",\n\t\t\t\"name\":  \"act1\",\n\t\t\t\"url\":   \"http://localhost:8080\",\n\t\t\t\"body\":  `{{ withValues .Input \"val1\" }}`,\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t\t\"values\": map[string]interface{}{\n\t\t\t\t\t\t\"val1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val2\": \"2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"select_tags_and_values\": {\n\t\tactionType: 
actionType,\n\t\taction: map[string]interface{}{\n\t\t\t\"type\":  \"http\",\n\t\t\t\"name\":  \"act1\",\n\t\t\t\"url\":   \"http://localhost:8080\",\n\t\t\t\"body\":  `{{ withTags (withValues .Input \"val1\") \"tag1\" }}`,\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t\t\"values\": map[string]interface{}{\n\t\t\t\t\t\t\"val1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": \"1\",\n\t\t\t\t\t\t\"val2\": \"2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: map[string]interface{}{\n\t\t\t\t\t\"name\": \"sub1\",\n\t\t\t\t\t\"values\": map[string]interface{}{\n\t\t\t\t\t\t\"val1\": \"1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestHTTPAction(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif ai, ok := actions.Actions[ts.actionType]; ok {\n\t\t\tt.Log(\"found action\")\n\t\t\ta := ai()\n\t\t\terr := a.Init(ts.action, actions.WithLogger(log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)))\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize action: %v\", 
err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"action: %+v\", a)\n\t\t\tmux := http.NewServeMux()\n\t\t\tmux.Handle(\"/\", echo())\n\t\t\tah, ok := a.(*httpAction)\n\t\t\tif !ok {\n\t\t\t\tt.Errorf(\"failed to assert action type: %T\", a)\n\t\t\t\tt.Fail()\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// start http server\n\t\t\turlAddr, err := url.Parse(ah.URL)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to parse URL: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts := &http.Server{\n\t\t\t\tAddr:    urlAddr.Host,\n\t\t\t\tHandler: mux,\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\tif err := s.ListenAndServe(); err != nil {\n\t\t\t\t\tif !errors.Is(err, http.ErrServerClosed) {\n\t\t\t\t\t\tt.Logf(\"failed to start http server: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\t// wait for server\n\t\t\ttime.Sleep(time.Second)\n\t\t\t//\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\tres, err := a.Run(context.TODO(), &actions.Context{Input: item.input})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Errorf(\"failed at %s item %d, %v\", name, i, err)\n\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tt.Logf(\"Run result: %+v\", string(res.([]byte)))\n\t\t\t\t\tvar result interface{}\n\t\t\t\t\terr = json.Unmarshal(res.([]byte), &result)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Errorf(\"failed at %s item %d, %v\", name, i, err)\n\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif !reflect.DeepEqual(result, item.output) {\n\t\t\t\t\t\tt.Errorf(\"failed at %s item %d, expected %+v(%T), got: %+v(%T)\", name, i, item.output, item.output, result, result)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)\n\t\t\ts.Shutdown(ctx)\n\t\t\tcancel()\n\t\t} else {\n\t\t\tt.Errorf(\"action %s not found\", ts.actionType)\n\t\t}\n\t}\n}\n\nfunc echo() http.Handler {\n\treturn http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) {\n\t\tb, err := io.ReadAll(r.Body)\n\t\tdefer r.Body.Close()\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, \"%v\", err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(b))\n\t})\n}\n"
  },
  {
    "path": "pkg/actions/script_action/options.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage script_action\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n)\n\nfunc (s *scriptAction) WithTargets(map[string]*types.TargetConfig) {}\n\nfunc (s *scriptAction) WithLogger(logger *log.Logger) {\n\tif s.Debug && logger != nil {\n\t\ts.logger = log.New(logger.Writer(), loggingPrefix, logger.Flags())\n\t} else if s.Debug {\n\t\ts.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n"
  },
  {
    "path": "pkg/actions/script_action/script_action.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage script_action\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strings\"\n\n\t\"github.com/openconfig/gnmic/pkg/actions\"\n)\n\nconst (\n\tloggingPrefix = \"[script_action] \"\n\tactionType    = \"script\"\n\tdefaultShell  = \"/bin/bash\"\n)\n\nfunc init() {\n\tactions.Register(actionType, func() actions.Action {\n\t\treturn &scriptAction{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\ntype scriptAction struct {\n\tName    string `mapstructure:\"name,omitempty\"`\n\tShell   string `mapstructure:\"shell,omitempty\"`\n\tCommand string `mapstructure:\"command,omitempty\"`\n\tFile    string `mapstructure:\"file,omitempty\"`\n\tDebug   bool   `mapstructure:\"debug,omitempty\"`\n\n\tlogger *log.Logger\n}\n\nfunc (s *scriptAction) Init(cfg map[string]interface{}, opts ...actions.Option) error {\n\terr := actions.DecodeConfig(cfg, s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(s)\n\t}\n\tif s.Name == \"\" {\n\t\treturn fmt.Errorf(\"action type %q missing name field\", actionType)\n\t}\n\terr = s.setDefaults()\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.logger.Printf(\"action name %q of type %q initialized: %v\", s.Name, actionType, s)\n\treturn nil\n}\n\nfunc (s *scriptAction) Run(_ context.Context, aCtx *actions.Context) (interface{}, error) {\n\tif s.Command == \"\" && s.File == \"\" {\n\t\treturn nil, nil\n\t}\n\tstdout := new(bytes.Buffer)\n\tstderr := new(bytes.Buffer)\n\tvar cmd 
*exec.Cmd\n\tif s.Command != \"\" {\n\t\tcmds := strings.Split(s.Command, \"\\n\")\n\t\targs := append([]string{\"-c\"}, strings.Join(cmds, \"; \"))\n\t\tcmd = exec.Command(s.Shell, args...)\n\t}\n\tif s.File != \"\" {\n\t\tcmd = exec.Command(s.File)\n\t}\n\tcmd.Stdout = stdout\n\tcmd.Stderr = stderr\n\tcmd.Env = os.Environ()\n\tfor k, v := range aCtx.Env {\n\t\tk = strings.ReplaceAll(k, \"-\", \"_\")\n\t\tcmd.Env = append(cmd.Env, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\tfor k, v := range aCtx.Vars {\n\t\tk = strings.ReplaceAll(k, \"-\", \"_\")\n\t\tcmd.Env = append(cmd.Env, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%v: %s\", err, stderr.String())\n\t}\n\tif stderr.String() != \"\" {\n\t\treturn map[string]string{\"stderr\": stderr.String()}, nil\n\t}\n\treturn map[string]string{\"stdout\": stdout.String()}, nil\n}\n\nfunc (s *scriptAction) NName() string { return s.Name }\n\nfunc (s *scriptAction) setDefaults() error {\n\tif s.Shell == \"\" {\n\t\ts.Shell = defaultShell\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/actions/template_action/options.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage template_action\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n)\n\nfunc (t *templateAction) WithTargets(map[string]*types.TargetConfig) {}\n\nfunc (t *templateAction) WithLogger(logger *log.Logger) {\n\tif t.Debug && logger != nil {\n\t\tt.logger = log.New(logger.Writer(), loggingPrefix, logger.Flags())\n\t} else if t.Debug {\n\t\tt.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n"
  },
  {
    "path": "pkg/actions/template_action/template_action.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage template_action\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"text/template\"\n\n\t\"github.com/openconfig/gnmic/pkg/actions\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n)\n\nconst (\n\tloggingPrefix   = \"[template_action] \"\n\tactionType      = \"template\"\n\tdefaultTemplate = \"{{ . }}\"\n)\n\nfunc init() {\n\tactions.Register(actionType, func() actions.Action {\n\t\treturn &templateAction{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\ntype templateAction struct {\n\tName         string `mapstructure:\"name,omitempty\"`\n\tTemplate     string `mapstructure:\"template,omitempty\"`\n\tTemplateFile string `mapstructure:\"template-file,omitempty\"`\n\tOutput       string `mapstructure:\"output,omitempty\"`\n\tDebug        bool   `mapstructure:\"debug,omitempty\"`\n\n\ttpl    *template.Template\n\tlogger *log.Logger\n}\n\nfunc (t *templateAction) Init(cfg map[string]interface{}, opts ...actions.Option) error {\n\terr := actions.DecodeConfig(cfg, t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(t)\n\t}\n\tif t.Name == \"\" {\n\t\treturn fmt.Errorf(\"action type %q missing name field\", actionType)\n\t}\n\terr = t.setDefaults()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif t.Template != \"\" {\n\t\tt.tpl, err = gtemplate.CreateTemplate(fmt.Sprintf(\"%s-template-action\", t.Name), t.Template)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if t.TemplateFile != \"\" {\n\t\tt.tpl, err = 
template.ParseGlob(t.TemplateFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tt.tpl = t.tpl.Funcs(gtemplate.NewTemplateEngine().CreateFuncs()).\n\t\t\tOption(\"missingkey=zero\")\n\t}\n\tt.logger.Printf(\"action name %q of type %q initialized: %v\", t.Name, actionType, t)\n\treturn nil\n}\n\nfunc (t *templateAction) Run(_ context.Context, aCtx *actions.Context) (interface{}, error) {\n\tb := new(bytes.Buffer)\n\terr := t.tpl.Execute(b, &actions.Context{\n\t\tInput:   aCtx.Input,\n\t\tEnv:     aCtx.Env,\n\t\tVars:    aCtx.Vars,\n\t\tTargets: aCtx.Targets,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout := b.String()\n\tif t.Debug {\n\t\tt.logger.Printf(\"template output: %s\", out)\n\t}\n\tswitch t.Output {\n\tcase \"stdout\":\n\t\tfmt.Fprint(os.Stdout, out)\n\tcase \"\":\n\tdefault:\n\t\tfi, err := os.Create(t.Output)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t_, err = fi.Write(b.Bytes())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn out, nil\n}\n\nfunc (t *templateAction) NName() string { return t.Name }\n\nfunc (t *templateAction) setDefaults() error {\n\tif t.Template == \"\" && t.TemplateFile == \"\" {\n\t\tt.Template = defaultTemplate\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/api/gnmi_msgs.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage api\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmi/proto/gnmi_ext\"\n\tgvalue \"github.com/openconfig/gnmi/value\"\n\t\"github.com/openconfig/gnmic/pkg/api/path\"\n\t\"google.golang.org/protobuf/proto\"\n\t\"google.golang.org/protobuf/types/known/durationpb\"\n)\n\nconst (\n\tDefaultGNMIVersion = \"0.7.0\"\n\tencodingJSON       = \"json\"\n\tencodingJSON_IETF  = \"json_ietf\"\n)\n\n// GNMIOption is a function that acts on the supplied proto.Message.\n// The message is expected to be one of the protobuf defined gNMI messages\n// exchanged by the RPCs or any of the nested messages.\ntype GNMIOption func(proto.Message) error\n\n// ErrInvalidMsgType is returned by a GNMIOption in case the Option is supplied\n// an unexpected proto.Message\nvar ErrInvalidMsgType = errors.New(\"invalid message type\")\n\n// ErrInvalidValue is returned by a GNMIOption in case the Option is supplied\n// an unexpected value.\nvar ErrInvalidValue = errors.New(\"invalid value\")\n\n// apply is a helper function that simply applies the options to the proto.Message.\n// It returns an error if any of the options fails.\nfunc apply(m proto.Message, opts ...GNMIOption) error {\n\tfor _, o := range opts {\n\t\tif err := o(m); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// NewCapabilitiesRequest creates a new *gnmi.CapabilityeRequest using the provided GNMIOption 
list.\n// returns an error in case one of the options is invalid\nfunc NewCapabilitiesRequest(opts ...GNMIOption) (*gnmi.CapabilityRequest, error) {\n\tm := new(gnmi.CapabilityRequest)\n\terr := apply(m, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// NewCapabilitiesResponse creates a new *gnmi.CapabilityResponse using the provided GNMIOption list.\n// returns an error in case one of the options is invalid\nfunc NewCapabilitiesResponse(opts ...GNMIOption) (*gnmi.CapabilityResponse, error) {\n\tm := new(gnmi.CapabilityResponse)\n\terr := apply(m, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif m.GNMIVersion == \"\" {\n\t\tm.GNMIVersion = DefaultGNMIVersion\n\t}\n\treturn m, nil\n}\n\n// NewGetRequest creates a new *gnmi.GetRequest using the provided GNMIOption list.\n// returns an error in case one of the options is invalid\nfunc NewGetRequest(opts ...GNMIOption) (*gnmi.GetRequest, error) {\n\tm := new(gnmi.GetRequest)\n\terr := apply(m, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// NewGetResponse creates a new *gnmi.GetResponse using the provided GNMIOption list.\n// returns an error in case one of the options is invalid\nfunc NewGetResponse(opts ...GNMIOption) (*gnmi.GetResponse, error) {\n\tm := new(gnmi.GetResponse)\n\terr := apply(m, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// NewSetRequest creates a new *gnmi.SetRequest using the provided GNMIOption list.\n// returns an error in case one of the options is invalid\nfunc NewSetRequest(opts ...GNMIOption) (*gnmi.SetRequest, error) {\n\tm := new(gnmi.SetRequest)\n\terr := apply(m, opts...)\n\treturn m, err\n}\n\n// NewSetResponse creates a new *gnmi.SetResponse using the provided GNMIOption list.\n// returns an error in case one of the options is invalid\nfunc NewSetResponse(opts ...GNMIOption) (*gnmi.SetResponse, error) {\n\tm := new(gnmi.SetResponse)\n\terr := apply(m, opts...)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// NewSubscribeRequest creates a new *gnmi.SubscribeRequest using the provided GNMIOption list.\n// returns an error in case one of the options is invalid\nfunc NewSubscribeRequest(opts ...GNMIOption) (*gnmi.SubscribeRequest, error) {\n\tm := &gnmi.SubscribeRequest{\n\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n\t\t\tSubscribe: new(gnmi.SubscriptionList),\n\t\t},\n\t}\n\terr := apply(m, opts...)\n\treturn m, err\n}\n\n// NewSubscribePollRequest creates a new *gnmi.SubscribeRequest with request type Poll\n// using the provided GNMIOption list.\n// returns an error in case one of the options is invalid\nfunc NewSubscribePollRequest(opts ...GNMIOption) (*gnmi.SubscribeRequest, error) {\n\tm := &gnmi.SubscribeRequest{\n\t\tRequest: &gnmi.SubscribeRequest_Poll{\n\t\t\tPoll: new(gnmi.Poll),\n\t\t},\n\t}\n\terr := apply(m, opts...)\n\treturn m, err\n}\n\n// NewSubscribeResponse creates a *gnmi.SubscribeResponse with a gnmi.SubscribeResponse_Update as\n// response type.\nfunc NewSubscribeResponse(opts ...GNMIOption) (*gnmi.SubscribeResponse, error) {\n\tm := &gnmi.SubscribeResponse{\n\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\tUpdate: new(gnmi.Notification),\n\t\t},\n\t}\n\terr := apply(m, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// NewSubscribeResponse creates a *gnmi.SubscribeResponse with a gnmi.SubscribeResponse_SyncResponse as\n// response type.\nfunc NewSubscribeSyncResponse(opts ...GNMIOption) (*gnmi.SubscribeResponse, error) {\n\tm := &gnmi.SubscribeResponse{\n\t\tResponse: &gnmi.SubscribeResponse_SyncResponse{\n\t\t\tSyncResponse: true,\n\t\t},\n\t}\n\terr := apply(m, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// Messages options\n\n// Version sets the provided gNMI version string in a gnmi.CapabilityResponse message.\nfunc Version(v string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil 
{\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.CapabilityResponse:\n\t\t\tmsg.GNMIVersion = v\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Version: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// SupportedEncoding creates an GNMIOption that sets the provided encodings as supported encodings in a gnmi.CapabilitiesResponse\nfunc SupportedEncoding(encodings ...string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.CapabilityResponse:\n\t\t\tif len(msg.SupportedEncodings) == 0 {\n\t\t\t\tmsg.SupportedEncodings = make([]gnmi.Encoding, 0)\n\t\t\t}\n\t\t\tfor _, encoding := range encodings {\n\t\t\t\tenc, ok := gnmi.Encoding_value[strings.ToUpper(encoding)]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"option SupportedEncoding: %w: %s\", ErrInvalidValue, encoding)\n\t\t\t\t}\n\t\t\t\tmsg.SupportedEncodings = append(msg.SupportedEncodings, gnmi.Encoding(enc))\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option SupportedEncoding: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// SupportedModel creates an GNMIOption that sets the provided name, org and version as a supported model in a gnmi.CapabilitiesResponse.\nfunc SupportedModel(name, org, version string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.CapabilityResponse:\n\t\t\tif len(msg.SupportedModels) == 0 {\n\t\t\t\tmsg.SupportedModels = make([]*gnmi.ModelData, 0)\n\t\t\t}\n\t\t\tmsg.SupportedModels = append(msg.SupportedModels,\n\t\t\t\t&gnmi.ModelData{\n\t\t\t\t\tName:         name,\n\t\t\t\t\tOrganization: org,\n\t\t\t\t\tVersion:      
version,\n\t\t\t\t})\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option SupportedModel: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// Extension creates a GNMIOption that applies the supplied gnmi_ext.Extension to the provided\n// proto.Message.\nfunc Extension(ext *gnmi_ext.Extension) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.CapabilityRequest:\n\t\t\tif len(msg.Extension) == 0 {\n\t\t\t\tmsg.Extension = make([]*gnmi_ext.Extension, 0)\n\t\t\t}\n\t\t\tmsg.Extension = append(msg.Extension, ext)\n\t\tcase *gnmi.GetRequest:\n\t\t\tif len(msg.Extension) == 0 {\n\t\t\t\tmsg.Extension = make([]*gnmi_ext.Extension, 0)\n\t\t\t}\n\t\t\tmsg.Extension = append(msg.Extension, ext)\n\t\tcase *gnmi.GetResponse:\n\t\t\tif len(msg.Extension) == 0 {\n\t\t\t\tmsg.Extension = make([]*gnmi_ext.Extension, 0)\n\t\t\t}\n\t\t\tmsg.Extension = append(msg.Extension, ext)\n\t\tcase *gnmi.SetRequest:\n\t\t\tif len(msg.Extension) == 0 {\n\t\t\t\tmsg.Extension = make([]*gnmi_ext.Extension, 0)\n\t\t\t}\n\t\t\tmsg.Extension = append(msg.Extension, ext)\n\t\tcase *gnmi.SetResponse:\n\t\t\tif len(msg.Extension) == 0 {\n\t\t\t\tmsg.Extension = make([]*gnmi_ext.Extension, 0)\n\t\t\t}\n\t\t\tmsg.Extension = append(msg.Extension, ext)\n\t\tcase *gnmi.SubscribeRequest:\n\t\t\tif len(msg.Extension) == 0 {\n\t\t\t\tmsg.Extension = make([]*gnmi_ext.Extension, 0)\n\t\t\t}\n\t\t\tmsg.Extension = append(msg.Extension, ext)\n\t\tcase *gnmi.SubscribeResponse:\n\t\t\tif len(msg.Extension) == 0 {\n\t\t\t\tmsg.Extension = make([]*gnmi_ext.Extension, 0)\n\t\t\t}\n\t\t\tmsg.Extension = append(msg.Extension, ext)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc Extension_CommitRequest(id string, dur time.Duration) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn 
ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SetRequest:\n\t\t\tfn := Extension(\n\t\t\t\t&gnmi_ext.Extension{\n\t\t\t\t\tExt: &gnmi_ext.Extension_Commit{\n\t\t\t\t\t\tCommit: &gnmi_ext.Commit{\n\t\t\t\t\t\t\tId: id,\n\t\t\t\t\t\t\tAction: &gnmi_ext.Commit_Commit{\n\t\t\t\t\t\t\t\tCommit: &gnmi_ext.CommitRequest{\n\t\t\t\t\t\t\t\t\tRollbackDuration: durationpb.New(dur),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\t\t\treturn fn(msg)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Extension_CommitRequest: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t}\n}\n\nfunc Extension_CommitConfirm(id string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SetRequest:\n\t\t\tfn := Extension(\n\t\t\t\t&gnmi_ext.Extension{\n\t\t\t\t\tExt: &gnmi_ext.Extension_Commit{\n\t\t\t\t\t\tCommit: &gnmi_ext.Commit{\n\t\t\t\t\t\t\tId: id,\n\t\t\t\t\t\t\tAction: &gnmi_ext.Commit_Confirm{\n\t\t\t\t\t\t\t\tConfirm: &gnmi_ext.CommitConfirm{},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\t\t\treturn fn(msg)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Extension_CommitConfirm: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t}\n}\n\nfunc Extension_CommitCancel(id string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SetRequest:\n\t\t\tfn := Extension(\n\t\t\t\t&gnmi_ext.Extension{\n\t\t\t\t\tExt: &gnmi_ext.Extension_Commit{\n\t\t\t\t\t\tCommit: &gnmi_ext.Commit{\n\t\t\t\t\t\t\tId: id,\n\t\t\t\t\t\t\tAction: &gnmi_ext.Commit_Cancel{\n\t\t\t\t\t\t\t\tCancel: 
&gnmi_ext.CommitCancel{},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\t\t\treturn fn(msg)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Extension_CommitCancel: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t}\n}\n\nfunc Extension_CommitSetRollbackDuration(id string, dur time.Duration) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SetRequest:\n\t\t\tfn := Extension(\n\t\t\t\t&gnmi_ext.Extension{\n\t\t\t\t\tExt: &gnmi_ext.Extension_Commit{\n\t\t\t\t\t\tCommit: &gnmi_ext.Commit{\n\t\t\t\t\t\t\tId: id,\n\t\t\t\t\t\t\tAction: &gnmi_ext.Commit_SetRollbackDuration{\n\t\t\t\t\t\t\t\tSetRollbackDuration: &gnmi_ext.CommitSetRollbackDuration{\n\t\t\t\t\t\t\t\t\tRollbackDuration: durationpb.New(dur),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\t\t\treturn fn(msg)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Extension_CommitSetRollbackDuration: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t}\n}\n\nfunc Extension_Depth(lvl uint32) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.GetRequest, *gnmi.SubscribeRequest:\n\t\t\tfn := Extension(\n\t\t\t\t&gnmi_ext.Extension{\n\t\t\t\t\tExt: &gnmi_ext.Extension_Depth{\n\t\t\t\t\t\tDepth: &gnmi_ext.Depth{\n\t\t\t\t\t\t\tLevel: lvl,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\t\t\treturn fn(msg)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Extension_Depth: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t}\n}\n\n// Extension_HistorySnapshotTime creates a GNMIOption that adds a gNMI extension of\n// type History Snapshot with the supplied snapshot time.\n// the snapshot value can be nanoseconds since Unix epoch or a date in RFC3339 
format\nfunc Extension_HistorySnapshotTime(tm time.Time) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SubscribeRequest:\n\t\t\tfn := Extension(\n\t\t\t\t&gnmi_ext.Extension{\n\t\t\t\t\tExt: &gnmi_ext.Extension_History{\n\t\t\t\t\t\tHistory: &gnmi_ext.History{\n\t\t\t\t\t\t\tRequest: &gnmi_ext.History_SnapshotTime{\n\t\t\t\t\t\t\t\tSnapshotTime: tm.UnixNano(),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\t\t\treturn fn(msg)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Extension_HistorySnapshotTime: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t}\n}\n\n// Extension_HistoryRange creates a GNMIOption that adds a gNMI extension of\n// type History TimeRange with the supplied start and end times.\n// the start/end values can be nanoseconds since Unix epoch or a date in RFC3339 format\nfunc Extension_HistoryRange(start, end time.Time) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SubscribeRequest:\n\t\t\tfn := Extension(\n\t\t\t\t&gnmi_ext.Extension{\n\t\t\t\t\tExt: &gnmi_ext.Extension_History{\n\t\t\t\t\t\tHistory: &gnmi_ext.History{\n\t\t\t\t\t\t\tRequest: &gnmi_ext.History_Range{\n\t\t\t\t\t\t\t\tRange: &gnmi_ext.TimeRange{\n\t\t\t\t\t\t\t\t\tStart: start.UnixNano(),\n\t\t\t\t\t\t\t\t\tEnd:   end.UnixNano(),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\t\t\treturn fn(msg)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Extension_HistoryRange: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t}\n}\n\n// Prefix creates a GNMIOption that creates a *gnmi.Path and adds it to the supplied\n// proto.Message (as a Path Prefix).\n// The proto.Message can be a *gnmi.GetRequest, 
*gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc Prefix(prefix string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tvar err error\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.GetRequest:\n\t\t\tmsg.Prefix, err = path.CreatePrefix(prefix, \"\")\n\t\tcase *gnmi.SetRequest:\n\t\t\tmsg.Prefix, err = path.CreatePrefix(prefix, \"\")\n\t\tcase *gnmi.SetResponse:\n\t\t\tmsg.Prefix, err = path.CreatePrefix(prefix, \"\")\n\t\tcase *gnmi.SubscribeRequest:\n\t\t\tswitch msg := msg.Request.(type) {\n\t\t\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\t\t\tif msg.Subscribe == nil {\n\t\t\t\t\tmsg.Subscribe = new(gnmi.SubscriptionList)\n\t\t\t\t}\n\t\t\t\tmsg.Subscribe.Prefix, err = path.CreatePrefix(prefix, \"\")\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"option Prefix: %w: %T\", ErrInvalidMsgType, msg)\n\t\t\t}\n\t\tcase *gnmi.Notification:\n\t\t\tmsg.Prefix, err = path.CreatePrefix(prefix, \"\")\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Prefix: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%w: %v\", ErrInvalidValue, err)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// Target creates a GNMIOption that set the gnmi Prefix target to the supplied string value.\n// The proto.Message can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc Target(target string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tif target == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tvar err error\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.GetRequest:\n\t\t\tif msg.Prefix == nil {\n\t\t\t\tmsg.Prefix = new(gnmi.Path)\n\t\t\t}\n\t\t\tmsg.Prefix.Target = target\n\t\tcase *gnmi.SetRequest:\n\t\t\tif msg.Prefix == nil {\n\t\t\t\tmsg.Prefix = 
new(gnmi.Path)\n\t\t\t}\n\t\t\tmsg.Prefix.Target = target\n\t\tcase *gnmi.SubscribeRequest:\n\t\t\tswitch msg := msg.Request.(type) {\n\t\t\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\t\t\tif msg.Subscribe == nil {\n\t\t\t\t\tmsg.Subscribe = new(gnmi.SubscriptionList)\n\t\t\t\t}\n\t\t\t\tif msg.Subscribe.Prefix == nil {\n\t\t\t\t\tmsg.Subscribe.Prefix = new(gnmi.Path)\n\t\t\t\t}\n\t\t\t\tmsg.Subscribe.Prefix.Target = target\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"option Target: %w: %T\", ErrInvalidMsgType, msg)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Target: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn err\n\t}\n}\n\n// Path creates a GNMIOption that creates a *gnmi.Path and adds it to the supplied proto.Message\n// which can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.Subscription.\nfunc Path(p string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tvar err error\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.GetRequest:\n\t\t\tgp, err := path.ParsePath(p)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%w: %v\", ErrInvalidValue, err)\n\t\t\t}\n\t\t\tif len(msg.Path) == 0 {\n\t\t\t\tmsg.Path = make([]*gnmi.Path, 0)\n\t\t\t}\n\t\t\tmsg.Path = append(msg.Path, gp)\n\t\tcase *gnmi.Update:\n\t\t\tmsg.Path, err = path.ParsePath(p)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%w: %v\", ErrInvalidValue, err)\n\t\t\t}\n\t\tcase *gnmi.UpdateResult:\n\t\t\tmsg.Path, err = path.ParsePath(p)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%w: %v\", ErrInvalidValue, err)\n\t\t\t}\n\t\tcase *gnmi.Subscription:\n\t\t\tmsg.Path, err = path.ParsePath(p)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%w: %v\", ErrInvalidValue, err)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Path: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// Encoding creates a GNMIOption that adds the 
encoding type to the supplied proto.Message\n// which can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc Encoding(encoding string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tenc, ok := gnmi.Encoding_value[strings.ToUpper(encoding)]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"option Encoding: %w: %s\", ErrInvalidValue, encoding)\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.GetRequest:\n\t\t\tmsg.Encoding = gnmi.Encoding(enc)\n\t\tcase *gnmi.SubscribeRequest:\n\t\t\tswitch msg := msg.Request.(type) {\n\t\t\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\t\t\tif msg.Subscribe == nil {\n\t\t\t\t\tmsg.Subscribe = new(gnmi.SubscriptionList)\n\t\t\t\t}\n\t\t\t\tmsg.Subscribe.Encoding = gnmi.Encoding(enc)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Encoding: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// EncodingJSON creates a GNMIOption that sets the encoding type to JSON in a gnmi.GetRequest or\n// gnmi.SubscribeRequest.\nfunc EncodingJSON() func(msg proto.Message) error {\n\treturn Encoding(\"JSON\")\n}\n\n// EncodingBYTES creates a GNMIOption that sets the encoding type to BYTES in a gnmi.GetRequest or\n// gnmi.SubscribeRequest.\nfunc EncodingBYTES() func(msg proto.Message) error {\n\treturn Encoding(\"BYTES\")\n}\n\n// EncodingPROTO creates a GNMIOption that sets the encoding type to PROTO in a gnmi.GetRequest or\n// gnmi.SubscribeRequest.\nfunc EncodingPROTO() func(msg proto.Message) error {\n\treturn Encoding(\"PROTO\")\n}\n\n// EncodingASCII creates a GNMIOption that sets the encoding type to ASCII in a gnmi.GetRequest or\n// gnmi.SubscribeRequest.\nfunc EncodingASCII() func(msg proto.Message) error {\n\treturn Encoding(\"ASCII\")\n}\n\n// EncodingJSON_IETF creates a GNMIOption that sets the encoding type to JSON_IETF in a gnmi.GetRequest or\n// 
gnmi.SubscribeRequest.\nfunc EncodingJSON_IETF() func(msg proto.Message) error {\n\treturn Encoding(\"JSON_IETF\")\n}\n\n// EncodingCustom creates a GNMIOption that adds the encoding type to the supplied proto.Message\n// which can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe.\n// Unlike Encoding, this GNMIOption does not validate if the provided encoding is defined by the gNMI spec.\nfunc EncodingCustom(enc int) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.GetRequest:\n\t\t\tmsg.Encoding = gnmi.Encoding(enc)\n\t\tcase *gnmi.SubscribeRequest:\n\t\t\tswitch msg := msg.Request.(type) {\n\t\t\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\t\t\tif msg.Subscribe == nil {\n\t\t\t\t\tmsg.Subscribe = new(gnmi.SubscriptionList)\n\t\t\t\t}\n\t\t\t\tmsg.Subscribe.Encoding = gnmi.Encoding(enc)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option EncodingCustom: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// DataType creates a GNMIOption that adds the data type to the supplied proto.Message\n// which must be a *gnmi.GetRequest.\nfunc DataType(datat string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tif datat == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.GetRequest:\n\t\t\tdt, ok := gnmi.GetRequest_DataType_value[strings.ToUpper(datat)]\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"option DataType: %w: %s\", ErrInvalidValue, datat)\n\t\t\t}\n\t\t\tmsg.Type = gnmi.GetRequest_DataType(dt)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option DataType: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// DataTypeALL creates a GNMIOption that sets the gnmi.GetRequest data type to 
ALL\nfunc DataTypeALL() func(msg proto.Message) error {\n\treturn DataType(\"ALL\")\n}\n\n// DataTypeCONFIG creates a GNMIOption that sets the gnmi.GetRequest data type to CONFIG\nfunc DataTypeCONFIG() func(msg proto.Message) error {\n\treturn DataType(\"CONFIG\")\n}\n\n// DataTypeSTATE creates a GNMIOption that sets the gnmi.GetRequest data type to STATE\nfunc DataTypeSTATE() func(msg proto.Message) error {\n\treturn DataType(\"STATE\")\n}\n\n// DataTypeOPERATIONAL creates a GNMIOption that sets the gnmi.GetRequest data type to OPERATIONAL\nfunc DataTypeOPERATIONAL() func(msg proto.Message) error {\n\treturn DataType(\"OPERATIONAL\")\n}\n\n// UseModel creates a GNMIOption that add a gnmi.DataModel to a gnmi.GetRequest or gnmi.SubscribeRequest\n// based on the name, org and version strings provided.\nfunc UseModel(name, org, version string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.GetRequest:\n\t\t\tif len(msg.UseModels) == 0 {\n\t\t\t\tmsg.UseModels = make([]*gnmi.ModelData, 0)\n\t\t\t}\n\t\t\tmsg.UseModels = append(msg.UseModels,\n\t\t\t\t&gnmi.ModelData{\n\t\t\t\t\tName:         name,\n\t\t\t\t\tOrganization: org,\n\t\t\t\t\tVersion:      version,\n\t\t\t\t})\n\n\t\tcase *gnmi.SubscribeRequest:\n\t\t\tswitch msg := msg.Request.(type) {\n\t\t\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\t\t\tif msg.Subscribe == nil {\n\t\t\t\t\tmsg.Subscribe = new(gnmi.SubscriptionList)\n\t\t\t\t}\n\t\t\t\tif len(msg.Subscribe.UseModels) == 0 {\n\t\t\t\t\tmsg.Subscribe.UseModels = make([]*gnmi.ModelData, 0)\n\t\t\t\t}\n\t\t\t\tmsg.Subscribe.UseModels = append(msg.Subscribe.UseModels,\n\t\t\t\t\t&gnmi.ModelData{\n\t\t\t\t\t\tName:         name,\n\t\t\t\t\t\tOrganization: org,\n\t\t\t\t\t\tVersion:      version,\n\t\t\t\t\t})\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option UseModel: %w: %T\", 
ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// Update creates a GNMIOption that creates a *gnmi.Update message and adds it to the supplied proto.Message,\n// the supplied message must be a *gnmi.SetRequest.\nfunc Update(opts ...GNMIOption) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SetRequest:\n\t\t\tif len(msg.Update) == 0 {\n\t\t\t\tmsg.Update = make([]*gnmi.Update, 0)\n\t\t\t}\n\t\t\tupd := new(gnmi.Update)\n\t\t\terr := apply(upd, opts...)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmsg.Update = append(msg.Update, upd)\n\t\tcase *gnmi.Notification:\n\t\t\tif len(msg.Update) == 0 {\n\t\t\t\tmsg.Update = make([]*gnmi.Update, 0)\n\t\t\t}\n\t\t\tupd := new(gnmi.Update)\n\t\t\terr := apply(upd, opts...)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmsg.Update = append(msg.Update, upd)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Update: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// Replace creates a GNMIOption that creates a *gnmi.Update message and adds it to the supplied proto.Message.\n// the supplied message must be a *gnmi.SetRequest.\nfunc Replace(opts ...GNMIOption) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SetRequest:\n\t\t\tif len(msg.Update) == 0 {\n\t\t\t\tmsg.Update = make([]*gnmi.Update, 0)\n\t\t\t}\n\t\t\tupd := new(gnmi.Update)\n\t\t\terr := apply(upd, opts...)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmsg.Replace = append(msg.Replace, upd)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Replace: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// Value creates a GNMIOption that creates a *gnmi.TypedValue and adds it to 
the supplied proto.Message.\n// the supplied message must be a *gnmi.Update.\n// If a map is supplied as `data interface{}` it has to be a map[string]interface{}.\nfunc Value(data interface{}, encoding string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tvar err error\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.Update:\n\t\t\tmsg.Val, err = value(data, encoding)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Value: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc value(data interface{}, encoding string) (*gnmi.TypedValue, error) {\n\tswitch data := data.(type) {\n\tcase []interface{}, []string:\n\t\tswitch strings.ToLower(encoding) {\n\t\tcase \"\":\n\t\t\tencoding = encodingJSON\n\t\t\tfallthrough\n\t\tcase encodingJSON:\n\t\t\tb, err := json.Marshal(data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\tJsonVal: bytes.Trim(b, \" \\r\\n\\t\"),\n\t\t\t\t}}, nil\n\t\tcase encodingJSON_IETF:\n\t\t\tb, err := json.Marshal(data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\tJsonIetfVal: bytes.Trim(b, \" \\r\\n\\t\"),\n\t\t\t\t}}, nil\n\t\tdefault:\n\t\t\treturn gvalue.FromScalar(data)\n\t\t}\n\tcase map[string]interface{}:\n\t\tswitch strings.ToLower(encoding) {\n\t\tcase \"\":\n\t\t\tencoding = encodingJSON\n\t\t\tfallthrough\n\t\tcase encodingJSON:\n\t\t\tb := new(bytes.Buffer)\n\t\t\tenc := json.NewEncoder(b)\n\t\t\tenc.SetEscapeHTML(false)\n\t\t\terr := enc.Encode(data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\tJsonVal: bytes.Trim(b.Bytes(), \" \\r\\n\\t\"),\n\t\t\t\t}}, 
nil\n\t\tcase encodingJSON_IETF:\n\t\t\tb := new(bytes.Buffer)\n\t\t\tenc := json.NewEncoder(b)\n\t\t\tenc.SetEscapeHTML(false)\n\t\t\terr := enc.Encode(data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\tJsonIetfVal: bytes.Trim(b.Bytes(), \" \\r\\n\\t\"),\n\t\t\t\t}}, nil\n\t\t}\n\tcase string:\n\t\tswitch strings.ToLower(encoding) {\n\t\tcase \"\":\n\t\t\tencoding = encodingJSON\n\t\t\tfallthrough\n\t\tcase encodingJSON:\n\t\t\tb := new(bytes.Buffer)\n\t\t\tif json.Valid([]byte(data)) {\n\t\t\t\tb.WriteString(data)\n\t\t\t} else {\n\t\t\t\tenc := json.NewEncoder(b)\n\t\t\t\tenc.SetEscapeHTML(false)\n\t\t\t\terr := enc.Encode(data)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\tJsonVal: bytes.Trim(b.Bytes(), \" \\r\\n\\t\"),\n\t\t\t\t}}, nil\n\t\tcase encodingJSON_IETF:\n\t\t\tb := new(bytes.Buffer)\n\t\t\tif json.Valid([]byte(data)) {\n\t\t\t\tb.WriteString(data)\n\t\t\t} else {\n\t\t\t\tenc := json.NewEncoder(b)\n\t\t\t\tenc.SetEscapeHTML(false)\n\n\t\t\t\terr := enc.Encode(data)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\tJsonIetfVal: bytes.Trim(b.Bytes(), \" \\r\\n\\t\"),\n\t\t\t\t}}, nil\n\t\tcase \"ascii\":\n\t\t\treturn &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{\n\t\t\t\t\tAsciiVal: data,\n\t\t\t\t}}, nil\n\t\tcase \"bool\":\n\t\t\tbval, err := strconv.ParseBool(data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_BoolVal{\n\t\t\t\t\tBoolVal: bval,\n\t\t\t\t}}, nil\n\t\tcase \"bytes\":\n\t\t\treturn &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_BytesVal{\n\t\t\t\t\tBytesVal: []byte(data),\n\t\t\t\t}}, nil\n\t\tcase 
\"decimal\":\n\t\t\treturn nil, fmt.Errorf(\"decimal type not implemented\")\n\t\tcase \"float\":\n\t\t\tf, err := strconv.ParseFloat(data, 32)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_FloatVal{\n\t\t\t\t\tFloatVal: float32(f),\n\t\t\t\t}}, nil\n\t\tcase \"int\":\n\t\t\tk, err := strconv.ParseInt(data, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_IntVal{\n\t\t\t\t\tIntVal: k,\n\t\t\t\t}}, nil\n\t\tcase \"uint\":\n\t\t\tu, err := strconv.ParseUint(data, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_UintVal{\n\t\t\t\t\tUintVal: u,\n\t\t\t\t}}, nil\n\t\tcase \"string\":\n\t\t\treturn &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_StringVal{\n\t\t\t\t\tStringVal: data,\n\t\t\t\t}}, nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid encoding %s\", encoding)\n\t\t}\n\tcase *gnmi.TypedValue:\n\t\treturn data, nil\n\tcase *gnmi.TypedValue_AnyVal:\n\t\treturn &gnmi.TypedValue{Value: data}, nil\n\tcase *gnmi.TypedValue_AsciiVal:\n\t\treturn &gnmi.TypedValue{Value: data}, nil\n\tcase *gnmi.TypedValue_BoolVal:\n\t\treturn &gnmi.TypedValue{Value: data}, nil\n\tcase *gnmi.TypedValue_BytesVal:\n\t\treturn &gnmi.TypedValue{Value: data}, nil\n\tcase *gnmi.TypedValue_DecimalVal:\n\t\treturn &gnmi.TypedValue{Value: data}, nil\n\tcase *gnmi.TypedValue_FloatVal:\n\t\treturn &gnmi.TypedValue{Value: data}, nil\n\tcase *gnmi.TypedValue_IntVal:\n\t\treturn &gnmi.TypedValue{Value: data}, nil\n\tcase *gnmi.TypedValue_JsonIetfVal:\n\t\treturn &gnmi.TypedValue{Value: data}, nil\n\tcase *gnmi.TypedValue_JsonVal:\n\t\treturn &gnmi.TypedValue{Value: data}, nil\n\tcase *gnmi.TypedValue_LeaflistVal:\n\t\treturn &gnmi.TypedValue{Value: data}, nil\n\tcase *gnmi.TypedValue_ProtoBytes:\n\t\treturn &gnmi.TypedValue{Value: data}, nil\n\tcase 
*gnmi.TypedValue_StringVal:\n\t\treturn &gnmi.TypedValue{Value: data}, nil\n\tcase *gnmi.TypedValue_UintVal:\n\t\treturn &gnmi.TypedValue{Value: data}, nil\n\tdefault:\n\t\tv, err := gvalue.FromScalar(data)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%w: %v\", ErrInvalidValue, err)\n\t\t}\n\t\treturn v, nil\n\t}\n\treturn nil, fmt.Errorf(\"unexpected value type and encoding %w: %T and %s\", ErrInvalidValue, data, encoding)\n}\n\n// Delete creates a GNMIOption that creates a *gnmi.Path and adds it to the supplied proto.Message.\n// the supplied message must be a *gnmi.SetRequest. The *gnmi.Path is added the .Delete list.\nfunc Delete(p string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SetRequest:\n\t\t\tgp, err := path.ParsePath(p)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%w: %v\", ErrInvalidValue, err)\n\t\t\t}\n\t\t\tif len(msg.Delete) == 0 {\n\t\t\t\tmsg.Delete = make([]*gnmi.Path, 0)\n\t\t\t}\n\t\t\tmsg.Delete = append(msg.Delete, gp)\n\t\tcase *gnmi.Notification:\n\t\t\tgp, err := path.ParsePath(p)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%w: %v\", ErrInvalidValue, err)\n\t\t\t}\n\t\t\tif len(msg.Delete) == 0 {\n\t\t\t\tmsg.Delete = make([]*gnmi.Path, 0)\n\t\t\t}\n\t\t\tmsg.Delete = append(msg.Delete, gp)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Delete: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// SubscriptionListMode creates a GNMIOption that sets the SubscribeRequest Mode.\n// The variable mode must be one of \"once\", \"poll\" or \"stream\".\n// The supplied proto.Message must be a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc SubscriptionListMode(mode string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := 
msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SubscribeRequest:\n\t\t\tswitch msg := msg.Request.(type) {\n\t\t\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\t\t\tif msg.Subscribe == nil {\n\t\t\t\t\tmsg.Subscribe = new(gnmi.SubscriptionList)\n\t\t\t\t}\n\t\t\t\tgmode, ok := gnmi.SubscriptionList_Mode_value[strings.ToUpper(mode)]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"option SubscriptionListMode: %w: %s\", ErrInvalidValue, mode)\n\t\t\t\t}\n\t\t\t\tmsg.Subscribe.Mode = gnmi.SubscriptionList_Mode(gmode)\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"option SubscriptionListMode: %w: %T\", ErrInvalidMsgType, msg)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option SubscriptionListMode: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// SubscriptionListModeSTREAM creates a GNMIOption that sets the Subscription List Mode to STREAM\nfunc SubscriptionListModeSTREAM() func(msg proto.Message) error {\n\treturn SubscriptionListMode(\"STREAM\")\n}\n\n// SubscriptionListModeONCE creates a GNMIOption that sets the Subscription List Mode to ONCE\nfunc SubscriptionListModeONCE() func(msg proto.Message) error {\n\treturn SubscriptionListMode(\"ONCE\")\n}\n\n// SubscriptionListModePOLL creates a GNMIOption that sets the Subscription List Mode to POLL\nfunc SubscriptionListModePOLL() func(msg proto.Message) error {\n\treturn SubscriptionListMode(\"POLL\")\n}\n\n// Qos creates a GNMIOption that sets the QosMarking field in a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc Qos(qos uint32) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SubscribeRequest:\n\t\t\tswitch msg := msg.Request.(type) {\n\t\t\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\t\t\tif msg.Subscribe == nil {\n\t\t\t\t\tmsg.Subscribe = new(gnmi.SubscriptionList)\n\t\t\t\t}\n\t\t\t\tmsg.Subscribe.Qos = &gnmi.QOSMarking{Marking: qos}\n\t\t\tdefault:\n\t\t\t\treturn 
fmt.Errorf(\"option Qos: %w: %T\", ErrInvalidMsgType, msg)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Qos: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// AllowAggregation creates a GNMIOption that sets the AllowAggregation field in a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc AllowAggregation(b bool) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SubscribeRequest:\n\t\t\tswitch msg := msg.Request.(type) {\n\t\t\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\t\t\tif msg.Subscribe == nil {\n\t\t\t\t\tmsg.Subscribe = new(gnmi.SubscriptionList)\n\t\t\t\t}\n\t\t\t\tmsg.Subscribe.AllowAggregation = b\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"option AllowAggregation: %w: %T\", ErrInvalidMsgType, msg)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option AllowAggregation: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// UpdatesOnly creates a GNMIOption that sets the UpdatesOnly field in a *gnmi.SubscribeRequest with RequestType Subscribe.\nfunc UpdatesOnly(b bool) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SubscribeRequest:\n\t\t\tswitch msg := msg.Request.(type) {\n\t\t\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\t\t\tif msg.Subscribe == nil {\n\t\t\t\t\tmsg.Subscribe = new(gnmi.SubscriptionList)\n\t\t\t\t}\n\t\t\t\tmsg.Subscribe.UpdatesOnly = b\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"option UpdatesOnly: %w: %T\", ErrInvalidMsgType, msg)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option UpdatesOnly: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// UpdatesOnly creates a GNMIOption that creates a *gnmi.Subscription based on the supplied GNMIOption(s) and adds it the\n// supplied proto.Mesage which must be of type *gnmi.SubscribeRequest with RequestType 
Subscribe.\nfunc Subscription(opts ...GNMIOption) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SubscribeRequest:\n\t\t\tswitch msg := msg.Request.(type) {\n\t\t\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\t\t\tif msg.Subscribe == nil {\n\t\t\t\t\tmsg.Subscribe = new(gnmi.SubscriptionList)\n\t\t\t\t}\n\t\t\t\tif len(msg.Subscribe.Subscription) == 0 {\n\t\t\t\t\tmsg.Subscribe.Subscription = make([]*gnmi.Subscription, 0)\n\t\t\t\t}\n\t\t\t\tsub := new(gnmi.Subscription)\n\t\t\t\terr := apply(sub, opts...)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tmsg.Subscribe.Subscription = append(msg.Subscribe.Subscription, sub)\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"option Subscription: %w: %T\", ErrInvalidMsgType, msg)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Subscription: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// SubscriptionMode creates a GNMIOption that sets the Subscription mode in a proto.Message of type *gnmi.Subscription.\nfunc SubscriptionMode(mode string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.Subscription:\n\t\t\tgmode, ok := gnmi.SubscriptionMode_value[strings.ToUpper(strings.ReplaceAll(mode, \"-\", \"_\"))]\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"option SubscriptionMode: %w: %s\", ErrInvalidValue, mode)\n\t\t\t}\n\t\t\tmsg.Mode = gnmi.SubscriptionMode(gmode)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option SubscriptionMode: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// SubscriptionModeTARGET_DEFINED creates a GNMIOption that sets the subscription mode to TARGET_DEFINED\nfunc SubscriptionModeTARGET_DEFINED() func(msg proto.Message) error {\n\treturn SubscriptionMode(\"TARGET_DEFINED\")\n}\n\n// 
SubscriptionModeON_CHANGE creates a GNMIOption that sets the subscription mode to ON_CHANGE\nfunc SubscriptionModeON_CHANGE() func(msg proto.Message) error {\n\treturn SubscriptionMode(\"ON_CHANGE\")\n}\n\n// SubscriptionModeSAMPLE creates a GNMIOption that sets the subscription mode to SAMPLE\nfunc SubscriptionModeSAMPLE() func(msg proto.Message) error {\n\treturn SubscriptionMode(\"SAMPLE\")\n}\n\n// SampleInterval creates a GNMIOption that sets the SampleInterval in a proto.Message of type *gnmi.Subscription.\nfunc SampleInterval(d time.Duration) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.Subscription:\n\t\t\tmsg.SampleInterval = uint64(d.Nanoseconds())\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option SampleInterval: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// HeartbeatInterval creates a GNMIOption that sets the HeartbeatInterval in a proto.Message of type *gnmi.Subscription.\nfunc HeartbeatInterval(d time.Duration) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.Subscription:\n\t\t\tmsg.HeartbeatInterval = uint64(d.Nanoseconds())\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option HeartbeatInterval: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// SuppressRedundant creates a GNMIOption that sets the SuppressRedundant in a proto.Message of type *gnmi.Subscription.\nfunc SuppressRedundant(s bool) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.Subscription:\n\t\t\tmsg.SuppressRedundant = s\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option 
SuppressRedundant: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// Notification creates a GNMIOption that builds a gnmi.Notification from the supplied GNMIOptions and adds it\n// to the supplied proto.Message\nfunc Notification(opts ...GNMIOption) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.GetResponse:\n\t\t\tif len(msg.Notification) == 0 {\n\t\t\t\tmsg.Notification = make([]*gnmi.Notification, 0)\n\t\t\t}\n\t\t\tnotif := new(gnmi.Notification)\n\t\t\terr := apply(notif, opts...)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmsg.Notification = append(msg.Notification, notif)\n\t\tcase *gnmi.SubscribeResponse:\n\t\t\tswitch msg := msg.Response.(type) {\n\t\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\t\tnotif := new(gnmi.Notification)\n\t\t\t\terr := apply(notif, opts...)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tmsg.Update = notif\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Notification: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// Timestamp sets the supplied timestamp in a gnmi.Notification message\nfunc Timestamp(t int64) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.Notification:\n\t\t\tmsg.Timestamp = t\n\t\tcase *gnmi.SetResponse:\n\t\t\tmsg.Timestamp = t\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Timestamp: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// TimestampNow is the same as Timestamp(time.Now().UnixNano())\nfunc TimestampNow() func(msg proto.Message) error {\n\treturn Timestamp(time.Now().UnixNano())\n}\n\n// Atomic sets the .Atomic field in a gnmi.Notification message\nfunc Atomic(b bool) func(msg 
proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.Notification:\n\t\t\tmsg.Atomic = b\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Atomic: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// UpdateResult creates a GNMIOption that creates a gnmi.UpdateResult and adds it to\n// a proto.Message of type gnmi.SetResponse.\nfunc UpdateResult(opts ...GNMIOption) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SetResponse:\n\t\t\tif len(msg.Response) == 0 {\n\t\t\t\tmsg.Response = make([]*gnmi.UpdateResult, 0)\n\t\t\t}\n\t\t\tupdRes := new(gnmi.UpdateResult)\n\t\t\terr := apply(updRes, opts...)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmsg.Response = append(msg.Response, updRes)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option UpdateResult: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// Operation creates a GNMIOption that sets the gnmi.UpdateResult_Operation\n// value in a gnmi.UpdateResult.\nfunc Operation(oper string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.UpdateResult:\n\t\t\tsetOper, ok := gnmi.UpdateResult_Operation_value[strings.ToUpper(oper)]\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"option Operation: %w: %s\", ErrInvalidValue, oper)\n\t\t\t}\n\t\t\tmsg.Op = gnmi.UpdateResult_Operation(setOper)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Operation: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// OperationINVALID creates a GNMIOption that sets the gnmi.SetResponse Operation to INVALID\nfunc OperationINVALID() func(msg proto.Message) error {\n\treturn Operation(\"INVALID\")\n}\n\n// OperationDELETE creates a GNMIOption that 
sets the gnmi.SetResponse Operation to DELETE\nfunc OperationDELETE() func(msg proto.Message) error {\n\treturn Operation(\"DELETE\")\n}\n\n// OperationREPLACE creates a GNMIOption that sets the gnmi.SetResponse Operation to REPLACE\nfunc OperationREPLACE() func(msg proto.Message) error {\n\treturn Operation(\"REPLACE\")\n}\n\n// OperationUPDATE creates a GNMIOption that sets the gnmi.SetResponse Operation to UPDATE\nfunc OperationUPDATE() func(msg proto.Message) error {\n\treturn Operation(\"UPDATE\")\n}\n\n// UnionReplace creates a GNMIOption that creates a *gnmi.Update message and adds it to the supplied proto.Message.\n// the supplied message must be a *gnmi.SetRequest.\nfunc UnionReplace(opts ...GNMIOption) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SetRequest:\n\t\t\tif len(msg.UnionReplace) == 0 {\n\t\t\t\tmsg.UnionReplace = make([]*gnmi.Update, 0)\n\t\t\t}\n\t\t\tupd := new(gnmi.Update)\n\t\t\terr := apply(upd, opts...)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmsg.UnionReplace = append(msg.UnionReplace, upd)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option UnionReplace: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "pkg/api/gnmi_msgs_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage api\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmi/proto/gnmi_ext\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/testutils\"\n)\n\n// Capabilities Request / Response tests\nfunc TestNewCapabilitiesRequest(t *testing.T) {\n\tname := \"single_case\"\n\tt.Run(name, func(t *testing.T) {\n\t\tnreq, err := NewCapabilitiesRequest()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"failed at %q: %v\", name, err)\n\t\t\tt.Fail()\n\t\t}\n\t\tif !reflect.DeepEqual(new(gnmi.CapabilityRequest), nreq) {\n\t\t\tt.Errorf(\"failed at %q\", name)\n\t\t\tt.Errorf(\"expected %+v\", &gnmi.CapabilityRequest{})\n\t\t\tt.Errorf(\"     got %+v\", nreq)\n\t\t\tt.Fail()\n\t\t}\n\t})\n}\n\ntype capResponseInput struct {\n\topts []GNMIOption\n\treq  *gnmi.CapabilityResponse\n\terr  error\n}\n\nvar capResponseTestSet = map[string]capResponseInput{\n\t\"simple\": {\n\t\topts: []GNMIOption{\n\t\t\tSupportedEncoding(\"json\", \"json_ietf\"),\n\t\t},\n\t\treq: &gnmi.CapabilityResponse{\n\t\t\tSupportedEncodings: []gnmi.Encoding{\n\t\t\t\tgnmi.Encoding_JSON,\n\t\t\t\tgnmi.Encoding_JSON_IETF,\n\t\t\t},\n\t\t\tGNMIVersion: DefaultGNMIVersion,\n\t\t},\n\t\terr: nil,\n\t},\n\t\"custom_version\": {\n\t\topts: []GNMIOption{\n\t\t\tVersion(\"1.0.0\"),\n\t\t\tSupportedEncoding(\"json\", \"json_ietf\"),\n\t\t},\n\t\treq: &gnmi.CapabilityResponse{\n\t\t\tSupportedEncodings: 
[]gnmi.Encoding{\n\t\t\t\tgnmi.Encoding_JSON,\n\t\t\t\tgnmi.Encoding_JSON_IETF,\n\t\t\t},\n\t\t\tGNMIVersion: \"1.0.0\",\n\t\t},\n\t\terr: nil,\n\t},\n\t\"unsupported_encoding\": {\n\t\topts: []GNMIOption{\n\t\t\tVersion(DefaultGNMIVersion),\n\t\t\tSupportedEncoding(\"not_json\", \"json_ietf\"),\n\t\t},\n\t\treq: &gnmi.CapabilityResponse{\n\t\t\tSupportedEncodings: []gnmi.Encoding{\n\t\t\t\tgnmi.Encoding_JSON,\n\t\t\t\tgnmi.Encoding_JSON_IETF,\n\t\t\t},\n\t\t\tGNMIVersion: DefaultGNMIVersion,\n\t\t},\n\t\terr: ErrInvalidValue,\n\t},\n}\n\nfunc TestNewCapabilitiesResponse(t *testing.T) {\n\tfor name, item := range capResponseTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tnreq, err := NewCapabilitiesResponse(item.opts...)\n\t\t\tif err != nil {\n\t\t\t\tuerr := errors.Unwrap(err)\n\t\t\t\tif !errors.Is(uerr, item.err) {\n\t\t\t\t\tt.Errorf(\"%q failed\", name)\n\t\t\t\t\tt.Errorf(\"%q expected err : %v\", name, item.err)\n\t\t\t\t\tt.Errorf(\"%q got err      : %v\", name, err)\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !testutils.CapabilitiesResponsesEqual(nreq, item.req) {\n\t\t\t\tt.Errorf(\"%q failed\", name)\n\t\t\t\tt.Errorf(\"%q expected result : %+v\", name, item.req)\n\t\t\t\tt.Errorf(\"%q got result      : %+v\", name, nreq)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Get Request / Response tests\ntype getRequestInput struct {\n\topts []GNMIOption\n\treq  *gnmi.GetRequest\n}\n\nvar getRequestTestSet = map[string]getRequestInput{\n\t\"path\": {\n\t\topts: []GNMIOption{\n\t\t\tPath(\"system/name\"),\n\t\t},\n\t\treq: &gnmi.GetRequest{\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"extension\": {\n\t\topts: []GNMIOption{\n\t\t\tPath(\"system/name\"),\n\t\t\tExtension(&gnmi_ext.Extension{Ext: 
&gnmi_ext.Extension_History{}}),\n\t\t},\n\t\treq: &gnmi.GetRequest{\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExtension: []*gnmi_ext.Extension{\n\t\t\t\t{Ext: &gnmi_ext.Extension_History{}},\n\t\t\t},\n\t\t},\n\t},\n\t\"two_paths\": {\n\t\topts: []GNMIOption{\n\t\t\tPath(\"system/name\"),\n\t\t\tPath(\"system/gnmi-server\"),\n\t\t},\n\t\treq: &gnmi.GetRequest{\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"gnmi-server\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"prefix\": {\n\t\topts: []GNMIOption{\n\t\t\tPrefix(\"system/name\"),\n\t\t},\n\t\treq: &gnmi.GetRequest{\n\t\t\tPrefix: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"target\": {\n\t\topts: []GNMIOption{\n\t\t\tTarget(\"target1\"),\n\t\t},\n\t\treq: &gnmi.GetRequest{\n\t\t\tPrefix: &gnmi.Path{\n\t\t\t\tTarget: \"target1\",\n\t\t\t},\n\t\t},\n\t},\n\t\"prefix_target_path\": {\n\t\topts: []GNMIOption{\n\t\t\tPrefix(\"system\"),\n\t\t\tPath(\"name\"),\n\t\t\tTarget(\"target1\"),\n\t\t},\n\t\treq: &gnmi.GetRequest{\n\t\t\tPrefix: &gnmi.Path{\n\t\t\t\tTarget: \"target1\",\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: 
\"name\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"data_type_ALL\": {\n\t\topts: []GNMIOption{\n\t\t\tPath(\"system/name\"),\n\t\t\tDataTypeALL(),\n\t\t},\n\t\treq: &gnmi.GetRequest{\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tType: gnmi.GetRequest_ALL,\n\t\t},\n\t},\n\t\"data_type_CONFIG\": {\n\t\topts: []GNMIOption{\n\t\t\tPath(\"system/name\"),\n\t\t\tDataTypeCONFIG(),\n\t\t},\n\t\treq: &gnmi.GetRequest{\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tType: gnmi.GetRequest_CONFIG,\n\t\t},\n\t},\n\t\"data_type_STATE\": {\n\t\topts: []GNMIOption{\n\t\t\tPath(\"system/name\"),\n\t\t\tDataTypeSTATE(),\n\t\t},\n\t\treq: &gnmi.GetRequest{\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tType: gnmi.GetRequest_STATE,\n\t\t},\n\t},\n\t\"data_type_OPERATIONAL\": {\n\t\topts: []GNMIOption{\n\t\t\tPath(\"system/name\"),\n\t\t\tDataTypeOPERATIONAL(),\n\t\t},\n\t\treq: &gnmi.GetRequest{\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tType: gnmi.GetRequest_OPERATIONAL,\n\t\t},\n\t},\n\t\"encoding\": {\n\t\topts: []GNMIOption{\n\t\t\tPath(\"system/name\"),\n\t\t\tDataType(\"config\"),\n\t\t\tEncoding(\"json_ietf\"),\n\t\t},\n\t\treq: &gnmi.GetRequest{\n\t\t\tPath: 
[]*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tType:     gnmi.GetRequest_CONFIG,\n\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n\t\t},\n\t},\n\t\"encoding_custom\": {\n\t\topts: []GNMIOption{\n\t\t\tPath(\"system/name\"),\n\t\t\tDataType(\"config\"),\n\t\t\tEncodingCustom(42),\n\t\t},\n\t\treq: &gnmi.GetRequest{\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"system\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tType:     gnmi.GetRequest_CONFIG,\n\t\t\tEncoding: gnmi.Encoding(42),\n\t\t},\n\t},\n}\n\nfunc TestNewGetRequest(t *testing.T) {\n\tfor name, item := range getRequestTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tnreq, err := NewGetRequest(item.opts...)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed at %q: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif !testutils.GetRequestsEqual(nreq, item.req) {\n\t\t\t\tt.Errorf(\"failed at %q\", name)\n\t\t\t\tt.Errorf(\"expected %+v\", item.req)\n\t\t\t\tt.Errorf(\"     got %+v\", nreq)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\ntype getResponseInput struct {\n\topts []GNMIOption\n\treq  *gnmi.GetResponse\n}\n\nvar getResponseTestSet = map[string]getResponseInput{\n\t\"simple\": {\n\t\topts: []GNMIOption{\n\t\t\tNotification(\n\t\t\t\tTimestamp(42),\n\t\t\t\tUpdate(\n\t\t\t\t\tPath(\"/system/name\"),\n\t\t\t\t\tValue(\"srl1\", \"json_ietf\"),\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.GetResponse{\n\t\t\tNotification: []*gnmi.Notification{\n\t\t\t\t{\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: 
\"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\t\t\t\t\tJsonIetfVal: []byte(\"\\\"srl1\\\"\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"two_updates\": {\n\t\topts: []GNMIOption{\n\t\t\tNotification(\n\t\t\t\tTimestamp(42),\n\t\t\t\tUpdate(\n\t\t\t\t\tPath(\"/system/name\"),\n\t\t\t\t\tValue(\"srl1\", \"json_ietf\"),\n\t\t\t\t),\n\t\t\t\tUpdate(\n\t\t\t\t\tPath(\"/interface\"),\n\t\t\t\t\tValue(map[string]interface{}{\n\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t}, \"json_ietf\"),\n\t\t\t\t),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.GetResponse{\n\t\t\tNotification: []*gnmi.Notification{\n\t\t\t\t{\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\t\t\t\t\tJsonIetfVal: []byte(\"\\\"srl1\\\"\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"interface\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\t\t\t\t\tJsonIetfVal: []byte(`{\"name\":\"ethernet-1/1\"}`),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestNewGetResponse(t *testing.T) {\n\tfor name, item := range getResponseTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tnreq, err := NewGetResponse(item.opts...)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed at %q: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif 
!testutils.GetResponsesEqual(nreq, item.req) {\n\t\t\t\tt.Errorf(\"failed at %q\", name)\n\t\t\t\tt.Errorf(\"expected %+v\", item.req)\n\t\t\t\tt.Errorf(\"     got %+v\", nreq)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Set Request / Response tests\ntype setRequestInput struct {\n\topts []GNMIOption\n\treq  *gnmi.SetRequest\n}\n\nvar setRequestTestSet = map[string]setRequestInput{\n\t\"update\": {\n\t\topts: []GNMIOption{\n\t\t\tUpdate(Path(\"/system/name/host-name\"), Value(\"srl2\", \"json_ietf\")),\n\t\t},\n\t\treq: &gnmi.SetRequest{\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte(\"\\\"srl2\\\"\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"two_updates\": {\n\t\topts: []GNMIOption{\n\t\t\tUpdate(\n\t\t\t\tPath(\"/system/name/host-name\"),\n\t\t\t\tValue(\"srl2\", \"json_ietf\"),\n\t\t\t),\n\t\t\tUpdate(\n\t\t\t\tPath(\"/system/gnmi-server/unix-socket/admin-state\"),\n\t\t\t\tValue(\"enable\", \"json_ietf\"),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SetRequest{\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte(\"\\\"srl2\\\"\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t{Name: \"gnmi-server\"},\n\t\t\t\t\t\t\t{Name: \"unix-socket\"},\n\t\t\t\t\t\t\t{Name: \"admin-state\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: 
&gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte(\"\\\"enable\\\"\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"replace\": {\n\t\topts: []GNMIOption{\n\t\t\tReplace(Path(\"/system/name/host-name\"), Value(\"srl2\", \"json_ietf\")),\n\t\t},\n\t\treq: &gnmi.SetRequest{\n\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte(\"\\\"srl2\\\"\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"two_replaces\": {\n\t\topts: []GNMIOption{\n\t\t\tReplace(\n\t\t\t\tPath(\"/system/name/host-name\"),\n\t\t\t\tValue(\"srl2\", \"json_ietf\"),\n\t\t\t),\n\t\t\tReplace(\n\t\t\t\tPath(\"/system/gnmi-server/unix-socket/admin-state\"),\n\t\t\t\tValue(\"enable\", \"json_ietf\"),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SetRequest{\n\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte(\"\\\"srl2\\\"\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t{Name: \"gnmi-server\"},\n\t\t\t\t\t\t\t{Name: \"unix-socket\"},\n\t\t\t\t\t\t\t{Name: \"admin-state\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte(\"\\\"enable\\\"\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"delete\": {\n\t\topts: []GNMIOption{\n\t\t\tDelete(\"/system/name/host-name\"),\n\t\t},\n\t\treq: &gnmi.SetRequest{\n\t\t\tDelete: 
[]*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"two_deletes\": {\n\t\topts: []GNMIOption{\n\t\t\tDelete(\"/system/name/host-name\"),\n\t\t\tDelete(\"interface/description\"),\n\t\t},\n\t\treq: &gnmi.SetRequest{\n\t\t\tDelete: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"interface\"},\n\t\t\t\t\t\t{Name: \"description\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"update_replace\": {\n\t\topts: []GNMIOption{\n\t\t\tUpdate(\n\t\t\t\tPath(\"/system/name/host-name\"),\n\t\t\t\tValue(\"srl2\", \"json_ietf\"),\n\t\t\t),\n\t\t\tReplace(\n\t\t\t\tPath(\"/system/gnmi-server/unix-socket/admin-state\"),\n\t\t\t\tValue(\"enable\", \"json_ietf\"),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SetRequest{\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte(\"\\\"srl2\\\"\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t{Name: \"gnmi-server\"},\n\t\t\t\t\t\t\t{Name: \"unix-socket\"},\n\t\t\t\t\t\t\t{Name: \"admin-state\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte(\"\\\"enable\\\"\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"update_replace_delete\": {\n\t\topts: 
[]GNMIOption{\n\t\t\tUpdate(\n\t\t\t\tPath(\"/system/name/host-name\"),\n\t\t\t\tValue(\"srl2\", \"json_ietf\"),\n\t\t\t),\n\t\t\tReplace(\n\t\t\t\tPath(\"/system/gnmi-server/unix-socket/admin-state\"),\n\t\t\t\tValue(\"enable\", \"json_ietf\"),\n\t\t\t),\n\t\t\tDelete(\"/system/name/host-name\"),\n\t\t},\n\t\treq: &gnmi.SetRequest{\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte(\"\\\"srl2\\\"\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t{Name: \"gnmi-server\"},\n\t\t\t\t\t\t\t{Name: \"unix-socket\"},\n\t\t\t\t\t\t\t{Name: \"admin-state\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte(\"\\\"enable\\\"\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDelete: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestNewSetRequest(t *testing.T) {\n\tfor name, item := range setRequestTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tnreq, err := NewSetRequest(item.opts...)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed at %q: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif !testutils.SetRequestsEqual(nreq, item.req) {\n\t\t\t\tt.Errorf(\"failed at %q\", name)\n\t\t\t\tt.Errorf(\"expected %+v\", item.req)\n\t\t\t\tt.Errorf(\"     got %+v\", nreq)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\ntype setResponseInput struct {\n\topts []GNMIOption\n\treq  
*gnmi.SetResponse\n}\n\nvar setResponseTestSet = map[string]setResponseInput{\n\t\"simple\": {\n\t\topts: []GNMIOption{\n\t\t\tTimestamp(42),\n\t\t\tUpdateResult(\n\t\t\t\tOperation(\"update\"),\n\t\t\t\tPath(\"interface\"),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SetResponse{\n\t\t\tResponse: []*gnmi.UpdateResult{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"interface\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tOp: gnmi.UpdateResult_UPDATE,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTimestamp: 42,\n\t\t},\n\t},\n\t\"combined\": {\n\t\topts: []GNMIOption{\n\t\t\tTimestamp(42),\n\t\t\tUpdateResult(\n\t\t\t\tOperation(\"update\"),\n\t\t\t\tPath(\"interface\"),\n\t\t\t),\n\t\t\tUpdateResult(\n\t\t\t\tOperation(\"replace\"),\n\t\t\t\tPath(\"network-instance\"),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SetResponse{\n\t\t\tResponse: []*gnmi.UpdateResult{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"interface\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tOp: gnmi.UpdateResult_UPDATE,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"network-instance\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tOp: gnmi.UpdateResult_REPLACE,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTimestamp: 42,\n\t\t},\n\t},\n}\n\nfunc TestNewSetResponse(t *testing.T) {\n\tfor name, item := range setResponseTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tnreq, err := NewSetResponse(item.opts...)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed at %q: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif !testutils.SetResponsesEqual(nreq, item.req) {\n\t\t\t\tt.Errorf(\"failed at %q\", name)\n\t\t\t\tt.Errorf(\"expected %+v\", item.req)\n\t\t\t\tt.Errorf(\"     got %+v\", nreq)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Subscribe Request / Response tests\ntype subscribeRequestInput struct {\n\topts []GNMIOption\n\treq  *gnmi.SubscribeRequest\n}\n\nvar 
subscribeRequestTestSet = map[string]subscribeRequestInput{\n\t\"subscription\": {\n\t\topts: []GNMIOption{\n\t\t\tEncodingJSON_IETF(),\n\t\t\tSubscription(\n\t\t\t\tPath(\"system/name\"),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeRequest{\n\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n\t\t\t\t\tSubscription: []*gnmi.Subscription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"subscription_list_mode_ONCE\": {\n\t\topts: []GNMIOption{\n\t\t\tSubscriptionListModeONCE(),\n\t\t\tSubscription(\n\t\t\t\tPath(\"system/name\"),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeRequest{\n\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n\t\t\t\t\tMode: gnmi.SubscriptionList_ONCE,\n\t\t\t\t\tSubscription: []*gnmi.Subscription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"subscription_list_mode_POLL\": {\n\t\topts: []GNMIOption{\n\t\t\tSubscriptionListModePOLL(),\n\t\t\tSubscription(\n\t\t\t\tPath(\"system/name\"),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeRequest{\n\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n\t\t\t\t\tMode: gnmi.SubscriptionList_POLL,\n\t\t\t\t\tSubscription: []*gnmi.Subscription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: 
\"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"subscription_list_mode_STREAM\": {\n\t\topts: []GNMIOption{\n\t\t\tSubscriptionListModeSTREAM(),\n\t\t\tSubscription(\n\t\t\t\tPath(\"system/name\"),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeRequest{\n\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n\t\t\t\t\tMode: gnmi.SubscriptionList_STREAM,\n\t\t\t\t\tSubscription: []*gnmi.Subscription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"subscription_mode_SAMPLE\": {\n\t\topts: []GNMIOption{\n\t\t\tSubscription(\n\t\t\t\tPath(\"system/name\"),\n\t\t\t\tSubscriptionModeSAMPLE(),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeRequest{\n\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n\t\t\t\t\tSubscription: []*gnmi.Subscription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_SAMPLE,\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"subscription_mode_TARGET_DEFINED\": {\n\t\topts: []GNMIOption{\n\t\t\tSubscription(\n\t\t\t\tPath(\"system/name\"),\n\t\t\t\tSubscriptionModeTARGET_DEFINED(),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeRequest{\n\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n\t\t\t\t\tSubscription: []*gnmi.Subscription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_TARGET_DEFINED,\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: 
\"system\"},\n\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"subscription_mode_ON_CHANGE\": {\n\t\topts: []GNMIOption{\n\t\t\tSubscription(\n\t\t\t\tPath(\"system/name\"),\n\t\t\t\tSubscriptionModeON_CHANGE(),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeRequest{\n\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n\t\t\t\t\tSubscription: []*gnmi.Subscription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_ON_CHANGE,\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"subscription_sample\": {\n\t\topts: []GNMIOption{\n\t\t\tEncoding(\"json_ietf\"),\n\t\t\tSubscription(\n\t\t\t\tPath(\"system/name\"),\n\t\t\t\tSubscriptionMode(\"sample\"),\n\t\t\t\tSampleInterval(10*time.Second),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeRequest{\n\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n\t\t\t\t\tSubscription: []*gnmi.Subscription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMode:           gnmi.SubscriptionMode_SAMPLE,\n\t\t\t\t\t\t\tSampleInterval: uint64(10 * time.Second),\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"subscription_list_encoding_json\": {\n\t\topts: []GNMIOption{\n\t\t\tEncodingJSON(),\n\t\t\tSubscription(\n\t\t\t\tPath(\"system/name\"),\n\t\t\t\tSubscriptionMode(\"sample\"),\n\t\t\t\tSampleInterval(10*time.Second),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeRequest{\n\t\t\tRequest: 
&gnmi.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n\t\t\t\t\tSubscription: []*gnmi.Subscription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_SAMPLE,\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"subscription_list_encoding_bytes\": {\n\t\topts: []GNMIOption{\n\t\t\tEncodingBYTES(),\n\t\t\tSubscription(\n\t\t\t\tPath(\"system/name\"),\n\t\t\t\tSubscriptionMode(\"sample\"),\n\t\t\t\tSampleInterval(10*time.Second),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeRequest{\n\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n\t\t\t\t\tEncoding: gnmi.Encoding_BYTES,\n\t\t\t\t\tSubscription: []*gnmi.Subscription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_SAMPLE,\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"subscription_list_encoding_proto\": {\n\t\topts: []GNMIOption{\n\t\t\tEncodingPROTO(),\n\t\t\tSubscription(\n\t\t\t\tPath(\"system/name\"),\n\t\t\t\tSubscriptionMode(\"sample\"),\n\t\t\t\tSampleInterval(10*time.Second),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeRequest{\n\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n\t\t\t\t\tEncoding: gnmi.Encoding_PROTO,\n\t\t\t\t\tSubscription: []*gnmi.Subscription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_SAMPLE,\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: 
\"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"subscription_list_encoding_ascii\": {\n\t\topts: []GNMIOption{\n\t\t\tEncodingASCII(),\n\t\t\tSubscription(\n\t\t\t\tPath(\"system/name\"),\n\t\t\t\tSubscriptionMode(\"sample\"),\n\t\t\t\tSampleInterval(10*time.Second),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeRequest{\n\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n\t\t\t\t\tEncoding: gnmi.Encoding_ASCII,\n\t\t\t\t\tSubscription: []*gnmi.Subscription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_SAMPLE,\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"subscription_list_encoding_json_ietf\": {\n\t\topts: []GNMIOption{\n\t\t\tEncodingJSON_IETF(),\n\t\t\tSubscription(\n\t\t\t\tPath(\"system/name\"),\n\t\t\t\tSubscriptionMode(\"sample\"),\n\t\t\t\tSampleInterval(10*time.Second),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeRequest{\n\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n\t\t\t\t\tSubscription: []*gnmi.Subscription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_SAMPLE,\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestNewSubscribeRequest(t *testing.T) {\n\tfor name, item := range subscribeRequestTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tnreq, err := NewSubscribeRequest(item.opts...)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed at %q: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif 
!testutils.SubscribeRequestsEqual(nreq, item.req) {\n\t\t\t\tt.Errorf(\"failed at %q\", name)\n\t\t\t\tt.Errorf(\"expected %+v\", item.req)\n\t\t\t\tt.Errorf(\"     got %+v\", nreq)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\ntype subscribeResponseInput struct {\n\topts []GNMIOption\n\treq  *gnmi.SubscribeResponse\n}\n\nvar subscribeResponseTestSet = map[string]subscribeResponseInput{\n\t\"simple\": {\n\t\topts: []GNMIOption{\n\t\t\tNotification(\n\t\t\t\tTimestamp(42),\n\t\t\t\t// Alias(\"alias1\"),\n\t\t\t\tUpdate(\n\t\t\t\t\tPath(\"interface\"),\n\t\t\t\t\tValue(map[string]interface{}{\n\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t}, \"json_ietf\"),\n\t\t\t\t),\n\t\t\t\tDelete(\"/interface[name=ethernet-1/2]\"),\n\t\t\t\tAtomic(true),\n\t\t\t),\n\t\t},\n\t\treq: &gnmi.SubscribeResponse{\n\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t// Alias:     \"alias1\",\n\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t{Name: \"interface\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\t\t\t\t\tJsonIetfVal: []byte(`{\"name\":\"ethernet-1/1\"}`),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tDelete: []*gnmi.Path{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\tKey:  map[string]string{\"name\": \"ethernet-1/2\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tAtomic: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestNewSubscribeResponse(t *testing.T) {\n\tfor name, item := range subscribeResponseTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tnreq, err := NewSubscribeResponse(item.opts...)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed at %q: 
%v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif !testutils.SubscribeResponsesEqual(nreq, item.req) {\n\t\t\t\tt.Errorf(\"failed at %q\", name)\n\t\t\t\tt.Errorf(\"expected %+v\", item.req)\n\t\t\t\tt.Errorf(\"     got %+v\", nreq)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNewSubscribeRequestPoll(t *testing.T) {\n\tname := \"single_case\"\n\tt.Run(name, func(t *testing.T) {\n\t\tnreq, err := NewSubscribePollRequest()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"failed at %q: %v\", name, err)\n\t\t\tt.Fail()\n\t\t}\n\t\tif !reflect.DeepEqual(&gnmi.SubscribeRequest{\n\t\t\tRequest: &gnmi.SubscribeRequest_Poll{\n\t\t\t\tPoll: new(gnmi.Poll),\n\t\t\t}}, nreq) {\n\t\t\tt.Errorf(\"failed at %q\", name)\n\t\t\tt.Errorf(\"expected %+v\", &gnmi.SubscribeRequest{Request: &gnmi.SubscribeRequest_Poll{}})\n\t\t\tt.Errorf(\"     got %+v\", nreq)\n\t\t\tt.Fail()\n\t\t}\n\t})\n}\n\nfunc TestNewSubscribeResponseSync(t *testing.T) {\n\tname := \"single_case\"\n\tt.Run(name, func(t *testing.T) {\n\t\tnreq, err := NewSubscribeSyncResponse()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"failed at %q: %v\", name, err)\n\t\t\tt.Fail()\n\t\t}\n\t\tif !reflect.DeepEqual(&gnmi.SubscribeResponse{\n\t\t\tResponse: &gnmi.SubscribeResponse_SyncResponse{\n\t\t\t\tSyncResponse: true,\n\t\t\t},\n\t\t}, nreq) {\n\t\t\tt.Errorf(\"failed at %q\", name)\n\t\t\tt.Errorf(\"expected %+v\", &gnmi.SubscribeRequest{Request: &gnmi.SubscribeRequest_Poll{}})\n\t\t\tt.Errorf(\"     got %+v\", nreq)\n\t\t\tt.Fail()\n\t\t}\n\t})\n}\n\n// Value tests\ntype valueInput struct {\n\tdata     interface{}\n\tencoding string\n\tmsg      *gnmi.Update\n\terr      error\n}\n\nvar valueTestSet = map[string]valueInput{\n\t// json\n\t\"json_string\": {\n\t\tdata:     \"value\",\n\t\tencoding: \"json\",\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\tJsonVal: []byte(\"\\\"value\\\"\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"json_string_special_chars\": 
{\n\t\tdata:     \"<.*>\",\n\t\tencoding: \"json\",\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\tJsonVal: []byte(\"\\\"<.*>\\\"\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"json_string_array\": {\n\t\tdata:     []string{\"foo\", \"bar\"},\n\t\tencoding: \"json\",\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\tJsonVal: []byte(\"[\\\"foo\\\",\\\"bar\\\"]\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"json_interface{}_array\": {\n\t\tdata:     []interface{}{\"foo\", 42},\n\t\tencoding: \"json\",\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\tJsonVal: []byte(\"[\\\"foo\\\",42]\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"json_map\": {\n\t\tdata:     map[string]interface{}{\"k\": \"v\"},\n\t\tencoding: \"json\",\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\tJsonVal: []byte(\"{\\\"k\\\":\\\"v\\\"}\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// json_ietf\n\t\"json_ietf_string\": {\n\t\tdata:     \"value\",\n\t\tencoding: \"json_ietf\",\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\tJsonIetfVal: []byte(\"\\\"value\\\"\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"json_ietf_string_array\": {\n\t\tdata:     []string{\"foo\", \"bar\"},\n\t\tencoding: \"json_ietf\",\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\tJsonIetfVal: []byte(\"[\\\"foo\\\",\\\"bar\\\"]\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"json_ietf_interface{}_array\": {\n\t\tdata:     []interface{}{\"foo\", int(42)},\n\t\tencoding: \"json_ietf\",\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\tJsonIetfVal: 
[]byte(\"[\\\"foo\\\",42]\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"json_ietf_map\": {\n\t\tdata:     map[string]interface{}{\"k\": \"v\"},\n\t\tencoding: \"json_ietf\",\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\tJsonIetfVal: []byte(\"{\\\"k\\\":\\\"v\\\"}\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// ascii\n\t\"ascii_string\": {\n\t\tdata:     \"foo\",\n\t\tencoding: \"ascii\",\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{\n\t\t\t\t\tAsciiVal: \"foo\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"ascii_string_array\": {\n\t\tdata:     []string{\"foo\", \"bar\"},\n\t\tencoding: \"ascii\",\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_LeaflistVal{\n\t\t\t\t\tLeaflistVal: &gnmi.ScalarArray{\n\t\t\t\t\t\tElement: []*gnmi.TypedValue{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_StringVal{StringVal: \"foo\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_StringVal{StringVal: \"bar\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"ascii_interface{}_array\": {\n\t\tdata:     []interface{}{\"foo\", 42},\n\t\tencoding: \"ascii\",\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_LeaflistVal{\n\t\t\t\t\tLeaflistVal: &gnmi.ScalarArray{\n\t\t\t\t\t\tElement: []*gnmi.TypedValue{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_StringVal{StringVal: \"foo\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_IntVal{IntVal: 42},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// typed values\n\t\"typed_value\": {\n\t\tdata: &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte(\"\\\"srl1\\\"\")}},\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: 
[]byte(\"\\\"srl1\\\"\")}},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"typed_value_json\": {\n\t\tdata: &gnmi.TypedValue_JsonVal{JsonVal: []byte(\"\\\"srl1\\\"\")},\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte(\"\\\"srl1\\\"\")}},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"typed_value_json_ietf\": {\n\t\tdata: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte(\"\\\"srl1\\\"\")},\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte(\"\\\"srl1\\\"\")}},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"typed_value_ascii\": {\n\t\tdata: &gnmi.TypedValue_AsciiVal{AsciiVal: \"srl1\"},\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{Value: &gnmi.TypedValue_AsciiVal{AsciiVal: \"srl1\"}},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"typed_value_bool\": {\n\t\tdata: &gnmi.TypedValue_BoolVal{BoolVal: true},\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{Value: &gnmi.TypedValue_BoolVal{BoolVal: true}},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"typed_value_bytes\": {\n\t\tdata: &gnmi.TypedValue_BytesVal{BytesVal: []byte{0, 42}},\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{Value: &gnmi.TypedValue_BytesVal{BytesVal: []byte{0, 42}}},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"typed_value_float\": {\n\t\tdata: &gnmi.TypedValue_FloatVal{FloatVal: 42.1},\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{Value: &gnmi.TypedValue_FloatVal{FloatVal: 42.1}},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"typed_value_int\": {\n\t\tdata: &gnmi.TypedValue_IntVal{IntVal: 42},\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{Value: &gnmi.TypedValue_IntVal{IntVal: 42}},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"typed_value_uint\": {\n\t\tdata: &gnmi.TypedValue_UintVal{UintVal: 42},\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 42}},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"typed_value_string\": {\n\t\tdata: &gnmi.TypedValue_StringVal{StringVal: \"foo\"},\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: 
&gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: \"foo\"}},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"typed_value_leaf_list\": {\n\t\tdata: &gnmi.TypedValue_LeaflistVal{\n\t\t\tLeaflistVal: &gnmi.ScalarArray{\n\t\t\t\tElement: []*gnmi.TypedValue{\n\t\t\t\t\t{Value: &gnmi.TypedValue_StringVal{StringVal: \"foo\"}},\n\t\t\t\t\t{Value: &gnmi.TypedValue_UintVal{UintVal: 42}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_LeaflistVal{\n\t\t\t\t\tLeaflistVal: &gnmi.ScalarArray{\n\t\t\t\t\t\tElement: []*gnmi.TypedValue{\n\t\t\t\t\t\t\t{Value: &gnmi.TypedValue_StringVal{StringVal: \"foo\"}},\n\t\t\t\t\t\t\t{Value: &gnmi.TypedValue_UintVal{UintVal: 42}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t// scalar\n\t\"from_scalar\": {\n\t\tdata:     42,\n\t\tencoding: \"json\",\n\t\tmsg: &gnmi.Update{\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_IntVal{\n\t\t\t\t\tIntVal: 42,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"invalid_value\": {\n\t\tdata: nil,\n\t\terr:  ErrInvalidValue,\n\t},\n}\n\nfunc TestValue(t *testing.T) {\n\tfor name, item := range valueTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tupd := new(gnmi.Update)\n\t\t\terr := Value(item.data, item.encoding)(upd)\n\t\t\tif err != nil {\n\t\t\t\tuerr := errors.Unwrap(err)\n\t\t\t\tif !errors.Is(uerr, item.err) {\n\t\t\t\t\tt.Errorf(\"failed at %q with error: %v\", name, err)\n\t\t\t\t\tt.Errorf(\"expected err: %+v\", item.err)\n\t\t\t\t\tt.Errorf(\"     got err: %+v\", err)\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !testutils.GnmiValuesEqual(item.msg.GetVal(), upd.GetVal()) {\n\t\t\t\tt.Errorf(\"failed at %q\", name)\n\t\t\t\tt.Errorf(\"expected %+v\", item.msg.GetVal())\n\t\t\t\tt.Errorf(\"     got %+v\", upd.GetVal())\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Version tests\n\nfunc TestVersion(t *testing.T) {\n\tname := \"nil_msg\"\n\tt.Run(name, func(t 
*testing.T) {\n\t\terr := Version(DefaultGNMIVersion)(nil)\n\t\tif err != nil {\n\t\t\tif !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) {\n\t\t\t\tt.Errorf(\"failed at %q with error: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t})\n\tname = \"invalid_msg\"\n\tt.Run(name, func(t *testing.T) {\n\t\terr := Version(DefaultGNMIVersion)(new(gnmi.GetRequest))\n\t\tif err != nil {\n\t\t\tif !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) {\n\t\t\t\tt.Errorf(\"failed at %q with error: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc TestSupportedEncoding(t *testing.T) {\n\tname := \"nil_msg\"\n\tt.Run(name, func(t *testing.T) {\n\t\terr := SupportedEncoding(\"json\")(nil)\n\t\tif err != nil {\n\t\t\tif !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) {\n\t\t\t\tt.Errorf(\"failed at %q with error: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t})\n\tname = \"invalid_msg\"\n\tt.Run(name, func(t *testing.T) {\n\t\terr := SupportedEncoding(\"json\")(new(gnmi.GetRequest))\n\t\tif err != nil {\n\t\t\tif !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) {\n\t\t\t\tt.Errorf(\"failed at %q with error: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t})\n\tname = \"invalid_value\"\n\tt.Run(name, func(t *testing.T) {\n\t\terr := SupportedEncoding(\"not_valid\")(new(gnmi.GetRequest))\n\t\tif err != nil {\n\t\t\tif !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) {\n\t\t\t\tt.Errorf(\"failed at %q with error: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc TestSupportedModel(t *testing.T) {\n\tname := \"nil_msg\"\n\tt.Run(name, func(t *testing.T) {\n\t\terr := SupportedModel(\"\", \"\", \"\")(nil)\n\t\tif err != nil {\n\t\t\tif !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) {\n\t\t\t\tt.Errorf(\"failed at %q with error: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t})\n\tname = \"invalid_msg\"\n\tt.Run(name, func(t *testing.T) {\n\t\terr := SupportedModel(\"\", 
\"\", \"\")(new(gnmi.GetRequest))\n\t\tif err != nil {\n\t\t\tif !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) {\n\t\t\t\tt.Errorf(\"failed at %q with error: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t})\n\tname = \"ok\"\n\tt.Run(name, func(t *testing.T) {\n\t\tcapRsp := new(gnmi.CapabilityResponse)\n\t\terr := SupportedModel(\"foo\", \"bar\", \"v2\")(capRsp)\n\t\tif err != nil {\n\t\t\tif !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) {\n\t\t\t\tt.Errorf(\"failed at %q with error: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t}\n\t\tif len(capRsp.SupportedModels) != 1 {\n\t\t\tt.Fail()\n\t\t}\n\t\tif capRsp.SupportedModels[0].Name != \"foo\" {\n\t\t\tt.Fail()\n\t\t}\n\t\tif capRsp.SupportedModels[0].Organization != \"bar\" {\n\t\t\tt.Fail()\n\t\t}\n\t\tif capRsp.SupportedModels[0].Version != \"v2\" {\n\t\t\tt.Fail()\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "pkg/api/go.mod",
    "content": "module github.com/openconfig/gnmic/pkg/api\n\ngo 1.24.12\n\nrequire (\n\tgithub.com/AlekSi/pointer v1.2.0\n\tgithub.com/google/go-cmp v0.7.0\n\tgithub.com/grpc-ecosystem/go-grpc-middleware v1.4.0\n\tgithub.com/grpc-ecosystem/go-grpc-prometheus v1.2.0\n\tgithub.com/jhump/protoreflect v1.17.0\n\tgithub.com/juju/ratelimit v1.0.2\n\tgithub.com/openconfig/gnmi v0.14.1\n\tgithub.com/openconfig/grpctunnel v0.1.0\n\tgithub.com/pkg/errors v0.9.1\n\tgithub.com/prometheus/client_golang v1.20.5\n\tgolang.org/x/net v0.48.0\n\tgolang.org/x/oauth2 v0.34.0\n\tgolang.org/x/sync v0.19.0\n\tgoogle.golang.org/grpc v1.78.0\n\tgoogle.golang.org/protobuf v1.36.11\n)\n\nrequire (\n\tcloud.google.com/go/compute/metadata v0.9.0 // indirect\n\tgithub.com/beorn7/perks v1.0.1 // indirect\n\tgithub.com/bufbuild/protocompile v0.14.1 // indirect\n\tgithub.com/cespare/xxhash/v2 v2.3.0 // indirect\n\tgithub.com/golang/protobuf v1.5.4 // indirect\n\tgithub.com/kr/text v0.2.0 // indirect\n\tgithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect\n\tgithub.com/prometheus/client_model v0.6.1 // indirect\n\tgithub.com/prometheus/common v0.55.0 // indirect\n\tgithub.com/prometheus/procfs v0.15.1 // indirect\n\tgolang.org/x/sys v0.39.0 // indirect\n\tgolang.org/x/text v0.32.0 // indirect\n\tgoogle.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect\n)\n"
  },
  {
    "path": "pkg/api/go.sum",
    "content": "cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=\ncloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=\ngithub.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w=\ngithub.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=\ngithub.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw=\ngithub.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c=\ngithub.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=\ngithub.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=\ngithub.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=\ngithub.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=\ngithub.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=\ngithub.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=\ngithub.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=\ngithub.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=\ngithub.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=\ngithub.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=\ngithub.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=\ngithub.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=\ngithub.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=\ngithub.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=\ngithub.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid 
v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=\ngithub.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=\ngithub.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=\ngithub.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=\ngithub.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94=\ngithub.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8=\ngithub.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI=\ngithub.com/juju/ratelimit v1.0.2/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=\ngithub.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=\ngithub.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=\ngithub.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=\ngithub.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 
h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/openconfig/gnmi v0.14.1 h1:qKMuFvhIRR2/xxCOsStPQ25aKpbMDdWr3kI+nP9bhMs=\ngithub.com/openconfig/gnmi v0.14.1/go.mod h1:whr6zVq9PCU8mV1D0K9v7Ajd3+swoN6Yam9n8OH3eT0=\ngithub.com/openconfig/grpctunnel v0.1.0 h1:EN99qtlExZczgQgp5ANnHRC/Rs62cAG+Tz2BQ5m/maM=\ngithub.com/openconfig/grpctunnel v0.1.0/go.mod h1:G04Pdu0pml98tdvXrvLaU+EBo3PxYfI9MYqpvdaEHLo=\ngithub.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=\ngithub.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=\ngithub.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=\ngithub.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=\ngithub.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=\ngithub.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=\ngithub.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=\ngithub.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=\ngithub.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=\ngithub.com/rogpeppe/go-internal v1.10.0 
h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=\ngithub.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=\ngithub.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=\ngithub.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=\ngithub.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=\ngithub.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngo.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=\ngo.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=\ngo.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=\ngo.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=\ngo.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=\ngo.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=\ngo.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=\ngo.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=\ngo.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=\ngo.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod 
h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=\ngo.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=\ngo.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=\ngo.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=\ngo.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=\ngo.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=\ngo.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=\ngolang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=\ngolang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=\ngolang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=\ngolang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=\ngolang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=\ngonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=\ngoogle.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=\ngoogle.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=\ngoogle.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=\ngoogle.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=\ngoogle.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=\ngoogle.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=\ngopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\nhonnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\n"
  },
  {
    "path": "pkg/api/path/path.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage path\n\nimport (\n\t\"errors\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n)\n\nvar errMalformedXPath = errors.New(\"malformed xpath\")\nvar errMalformedXPathKey = errors.New(\"malformed xpath key\")\nvar errEmptyPathElemName = errors.New(\"empty path element name\")\n\nvar escapedBracketsReplacer = strings.NewReplacer(`\\]`, `]`, `\\[`, `[`)\n\nvar stringBuilderPool = sync.Pool{\n\tNew: func() any {\n\t\treturn new(strings.Builder)\n\t},\n}\n\n// CreatePrefix //\nfunc CreatePrefix(prefix, target string) (*gnmi.Path, error) {\n\tif len(prefix)+len(target) == 0 {\n\t\treturn nil, nil\n\t}\n\tp, err := ParsePath(prefix)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif target != \"\" {\n\t\tp.Target = target\n\t}\n\treturn p, nil\n}\n\n// ParsePath creates a gnmi.Path out of a p string, check if the first element is prefixed by an origin,\n// removes it from the xpath and adds it to the returned gnmiPath\nfunc ParsePath(p string) (*gnmi.Path, error) {\n\tlp := len(p)\n\tif lp == 0 {\n\t\treturn &gnmi.Path{}, nil\n\t}\n\tvar origin string\n\n\tidx := strings.Index(p, \":\")\n\tif idx >= 0 && p[0] != '/' && !strings.Contains(p[:idx], \"/\") &&\n\t\t// path == origin:/ || path == origin:\n\t\t((idx+1 < lp && p[idx+1] == '/') || (lp == idx+1)) {\n\t\torigin = p[:idx]\n\t\tp = p[idx+1:]\n\t}\n\n\tpes, err := toPathElems(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &gnmi.Path{\n\t\tOrigin: origin,\n\t\tElem:   pes,\n\t}, nil\n}\n\n// toPathElems 
parses a xpath and returns a list of path elements\nfunc toPathElems(p string) ([]*gnmi.PathElem, error) {\n\tif !strings.HasSuffix(p, \"/\") {\n\t\tp += \"/\"\n\t}\n\tbuffer := make([]rune, 0)\n\tnull := rune(0)\n\tprevC := rune(0)\n\t// track if the loop is traversing a key\n\tinKey := false\n\tfor _, r := range p {\n\t\tswitch r {\n\t\tcase '[':\n\t\t\tif inKey && prevC != '\\\\' {\n\t\t\t\treturn nil, errMalformedXPath\n\t\t\t}\n\t\t\tif prevC != '\\\\' {\n\t\t\t\tinKey = true\n\t\t\t}\n\t\tcase ']':\n\t\t\tif !inKey && prevC != '\\\\' {\n\t\t\t\treturn nil, errMalformedXPath\n\t\t\t}\n\t\t\tif prevC != '\\\\' {\n\t\t\t\tinKey = false\n\t\t\t}\n\t\tcase '/':\n\t\t\tif !inKey {\n\t\t\t\tbuffer = append(buffer, null)\n\t\t\t\tprevC = r\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tbuffer = append(buffer, r)\n\t\tprevC = r\n\t}\n\tif inKey {\n\t\treturn nil, errMalformedXPath\n\t}\n\tstringElems := strings.Split(string(buffer), string(null))\n\tpElems := make([]*gnmi.PathElem, 0, len(stringElems))\n\tfor _, s := range stringElems {\n\t\tif s == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tpe, err := toPathElem(s)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpElems = append(pElems, pe)\n\t}\n\treturn pElems, nil\n}\n\n// toPathElem take a xpath formatted path element such as \"elem1[k=v]\" and returns the corresponding gnmi.PathElem\nfunc toPathElem(s string) (*gnmi.PathElem, error) {\n\tidx := -1\n\tprevC := rune(0)\n\tfor i, r := range s {\n\t\tif r == '[' && prevC != '\\\\' {\n\t\t\tidx = i\n\t\t\tbreak\n\t\t}\n\t\tprevC = r\n\t}\n\tvar kvs map[string]string\n\tif idx > 0 {\n\t\tvar err error\n\t\tkvs, err = parseXPathKeys(s[idx:])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts = s[:idx]\n\t} else if idx == 0 {\n\t\treturn nil, errEmptyPathElemName\n\t}\n\tif s == \"\" {\n\t\treturn nil, errEmptyPathElemName\n\t}\n\treturn &gnmi.PathElem{Name: s, Key: kvs}, nil\n}\n\n// parseXPathKeys takes keys definition from an xpath, e.g [k1=v1][k2=v2] and return the 
keys and values as a map[string]string\nfunc parseXPathKeys(s string) (map[string]string, error) {\n\tif len(s) == 0 {\n\t\treturn nil, nil\n\t}\n\tkvs := make(map[string]string)\n\tinKey := false\n\tstart := 0\n\tprevRune := rune(0)\n\tfor i, r := range s {\n\t\tswitch r {\n\t\tcase '[':\n\t\t\tif prevRune == '\\\\' {\n\t\t\t\tprevRune = r\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif inKey {\n\t\t\t\treturn nil, errMalformedXPathKey\n\t\t\t}\n\t\t\tinKey = true\n\t\t\tstart = i + 1\n\t\tcase ']':\n\t\t\tif prevRune == '\\\\' {\n\t\t\t\tprevRune = r\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !inKey {\n\t\t\t\treturn nil, errMalformedXPathKey\n\t\t\t}\n\t\t\teq := strings.Index(s[start:i], \"=\")\n\t\t\tif eq < 0 {\n\t\t\t\treturn nil, errMalformedXPathKey\n\t\t\t}\n\t\t\tk, v := s[start:i][:eq], s[start:i][eq+1:]\n\t\t\tif len(k) == 0 || len(v) == 0 {\n\t\t\t\treturn nil, errMalformedXPathKey\n\t\t\t}\n\t\t\tkvs[escapedBracketsReplacer.Replace(k)] = escapedBracketsReplacer.Replace(v)\n\t\t\tinKey = false\n\n\t\tdefault:\n\t\t\tif !inKey {\n\t\t\t\treturn nil, errMalformedXPathKey\n\t\t\t}\n\t\t}\n\t\tprevRune = r\n\t}\n\tif inKey {\n\t\treturn nil, errMalformedXPathKey\n\t}\n\treturn kvs, nil\n}\n\nfunc PathElems(pf, p *gnmi.Path) []*gnmi.PathElem {\n\tr := make([]*gnmi.PathElem, 0, len(pf.GetElem())+len(p.GetElem()))\n\tr = append(r, pf.GetElem()...)\n\treturn append(r, p.GetElem()...)\n}\n\nfunc GnmiPathToXPath(p *gnmi.Path, noKeys bool) string {\n\tif p == nil {\n\t\treturn \"\"\n\t}\n\tsb := stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tsb.Reset()\n\t\tstringBuilderPool.Put(sb)\n\t}()\n\tif p.Origin != \"\" {\n\t\tsb.WriteString(p.Origin)\n\t\tsb.WriteString(\":/\")\n\t}\n\telems := p.GetElem()\n\tnumElems := len(elems)\n\n\tfor i, pe := range elems {\n\t\tsb.WriteString(pe.GetName())\n\t\tif !noKeys {\n\t\t\tnumKeys := len(pe.GetKey())\n\t\t\tswitch numKeys {\n\t\t\tcase 0:\n\t\t\tcase 1:\n\t\t\t\tfor k := range pe.GetKey() {\n\t\t\t\t\twriteKey(sb, k, 
pe.GetKey()[k])\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tkeys := make([]string, 0, numKeys)\n\t\t\t\tfor k := range pe.GetKey() {\n\t\t\t\t\tkeys = append(keys, k)\n\t\t\t\t}\n\t\t\t\tsort.Strings(keys)\n\t\t\t\tfor _, k := range keys {\n\t\t\t\t\twriteKey(sb, k, pe.GetKey()[k])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif i+1 != numElems {\n\t\t\tsb.WriteString(\"/\")\n\t\t}\n\t}\n\treturn sb.String()\n}\n\nfunc writeKey(sb *strings.Builder, k, v string) {\n\tsb.WriteString(\"[\")\n\tsb.WriteString(k)\n\tsb.WriteString(\"=\")\n\tsb.WriteString(v)\n\tsb.WriteString(\"]\")\n}\n"
  },
  {
    "path": "pkg/api/path/path_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage path\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/api/testutils\"\n)\n\nvar prefixSet = map[string]*gnmi.Path{\n\t\"\": nil,\n\t\"target%%%origin:/e1/e2\": {\n\t\tOrigin: \"origin\",\n\t\tTarget: \"target\",\n\t\tElem: []*gnmi.PathElem{\n\t\t\t{Name: \"e1\"},\n\t\t\t{Name: \"e2\"},\n\t\t},\n\t},\n\t\"/e1\": {\n\t\tElem: []*gnmi.PathElem{\n\t\t\t{Name: \"e1\"},\n\t\t},\n\t},\n\t\"/e1/e2[k=v]\": {\n\t\tElem: []*gnmi.PathElem{\n\t\t\t{Name: \"e1\"},\n\t\t\t{Name: \"e2\",\n\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\"k\": \"v\",\n\t\t\t\t}},\n\t\t},\n\t},\n}\n\nvar pathsTable = map[string]struct {\n\tstrPath     string\n\tgnmiPath    *gnmi.Path\n\tisOK        bool\n\texpectedErr error\n}{\n\t\"empty_path\": {\n\t\tstrPath:     \"\",\n\t\tgnmiPath:    &gnmi.Path{},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_slash_only\": {\n\t\tstrPath:  \"/\",\n\t\tgnmiPath: &gnmi.Path{},\n\t\tisOK:     true,\n\t},\n\t\"path_with_one_path_element\": {\n\t\tstrPath: \"e\",\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e\"},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_one_path_element_with_slash\": {\n\t\tstrPath: \"/e\",\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e\"},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: 
nil,\n\t},\n\t\"path_with_two_path_elements\": {\n\t\tstrPath: \"/e1/e2\",\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1\"},\n\t\t\t\t{Name: \"e2\"},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_two_path_elements_with_key\": {\n\t\tstrPath: \"/e1/e2[k=v]\",\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1\"},\n\t\t\t\t{Name: \"e2\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k\": \"v\",\n\t\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_multiple_path_elements_and_multiple_keys\": {\n\t\tstrPath: \"/e1/e2[k1=v1][k2=v2]\",\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1\"},\n\t\t\t\t{Name: \"e2\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k1\": \"v1\",\n\t\t\t\t\t\t\"k2\": \"v2\",\n\t\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_origin\": {\n\t\tstrPath: \"origin:/e1/e2\",\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tOrigin: \"origin\",\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1\"},\n\t\t\t\t{Name: \"e2\"},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_origin_only\": {\n\t\tstrPath: \"origin:\",\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tOrigin: \"origin\",\n\t\t},\n\t\tisOK: true,\n\t},\n\t\"path_with_origin_and_slash_only\": {\n\t\tstrPath: \"origin:/\",\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tOrigin: \"origin\",\n\t\t},\n\t\tisOK: true,\n\t},\n\t\"path_with_empty_origin\": {\n\t\tstrPath:  \":\",\n\t\tgnmiPath: &gnmi.Path{},\n\t\tisOK:     true,\n\t},\n\t\"path_with_empty_origin_and_slash_only\": {\n\t\tstrPath:  \":/\",\n\t\tgnmiPath: &gnmi.Path{},\n\t\tisOK:     true,\n\t},\n\t\"path_with_origin_and_key\": {\n\t\tstrPath: \"origin:/e1/e2[k=v]\",\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tOrigin: \"origin\",\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1\"},\n\t\t\t\t{Name: \"e2\",\n\t\t\t\t\tKey: 
map[string]string{\n\t\t\t\t\t\t\"k\": \"v\",\n\t\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_origin_and_multiple_keys\": {\n\t\tstrPath: \"origin:/e1[name=object]/e2[addr=1.1.1.1/32]\",\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tOrigin: \"origin\",\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"name\": \"object\",\n\t\t\t\t\t}},\n\t\t\t\t{Name: \"e2\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"addr\": \"1.1.1.1/32\",\n\t\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_colon_in_path_elem\": {\n\t\tstrPath: \"origin:/e1:e1[k=1.1.1.1/32]/e2[k1=v2]\",\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tOrigin: \"origin\",\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1:e1\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k\": \"1.1.1.1/32\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{Name: \"e2\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k1\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_colon_in_2_path_elems\": {\n\t\tstrPath: \"origin:/e1:e1[k=1.1.1.1/32]/e2:e3[k1=v2]\",\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tOrigin: \"origin\",\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1:e1\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k\": \"1.1.1.1/32\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{Name: \"e2:e3\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k1\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_escaped_open_bracket\": {\n\t\tstrPath: `/e1\\[/e2[k=v]`,\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: `e1\\[`},\n\t\t\t\t{Name: \"e2\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k\": \"v\",\n\t\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_escaped_close_bracket\": {\n\t\tstrPath: 
`/e1\\]/e2[k=v]`,\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: `e1\\]`},\n\t\t\t\t{Name: \"e2\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k\": \"v\",\n\t\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_colon_in_first_path_elem\": {\n\t\tstrPath: `e1:e2/e3[k=v]`,\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1:e2\"},\n\t\t\t\t{Name: \"e3\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k\": \"v\",\n\t\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_colon_in_key_value\": {\n\t\tstrPath: `/e1/e2[k=v:1]`,\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1\"},\n\t\t\t\t{Name: \"e2\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k\": \"v:1\",\n\t\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_without_origin_with_colon_in_path_elem\": {\n\t\tstrPath: `e1/e2:e3[k=v:1]`,\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1\"},\n\t\t\t\t{Name: \"e2:e3\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k\": \"v:1\",\n\t\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_origin_and_colon_in_key_value\": {\n\t\tstrPath: `origin:/e1/e2[k=v:1]`,\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tOrigin: \"origin\",\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1\"},\n\t\t\t\t{Name: \"e2\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k\": \"v:1\",\n\t\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_origin_and_colon_space_in_key_value\": {\n\t\tstrPath: `origin:/e1/e2[k=v a:1]`,\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tOrigin: \"origin\",\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1\"},\n\t\t\t\t{Name: \"e2\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k\": `v a:1`,\n\t\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\tisOK:        
true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_origin_and_colon_space_in_key_value_double_quoted_value\": {\n\t\tstrPath: `origin:/e1/e2[k=\"v a:1\"]`,\n\t\tgnmiPath: &gnmi.Path{\n\t\t\tOrigin: \"origin\",\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"e1\"},\n\t\t\t\t{Name: \"e2\",\n\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\"k\": `\"v a:1\"`,\n\t\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\tisOK:        true,\n\t\texpectedErr: nil,\n\t},\n\t\"path_with_missing_closing_bracket\": {\n\t\tstrPath:     `/e1/e2[k=v`,\n\t\tgnmiPath:    nil,\n\t\tisOK:        false,\n\t\texpectedErr: errMalformedXPath,\n\t},\n\t\"path_with_missing_open_bracket\": {\n\t\tstrPath:     `/e1/e2k=v]`,\n\t\tgnmiPath:    nil,\n\t\tisOK:        false,\n\t\texpectedErr: errMalformedXPath,\n\t},\n\t\"path_with_key_missing_equal_sign\": {\n\t\tstrPath:     `/e1/e2[k]`,\n\t\tgnmiPath:    nil,\n\t\tisOK:        false,\n\t\texpectedErr: errMalformedXPathKey,\n\t},\n}\n\ntype outKeysSet struct {\n\tout map[string]string\n\terr error\n}\ntype outPathElemSet struct {\n\tout *gnmi.PathElem\n\terr error\n}\n\nvar keysSet = map[string]struct {\n\tin  string\n\texp outKeysSet\n}{\n\t\"no_key\": {\n\t\tin: \"\",\n\t\texp: outKeysSet{\n\t\t\tout: nil,\n\t\t\terr: nil,\n\t\t},\n\t},\n\t\"one_key\": {\n\t\tin: \"[k=v]\",\n\t\texp: outKeysSet{\n\t\t\tout: map[string]string{\"k\": \"v\"},\n\t\t\terr: nil,\n\t\t},\n\t},\n\t\"two_key\": {\n\t\tin: \"[k1=v1][k2=1.1.1.1/30]\",\n\t\texp: outKeysSet{\n\t\t\tout: map[string]string{\"k1\": \"v1\", \"k2\": \"1.1.1.1/30\"},\n\t\t\terr: nil,\n\t\t},\n\t},\n\t\"noval_key\": {\n\t\tin: \"[k1=]\",\n\t\texp: outKeysSet{\n\t\t\tout: nil,\n\t\t\terr: errMalformedXPathKey,\n\t\t},\n\t},\n\t\"nokey_with_val\": {\n\t\tin: \"[=v]\",\n\t\texp: outKeysSet{\n\t\t\tout: nil,\n\t\t\terr: errMalformedXPathKey,\n\t\t},\n\t},\n\t\"inKey_brackets\": {\n\t\tin: \"[k=[v]\",\n\t\texp: outKeysSet{\n\t\t\tout: nil,\n\t\t\terr: errMalformedXPathKey,\n\t\t},\n\t},\n\t\"inKey_escaped_open_bracket\": 
{\n\t\tin: `[k=\\[v]`,\n\t\texp: outKeysSet{\n\t\t\tout: map[string]string{\"k\": \"[v\"},\n\t\t\terr: nil,\n\t\t},\n\t},\n\t\"inKey_escaped_close_bracket\": {\n\t\tin: `[k=\\]v]`,\n\t\texp: outKeysSet{\n\t\t\tout: map[string]string{\"k\": \"]v\"},\n\t\t\terr: nil,\n\t\t},\n\t},\n\t\"inKey_escaped_brackets\": {\n\t\tin: `[\\[k=\\]v]`,\n\t\texp: outKeysSet{\n\t\t\tout: map[string]string{\"[k\": \"]v\"},\n\t\t\terr: nil,\n\t\t},\n\t},\n}\nvar pathElemSet = map[string]struct {\n\tin  string\n\tout outPathElemSet\n}{\n\t\"no_key\": {\n\t\tin: \"elem1\",\n\t\tout: outPathElemSet{\n\t\t\tout: &gnmi.PathElem{Name: \"elem1\"},\n\t\t\terr: nil,\n\t\t},\n\t},\n\t\"with_1_key\": {\n\t\tin: \"elem1[k=v]\",\n\t\tout: outPathElemSet{\n\t\t\tout: &gnmi.PathElem{Name: \"elem1\", Key: map[string]string{\"k\": \"v\"}},\n\t\t\terr: nil,\n\t\t},\n\t},\n\t\"with_2_keys\": {\n\t\tin: \"elem1[k1=v1][k2=v2]\",\n\t\tout: outPathElemSet{\n\t\t\tout: &gnmi.PathElem{Name: \"elem1\", Key: map[string]string{\"k1\": \"v1\", \"k2\": \"v2\"}},\n\t\t\terr: nil,\n\t\t},\n\t},\n\t\"with_1_key_malformed\": {\n\t\tin: \"elem1[k1=v1\",\n\t\tout: outPathElemSet{\n\t\t\tout: nil,\n\t\t\terr: errMalformedXPathKey,\n\t\t},\n\t},\n\t\"elem_with_escaped_bracket\": {\n\t\tin: `elem1\\[k1=v1`,\n\t\tout: outPathElemSet{\n\t\t\tout: &gnmi.PathElem{Name: `elem1\\[k1=v1`},\n\t\t\terr: nil,\n\t\t},\n\t},\n}\n\nfunc TestCreatePrefix(t *testing.T) {\n\tvar target, prefix string\n\tfor e, p := range prefixSet {\n\t\tval := strings.Split(e, \"%%%\")\n\t\t//fmt.Printf(\"%d: %v\\n\", len(val), val)\n\t\tif len(val) == 2 {\n\t\t\ttarget, prefix = val[0], val[1]\n\t\t} else if len(val) == 1 {\n\t\t\ttarget, prefix = \"\", val[0]\n\t\t}\n\t\t//fmt.Println(target, prefix)\n\t\tgp, err := CreatePrefix(prefix, target)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tif !reflect.DeepEqual(p, gp) {\n\t\t\tt.Errorf(\"failed at elem: %s: expecting %v, got %v\", e, p, gp)\n\t\t}\n\n\t}\n}\n\nfunc TestParsePath(t *testing.T) 
{\n\tfor name, tc := range pathsTable {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tp, err := ParsePath(tc.strPath)\n\t\t\tif err != nil && tc.isOK {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif !tc.isOK {\n\t\t\t\tif err != tc.expectedErr {\n\t\t\t\t\tt.Errorf(\"failed at '%s', expected error %+v, got %+v\", name, tc.expectedErr, err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !testutils.GnmiPathsEqual(p, tc.gnmiPath) {\n\t\t\t\tt.Errorf(\"failed at '%s', expected %v, got %+v\", name, tc.gnmiPath, p)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestParseXPathKeys(t *testing.T) {\n\tfor name, input := range keysSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tkeys, err := parseXPathKeys(input.in)\n\t\t\tif !cmp.Equal(keys, input.exp.out) {\n\t\t\t\tt.Errorf(\"failed at '%s', expected %v, got %+v\", name, input.exp.out, keys)\n\t\t\t}\n\t\t\tif err != input.exp.err {\n\t\t\t\tt.Errorf(\"failed at '%s', expected error %+v, got %+v\", name, input.exp.err, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestStringToPathElem(t *testing.T) {\n\tfor name, input := range pathElemSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tgnmiPathElem, err := toPathElem(input.in)\n\t\t\tif gnmiPathElem == nil || input.out.out == nil {\n\t\t\t\tif gnmiPathElem != input.out.out {\n\t\t\t\t\tt.Errorf(\"failed at '%s', expected %v, got %+v\", name, input.out.out, gnmiPathElem)\n\t\t\t\t}\n\t\t\t} else if !cmp.Equal(gnmiPathElem.Key, input.out.out.Key) || gnmiPathElem.Name != input.out.out.Name {\n\t\t\t\tt.Errorf(\"failed at '%s', expected %v, got %+v\", name, input.out.out, gnmiPathElem)\n\t\t\t}\n\t\t\tif err != input.out.err {\n\t\t\t\tt.Errorf(\"failed at '%s', expected error %+v, got %+v\", name, input.out.err, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc BenchmarkParsePath(b *testing.B) {\n\tfor name, tc := range pathsTable {\n\t\tb.Run(name, func(b *testing.B) {\n\t\t\tb.ReportAllocs()\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tParsePath(tc.strPath)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGnmiPathToXPath(t 
*testing.T) {\n\ttests := []struct {\n\t\tname string // description of this test case\n\t\t// Named input parameters for target function.\n\t\tp      *gnmi.Path\n\t\tnoKeys bool\n\t\twant   string\n\t}{\n\t\t{\n\t\t\tname:   \"nil\",\n\t\t\tp:      nil,\n\t\t\tnoKeys: false,\n\t\t\twant:   \"\",\n\t\t},\n\t\t{\n\t\t\tname:   \"empty_path\",\n\t\t\tp:      &gnmi.Path{},\n\t\t\tnoKeys: false,\n\t\t\twant:   \"\",\n\t\t},\n\t\t{\n\t\t\tname:   \"path_with_one_path_element\",\n\t\t\tp:      &gnmi.Path{Elem: []*gnmi.PathElem{{Name: \"e1\"}}},\n\t\t\tnoKeys: false,\n\t\t\twant:   \"e1\",\n\t\t},\n\t\t{\n\t\t\tname:   \"path_with_one_path_element_with_key\",\n\t\t\tp:      &gnmi.Path{Elem: []*gnmi.PathElem{{Name: \"e1\", Key: map[string]string{\"k\": \"v\"}}}},\n\t\t\tnoKeys: false,\n\t\t\twant:   \"e1[k=v]\",\n\t\t},\n\t\t{\n\t\t\tname:   \"path_with_two_path_elements\",\n\t\t\tp:      &gnmi.Path{Elem: []*gnmi.PathElem{{Name: \"e1\"}, {Name: \"e2\"}}},\n\t\t\tnoKeys: false,\n\t\t\twant:   \"e1/e2\",\n\t\t},\n\t\t{\n\t\t\tname:   \"path_with_two_path_elements_with_key\",\n\t\t\tp:      &gnmi.Path{Elem: []*gnmi.PathElem{{Name: \"e1\", Key: map[string]string{\"k\": \"v\"}}, {Name: \"e2\", Key: map[string]string{\"k1\": \"v1\"}}}},\n\t\t\tnoKeys: false,\n\t\t\twant:   \"e1[k=v]/e2[k1=v1]\",\n\t\t},\n\t\t{\n\t\t\tname:   \"path_with_origin\",\n\t\t\tp:      &gnmi.Path{Origin: \"origin\", Elem: []*gnmi.PathElem{{Name: \"e1\"}, {Name: \"e2\"}}},\n\t\t\tnoKeys: false,\n\t\t\twant:   \"origin:/e1/e2\",\n\t\t},\n\t\t{\n\t\t\tname:   \"path_with_origin_and_key\",\n\t\t\tp:      &gnmi.Path{Origin: \"origin\", Elem: []*gnmi.PathElem{{Name: \"e1\", Key: map[string]string{\"k\": \"v\"}}}},\n\t\t\tnoKeys: false,\n\t\t\twant:   \"origin:/e1[k=v]\",\n\t\t},\n\t\t{\n\t\t\tname:   \"path_with_origin_and_multiple_keys\",\n\t\t\tp:      &gnmi.Path{Origin: \"origin\", Elem: []*gnmi.PathElem{{Name: \"e1\", Key: map[string]string{\"k\": \"v\"}}, {Name: \"e2\", Key: map[string]string{\"k1\": 
\"v1\"}}}},\n\t\t\tnoKeys: false,\n\t\t\twant:   \"origin:/e1[k=v]/e2[k1=v1]\",\n\t\t},\n\t\t{\n\t\t\tname:   \"path_with_multiple_keys_in_one_path_element\",\n\t\t\tp:      &gnmi.Path{Elem: []*gnmi.PathElem{{Name: \"e1\", Key: map[string]string{\"k\": \"v\", \"k1\": \"v1\"}}}},\n\t\t\tnoKeys: false,\n\t\t\twant:   \"e1[k=v][k1=v1]\",\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := GnmiPathToXPath(tt.p, tt.noKeys)\n\t\t\t// TODO: update the condition below to compare got with tt.want.\n\t\t\tif got != tt.want {\n\t\t\t\tt.Errorf(\"GnmiPathToXPath() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/api/server/options.go",
    "content": "// © 2024 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage server\n\nimport (\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\tgrpc_ratelimit \"github.com/grpc-ecosystem/go-grpc-middleware/ratelimit\"\n\tgrpc_prometheus \"github.com/grpc-ecosystem/go-grpc-prometheus\"\n\t\"github.com/juju/ratelimit\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/credentials/insecure\"\n)\n\nfunc (s *gNMIServer) serverOpts() ([]grpc.ServerOption, error) {\n\topts := make([]grpc.ServerOption, 0, 1)\n\tcredsOpts, err := s.tlsServerOpts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\topts = append(opts, credsOpts)\n\n\tif s.config.Keepalive != nil {\n\t\topts = append(opts, grpc.KeepaliveParams(*s.config.Keepalive))\n\t}\n\tif s.config.MaxRecvMsgSize > 0 {\n\t\topts = append(opts, grpc.MaxRecvMsgSize(s.config.MaxRecvMsgSize))\n\t}\n\tif s.config.MaxSendMsgSize > 0 {\n\t\topts = append(opts, grpc.MaxSendMsgSize(s.config.MaxSendMsgSize))\n\t}\n\tif s.config.MaxConcurrentStreams > 0 {\n\t\topts = append(opts, grpc.MaxConcurrentStreams(s.config.MaxConcurrentStreams))\n\t}\n\topts = append(opts, s.interceptorsOpts()...)\n\treturn opts, nil\n}\n\nfunc (s *gNMIServer) interceptorsOpts() []grpc.ServerOption {\n\tui := []grpc.UnaryServerInterceptor{}\n\tsi := []grpc.StreamServerInterceptor{}\n\tif s.reg != nil {\n\t\tgrpcMetrics := grpc_prometheus.NewServerMetrics()\n\t\tui = append(ui, grpcMetrics.UnaryServerInterceptor())\n\t\tsi = append(si, 
grpcMetrics.StreamServerInterceptor())\n\t\ts.reg.MustRegister(grpcMetrics)\n\t}\n\tif s.config.RateLimit > 0 {\n\t\tlimiter := &rateLimiterInterceptor{\n\t\t\tbucket: ratelimit.NewBucket(time.Second, s.config.RateLimit),\n\t\t}\n\t\tui = append(ui, grpc_ratelimit.UnaryServerInterceptor(limiter))\n\t\tsi = append(si, grpc_ratelimit.StreamServerInterceptor(limiter))\n\t}\n\treturn []grpc.ServerOption{\n\t\tgrpc.ChainUnaryInterceptor(ui...),\n\t\tgrpc.ChainStreamInterceptor(si...),\n\t}\n}\n\ntype rateLimiterInterceptor struct {\n\tbucket *ratelimit.Bucket\n}\n\nfunc (r *rateLimiterInterceptor) Limit() bool {\n\treturn r.bucket.TakeAvailable(1) == 0\n}\n\nfunc (s *gNMIServer) tlsServerOpts() (grpc.ServerOption, error) {\n\tif s.config.TLS == nil {\n\t\treturn grpc.Creds(insecure.NewCredentials()), nil\n\t}\n\terr := s.config.TLS.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttlsConfig, err := s.createTLSConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn grpc.Creds(credentials.NewTLS(tlsConfig)), nil\n}\n\nfunc (s *gNMIServer) createTLSConfig() (*tls.Config, error) {\n\ttlsConfig := &tls.Config{}\n\tif s.config.TLS.CertFile == \"\" && s.config.TLS.KeyFile == \"\" {\n\t\tcert, _ := utils.SelfSignedCerts()\n\t\ttlsConfig.Certificates = []tls.Certificate{cert}\n\t} else {\n\t\ttlsConfig.GetCertificate = s.readCerts\n\t}\n\n\tswitch s.config.TLS.ClientAuth {\n\tdefault:\n\t\ttlsConfig.ClientAuth = tls.NoClientCert\n\tcase \"request\":\n\t\ttlsConfig.ClientAuth = tls.RequestClientCert\n\tcase \"require\":\n\t\ttlsConfig.ClientAuth = tls.RequireAnyClientCert\n\tcase \"verify-if-given\":\n\t\ttlsConfig.ClientAuth = tls.VerifyClientCertIfGiven\n\tcase \"require-verify\":\n\t\ttlsConfig.ClientAuth = tls.RequireAndVerifyClientCert\n\t}\n\n\tif len(s.config.TLS.CaFile) != 0 {\n\t\tcaCertPool, err := utils.LoadCACertificates(s.config.TLS.CaFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttlsConfig.ClientCAs = caCertPool\n\t}\n\n\treturn 
tlsConfig, nil\n}\n\nfunc (s *gNMIServer) readCerts(chi *tls.ClientHelloInfo) (*tls.Certificate, error) {\n\tnow := time.Now()\n\n\ts.cm.Lock()\n\tdefer s.cm.Unlock()\n\n\tif !now.After(s.lastRead.Add(time.Minute)) && s.cert != nil {\n\t\treturn s.cert, nil\n\t}\n\n\tcert, err := os.ReadFile(s.config.TLS.CertFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read defined cert file: %w\", err)\n\t}\n\n\tkey, err := os.ReadFile(s.config.TLS.KeyFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read defined key file: %w\", err)\n\t}\n\n\tserverCert, err := tls.X509KeyPair(cert, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.cert = &serverCert\n\ts.lastRead = time.Now()\n\treturn &serverCert, nil\n}\n"
  },
  {
    "path": "pkg/api/server/server.go",
    "content": "// © 2024 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage server\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/pkg/errors\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"golang.org/x/sync/semaphore\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/health\"\n\n\thealthpb \"google.golang.org/grpc/health/grpc_health_v1\"\n\t\"google.golang.org/grpc/keepalive\"\n\t\"google.golang.org/grpc/peer\"\n\t\"google.golang.org/grpc/reflection\"\n\t\"google.golang.org/grpc/status\"\n)\n\ntype Config struct {\n\t// gRPC server address\n\tAddress string\n\t// MaxUnaryRPC defines the max number of inflight\n\t// Unary RPCs (Cap, Get, Set,...).\n\t// if negative or unset, there is not limit.\n\tMaxUnaryRPC int64\n\t// MaxStreamingRPC defines the max number of inflight\n\t// streaming RPCs (Subscribe,...).\n\t// if negative or unset, there is not limit.\n\tMaxStreamingRPC int64\n\t// MaxRecvMsgSize defines the max message\n\t// size in bytes the server can receive.\n\t// If this is not set, it defaults to 4MB.\n\tMaxRecvMsgSize int\n\t// MaxSendMsgSize defines the the max message\n\t// size in bytes the server can send.\n\t// If this is not set, the default is `math.MaxInt32`.\n\tMaxSendMsgSize int\n\t// MaxConcurrentStreams defines the max number of\n\t// concurrent streams to each ServerTransport.\n\tMaxConcurrentStreams uint32\n\t// 
TCPKeepalive set the TCP keepalive time and\n\t// interval, if unset it is enabled based on\n\t// the protocol used and the OS.\n\t// If negative it is disabled.\n\tTCPKeepalive time.Duration\n\t// Keepalive params\n\tKeepalive *keepalive.ServerParameters\n\t// enable gRPC Health RPCs\n\tHealthEnabled bool\n\t// unary RPC request timeout\n\tTimeout time.Duration\n\t// RPCs rate limit\n\tRateLimit int64\n\t// TLS config\n\tTLS *types.TLSConfig\n}\n\ntype gNMIServer struct {\n\tgnmi.UnimplementedGNMIServer\n\n\tconfig Config\n\tlogger *log.Logger\n\treg    *prometheus.Registry\n\t//\n\tunarySem  *semaphore.Weighted\n\tstreamSem *semaphore.Weighted\n\t// gnmi handlers\n\tcapabilitiesHandler CapabilitiesHandler\n\tgetHandler          GetHandler\n\tsetHandler          SetHandler\n\tsubscribeHandler    SubscribeHandler\n\t// cached certificate\n\tcm   *sync.Mutex\n\tcert *tls.Certificate\n\t// certificate last read time\n\tlastRead time.Time\n}\n\n// gNMI Handlers\ntype CapabilitiesHandler func(ctx context.Context, req *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error)\n\ntype GetHandler func(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error)\n\ntype SetHandler func(ctx context.Context, req *gnmi.SetRequest) (*gnmi.SetResponse, error)\n\ntype SubscribeHandler func(req *gnmi.SubscribeRequest, stream gnmi.GNMI_SubscribeServer) error\n\ntype Option func(*gNMIServer)\n\nfunc defaultCapabilitiesHandlerFunc(ctx context.Context, req *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) {\n\treturn &gnmi.CapabilityResponse{\n\t\tGNMIVersion: \"0.10.0\",\n\t}, nil\n}\n\nfunc (c *Config) setDefaults() error {\n\tif c.Address == \"\" {\n\t\treturn errors.New(\"missing address\")\n\t}\n\tif c.Timeout <= 0 {\n\t\tc.Timeout = 2 * time.Minute\n\t}\n\treturn nil\n}\n\nfunc New(c Config, opts ...Option) (*gNMIServer, error) {\n\terr := c.setDefaults()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts := &gNMIServer{\n\t\tconfig: c,\n\t\tcm:     
new(sync.Mutex),\n\t}\n\tif c.MaxUnaryRPC > 0 {\n\t\ts.unarySem = semaphore.NewWeighted(c.MaxUnaryRPC)\n\t}\n\tif c.MaxStreamingRPC > 0 {\n\t\ts.streamSem = semaphore.NewWeighted(c.MaxStreamingRPC)\n\t}\n\tfor _, o := range opts {\n\t\to(s)\n\t}\n\tif s.capabilitiesHandler == nil {\n\t\ts.capabilitiesHandler = defaultCapabilitiesHandlerFunc\n\t}\n\treturn s, nil\n}\n\nfunc (s *gNMIServer) Start(ctx context.Context) error {\n\tvar networkType = \"tcp\"\n\tvar addr = s.config.Address\n\tif indx := strings.Index(addr, \"://\"); indx > 0 {\n\t\tnetworkType = addr[:indx]\n\t\taddr = addr[indx+3:]\n\t}\n\tlc := &net.ListenConfig{\n\t\tKeepAlive: s.config.TCPKeepalive,\n\t}\n\tvar l net.Listener\n\tvar err error\n\tfor {\n\t\tl, err = lc.Listen(ctx, networkType, addr)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"cannot listen\")\n\t\t\ts.logger.Print(err)\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\topts, err := s.serverOpts()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// create a gRPC server object\n\tgs := grpc.NewServer(opts...)\n\t// register reflection\n\treflection.Register(gs)\n\t// register gnmi service to the grpc server\n\tgnmi.RegisterGNMIServer(gs, s)\n\n\tif s.config.HealthEnabled {\n\t\ths := health.NewServer()\n\t\thealthpb.RegisterHealthServer(gs, hs)\n\t\ths.SetServingStatus(\"gNMI\", healthpb.HealthCheckResponse_SERVING)\n\t}\n\n\ts.logger.Printf(\"starting gRPC server...\")\n\terr = gs.Serve(l)\n\tif err != nil {\n\t\ts.logger.Printf(\"gRPC serve failed: %v\", err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s *gNMIServer) Capabilities(ctx context.Context, req *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) {\n\tif s.capabilitiesHandler == nil {\n\t\treturn nil, status.Errorf(codes.Unimplemented, \"method Capabilities not implemented\")\n\t}\n\tctx, cancel := context.WithTimeout(ctx, s.config.Timeout)\n\tdefer cancel()\n\terr := s.acquireUnarySem(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer 
s.releaseUnarySem()\n\treturn s.capabilitiesHandler(ctx, req)\n}\n\nfunc (s *gNMIServer) Get(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) {\n\tif s.getHandler == nil {\n\t\treturn nil, status.Errorf(codes.Unimplemented, \"method Get not implemented\")\n\t}\n\tctx, cancel := context.WithTimeout(ctx, s.config.Timeout)\n\tdefer cancel()\n\terr := s.acquireUnarySem(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer s.releaseUnarySem()\n\treturn s.getHandler(ctx, req)\n}\n\nfunc (s *gNMIServer) Set(ctx context.Context, req *gnmi.SetRequest) (*gnmi.SetResponse, error) {\n\tif s.setHandler == nil {\n\t\treturn nil, status.Errorf(codes.Unimplemented, \"method Set not implemented\")\n\t}\n\tctx, cancel := context.WithTimeout(ctx, s.config.Timeout)\n\tdefer cancel()\n\terr := s.acquireUnarySem(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer s.releaseUnarySem()\n\treturn s.setHandler(ctx, req)\n}\n\nfunc (s *gNMIServer) Subscribe(stream gnmi.GNMI_SubscribeServer) error {\n\tif s.subscribeHandler == nil {\n\t\treturn status.Errorf(codes.Unimplemented, \"method Subscribe not implemented\")\n\t}\n\tctx := stream.Context()\n\terr := s.acquireStreamSem(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer s.releaseStreamSem()\n\t//\n\tpr, _ := peer.FromContext(ctx)\n\ts.logger.Printf(\"received subscribe request from peer %s\", pr.Addr)\n\n\treq, err := stream.Recv()\n\tswitch {\n\tcase err == io.EOF:\n\t\treturn nil\n\tcase err != nil:\n\t\treturn err\n\tcase req.GetSubscribe() == nil:\n\t\treturn status.Errorf(codes.InvalidArgument, \"the subscribe request must contain a subscription definition\")\n\t}\n\treturn s.subscribeHandler(req, stream)\n}\n\nfunc (s *gNMIServer) acquireUnarySem(ctx context.Context) error {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tdefault:\n\t\tif s.config.MaxUnaryRPC <= 0 {\n\t\t\treturn nil\n\t\t}\n\t\treturn s.unarySem.Acquire(ctx, 1)\n\t}\n}\n\nfunc (s *gNMIServer) releaseUnarySem() {\n\tif 
s.config.MaxUnaryRPC <= 0 {\n\t\treturn\n\t}\n\ts.unarySem.Release(1)\n}\n\nfunc (s *gNMIServer) acquireStreamSem(ctx context.Context) error {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tdefault:\n\t\tif s.config.MaxStreamingRPC <= 0 {\n\t\t\treturn nil\n\t\t}\n\t\treturn s.streamSem.Acquire(ctx, 1)\n\t}\n}\n\nfunc (s *gNMIServer) releaseStreamSem() {\n\tif s.config.MaxStreamingRPC <= 0 {\n\t\treturn\n\t}\n\ts.streamSem.Release(1)\n}\n\n// opts\nfunc WithLogger(l *log.Logger) func(*gNMIServer) {\n\treturn func(s *gNMIServer) {\n\t\ts.logger = l\n\t}\n}\n\nfunc WithRegistry(reg *prometheus.Registry) func(*gNMIServer) {\n\treturn func(s *gNMIServer) {\n\t\ts.reg = reg\n\t}\n}\n\nfunc WithCapabilitiesHandler(h CapabilitiesHandler) func(*gNMIServer) {\n\treturn func(s *gNMIServer) {\n\t\ts.capabilitiesHandler = h\n\t}\n}\n\nfunc WithGetHandler(h GetHandler) func(*gNMIServer) {\n\treturn func(s *gNMIServer) {\n\t\ts.getHandler = h\n\t}\n}\n\nfunc WithSetHandler(h SetHandler) func(*gNMIServer) {\n\treturn func(s *gNMIServer) {\n\t\ts.setHandler = h\n\t}\n}\n\nfunc WithSubscribeHandler(h SubscribeHandler) func(*gNMIServer) {\n\treturn func(s *gNMIServer) {\n\t\ts.subscribeHandler = h\n\t}\n}\n"
  },
  {
    "path": "pkg/api/target/subscribe.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage target\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/jhump/protoreflect/dynamic\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/status\"\n)\n\n// Subscribe sends a gnmi.SubscribeRequest to the target *t, responses and error are sent to the target channels\nfunc (t *Target) Subscribe(ctx context.Context, req *gnmi.SubscribeRequest, subscriptionName string) {\n\tvar subscribeClient gnmi.GNMI_SubscribeClient\n\tvar nctx context.Context\n\tvar cancel context.CancelFunc\n\tvar err error\n\tgoto SUBSC_NODELAY\nSUBSC:\n\t{\n\t\tretry := time.NewTimer(t.Config.RetryTimer)\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tretry.Stop()\n\t\t\treturn\n\t\tcase <-retry.C:\n\t\t}\n\t}\nSUBSC_NODELAY:\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tdefault:\n\t\tnctx, cancel = context.WithCancel(ctx)\n\t\tnctx = t.appendRequestMetadata(nctx)\n\t\tsubscribeClient, err = t.Client.Subscribe(nctx, t.callOpts()...)\n\t\tif err != nil {\n\t\t\tt.errors <- &TargetError{\n\t\t\t\tSubscriptionName: subscriptionName,\n\t\t\t\tErr:              fmt.Errorf(\"failed to create a subscribe client, target='%s', retry in %s. 
err=%v\", t.Config.Name, t.Config.RetryTimer, err),\n\t\t\t}\n\t\t\tcancel()\n\t\t\tgoto SUBSC\n\t\t}\n\t}\n\tt.m.Lock()\n\tif cfn, ok := t.subscribeCancelFn[subscriptionName]; ok {\n\t\tcfn()\n\t}\n\tt.SubscribeClients[subscriptionName] = subscribeClient\n\tt.subscribeCancelFn[subscriptionName] = cancel\n\tsubConfig := t.Subscriptions[subscriptionName]\n\tt.m.Unlock()\n\n\terr = subscribeClient.Send(req)\n\tif err != nil {\n\t\tselect {\n\t\tcase t.errors <- &TargetError{\n\t\t\tSubscriptionName: subscriptionName,\n\t\t\tErr:              fmt.Errorf(\"target '%s' send error, retry in %s. err=%v\", t.Config.Name, t.Config.RetryTimer, err),\n\t\t}:\n\t\tcase <-ctx.Done():\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t\tcancel()\n\t\tgoto SUBSC\n\t}\n\n\tswitch req.GetSubscribe().GetMode() {\n\tcase gnmi.SubscriptionList_STREAM:\n\t\terr = t.handleStreamSubscriptionRcv(nctx, subscribeClient, subscriptionName, subConfig, t.subscribeResponses)\n\t\tif err != nil {\n\t\t\tselect {\n\t\t\tcase t.errors <- &TargetError{\n\t\t\t\tSubscriptionName: subscriptionName,\n\t\t\t\tErr:              err,\n\t\t\t}:\n\t\t\tcase <-ctx.Done():\n\t\t\t\tcancel()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase t.errors <- &TargetError{\n\t\t\t\tSubscriptionName: subscriptionName,\n\t\t\t\tErr:              fmt.Errorf(\"retrying in %s\", t.Config.RetryTimer),\n\t\t\t}:\n\t\t\tcase <-ctx.Done():\n\t\t\t\tcancel()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcancel()\n\t\t\tgoto SUBSC\n\t\t}\n\tcase gnmi.SubscriptionList_ONCE:\n\t\terr = t.handleONCESubscriptionRcv(nctx, subscribeClient, subscriptionName, subConfig, t.subscribeResponses)\n\t\tif err != nil {\n\t\t\tselect {\n\t\t\tcase t.errors <- &TargetError{\n\t\t\t\tSubscriptionName: subscriptionName,\n\t\t\t\tErr:              err,\n\t\t\t}:\n\t\t\tcase <-ctx.Done():\n\t\t\t\tcancel()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif errors.Is(err, io.EOF) {\n\t\t\t\tcancel()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase t.errors <- 
&TargetError{\n\t\t\t\tSubscriptionName: subscriptionName,\n\t\t\t\tErr:              fmt.Errorf(\"retrying in %s\", t.Config.RetryTimer),\n\t\t\t}:\n\t\t\tcase <-ctx.Done():\n\t\t\t\tcancel()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcancel()\n\t\t\tgoto SUBSC\n\t\t}\n\t\tcancel()\n\t\treturn\n\tcase gnmi.SubscriptionList_POLL:\n\t\tgo t.listenPolls(nctx)\n\t\terr = t.handlePollSubscriptionRcv(nctx, subscribeClient, subscriptionName, subConfig, t.subscribeResponses)\n\t\tif err != nil {\n\t\t\tselect {\n\t\t\tcase t.errors <- &TargetError{\n\t\t\t\tSubscriptionName: subscriptionName,\n\t\t\t\tErr:              err,\n\t\t\t}:\n\t\t\tcase <-ctx.Done():\n\t\t\t\tcancel()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcancel()\n\t\t\tgoto SUBSC\n\t\t}\n\t}\n\tcancel()\n}\n\nfunc (t *Target) SubscribeChan(ctx context.Context, req *gnmi.SubscribeRequest, subscriptionName string) (chan *SubscribeResponse, chan *TargetError) {\n\tresponseCh := make(chan *SubscribeResponse, 1)\n\terrCh := make(chan *TargetError, 1)\n\n\tgo func() {\n\t\tdefer close(responseCh)\n\t\tdefer close(errCh)\n\n\t\tfirstAttempt := true\n\t\tfor {\n\t\t\t// retry delay, skipped the first attempt\n\t\t\tif !firstAttempt {\n\t\t\t\ttimer := time.NewTimer(t.Config.RetryTimer)\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\ttimer.Stop()\n\t\t\t\t\treturn\n\t\t\t\tcase <-timer.C:\n\t\t\t\t}\n\t\t\t}\n\t\t\tfirstAttempt = false\n\n\t\t\t// check if parent context is done\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// attempt subscription\n\t\t\t// return true if retry is needed\n\t\t\tshouldRetry := t.attemptSubscription(ctx, req, subscriptionName, responseCh, errCh)\n\t\t\tif !shouldRetry {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn responseCh, errCh\n}\n\nfunc (t *Target) attemptSubscription(ctx context.Context, req *gnmi.SubscribeRequest,\n\tsubscriptionName string, responseCh chan *SubscribeResponse, errCh chan *TargetError) bool {\n\t// create child context for this attempt\n\tnctx, 
cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tnctx = t.appendRequestMetadata(nctx)\n\n\t// create subscribe client\n\tsubscribeClient, err := t.Client.Subscribe(nctx, t.callOpts()...)\n\tif err != nil {\n\t\t// check if cancellation was intentional\n\t\tif isCancellationError(err) {\n\t\t\treturn false\n\t\t}\n\t\tsendError(errCh, ctx, subscriptionName, err)\n\t\treturn true\n\t}\n\n\t// store subscription state and register cleanup\n\tt.m.Lock()\n\tif oldCancel, ok := t.subscribeCancelFn[subscriptionName]; ok {\n\t\toldCancel() // cancel previous attempt\n\t}\n\tt.SubscribeClients[subscriptionName] = subscribeClient\n\tt.subscribeCancelFn[subscriptionName] = cancel\n\tsubConfig := t.Subscriptions[subscriptionName]\n\tt.m.Unlock()\n\n\t// cleanup on exit (registered after state is stored)\n\tdefer t.StopSubscription(subscriptionName)\n\n\t// send initial subscribe request\n\terr = subscribeClient.Send(req)\n\tif err != nil {\n\t\tsendError(errCh, ctx, subscriptionName,\n\t\t\tfmt.Errorf(\"target '%s' send error, retry in %s: %w\",\n\t\t\t\tt.Config.Name, t.Config.RetryTimer, err))\n\t\treturn true\n\t}\n\n\t// handle subscription based on mode\n\tswitch req.GetSubscribe().GetMode() {\n\tcase gnmi.SubscriptionList_STREAM:\n\t\treturn t.handleSTREAMMode(nctx, ctx, subscribeClient, subscriptionName, subConfig, responseCh, errCh)\n\n\tcase gnmi.SubscriptionList_ONCE:\n\t\treturn t.handleONCEMode(nctx, ctx, subscribeClient, subscriptionName, subConfig, responseCh, errCh)\n\n\tcase gnmi.SubscriptionList_POLL:\n\t\treturn t.handlePOLLMode(nctx, ctx, subscribeClient, subscriptionName, subConfig, responseCh, errCh)\n\t}\n\n\treturn false\n}\n\nfunc (t *Target) handleSTREAMMode(nctx, ctx context.Context, client gnmi.GNMI_SubscribeClient,\n\tsubscriptionName string, subConfig *types.SubscriptionConfig,\n\tresponseCh chan *SubscribeResponse, errCh chan *TargetError) bool {\n\n\terr := t.handleStreamSubscriptionRcv(nctx, client, subscriptionName, subConfig, 
responseCh)\n\tif err != nil {\n\t\tif isCancellationError(err) {\n\t\t\treturn false\n\t\t}\n\n\t\tsendError(errCh, ctx, subscriptionName, err)\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (t *Target) handleONCEMode(nctx, ctx context.Context, client gnmi.GNMI_SubscribeClient,\n\tsubscriptionName string, subConfig *types.SubscriptionConfig,\n\tresponseCh chan *SubscribeResponse, errCh chan *TargetError) bool {\n\n\terr := t.handleONCESubscriptionRcv(nctx, client, subscriptionName, subConfig, responseCh)\n\tif err != nil {\n\t\tif isCancellationError(err) {\n\t\t\treturn false\n\t\t}\n\n\t\tsendError(errCh, ctx, subscriptionName, err)\n\n\t\t// ONCE mode doesn't retry on EOF\n\t\tif errors.Is(err, io.EOF) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (t *Target) handlePOLLMode(nctx, ctx context.Context, client gnmi.GNMI_SubscribeClient,\n\tsubscriptionName string, subConfig *types.SubscriptionConfig,\n\tresponseCh chan *SubscribeResponse, errCh chan *TargetError) bool {\n\n\t// Start poll listener once per target (not per subscription attempt)\n\t// This prevents goroutine leaks on retry\n\tt.m.Lock()\n\tif t.pollChan == nil {\n\t\tt.pollChan = make(chan string, 10)\n\t\tgo t.listenPolls(ctx) // Use parent context, not nctx\n\t}\n\tt.m.Unlock()\n\n\terr := t.handlePollSubscriptionRcv(nctx, client, subscriptionName, subConfig, responseCh)\n\tif err != nil {\n\t\tif isCancellationError(err) {\n\t\t\treturn false\n\t\t}\n\n\t\tsendError(errCh, ctx, subscriptionName, err)\n\t\t// sendError(errCh, ctx, subscriptionName,\n\t\t// \tfmt.Errorf(\"retrying in %s\", t.Config.RetryTimer))\n\t\treturn true\n\t}\n\treturn false\n}\n\n// check if error is due to intentional cancellation\nfunc isCancellationError(err error) bool {\n\tif errors.Is(err, context.Canceled) {\n\t\treturn true\n\t}\n\tst, ok := status.FromError(err)\n\treturn ok && st.Code() == codes.Canceled\n}\n\n// send error to channel with context awareness\nfunc sendError(errCh 
chan *TargetError, ctx context.Context, subscriptionName string, err error) bool {\n\tselect {\n\tcase errCh <- &TargetError{\n\t\tSubscriptionName: subscriptionName,\n\t\tErr:              err,\n\t}:\n\t\treturn true\n\tcase <-ctx.Done():\n\t\treturn false\n\t}\n}\n\nfunc (t *Target) SubscribeStreamChan(ctx context.Context, req *gnmi.SubscribeRequest, subscriptionName string) (chan *gnmi.SubscribeResponse, chan error) {\n\tresponseCh := make(chan *gnmi.SubscribeResponse)\n\terrCh := make(chan error)\n\n\tgo func() {\n\t\tif req.GetSubscribe().GetMode() != gnmi.SubscriptionList_STREAM {\n\t\t\terrCh <- fmt.Errorf(\"subscribe request does not define a STREAM subscription: %v\", req.GetSubscribe().GetMode())\n\t\t\tclose(errCh)\n\t\t\tclose(responseCh)\n\t\t\treturn\n\t\t}\n\t\tvar subscribeClient gnmi.GNMI_SubscribeClient\n\t\tvar nctx context.Context\n\t\tvar cancel context.CancelFunc\n\t\tvar err error\n\t\tgoto SUBSC_NODELAY\n\tSUBSC:\n\t\t{\n\t\t\tretry := time.NewTimer(t.Config.RetryTimer)\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tretry.Stop()\n\t\t\t\treturn\n\t\t\tcase <-retry.C:\n\t\t\t}\n\t\t}\n\tSUBSC_NODELAY:\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tnctx, cancel = context.WithCancel(ctx)\n\t\t\tdefer cancel()\n\t\t\tnctx = t.appendRequestMetadata(nctx)\n\t\t\tsubscribeClient, err = t.Client.Subscribe(nctx, t.callOpts()...)\n\t\t\tif err != nil {\n\t\t\t\terrCh <- fmt.Errorf(\"failed to create a subscribe client, target='%s', retry in %s. err=%v\", t.Config.Name, t.Config.RetryTimer, err)\n\t\t\t\tcancel()\n\t\t\t\tgoto SUBSC\n\t\t\t}\n\t\t}\n\t\tt.m.Lock()\n\t\tif cfn, ok := t.subscribeCancelFn[subscriptionName]; ok {\n\t\t\tcfn()\n\t\t}\n\t\tt.SubscribeClients[subscriptionName] = subscribeClient\n\t\tt.subscribeCancelFn[subscriptionName] = cancel\n\t\tt.m.Unlock()\n\n\t\terr = subscribeClient.Send(req)\n\t\tif err != nil {\n\t\t\terrCh <- fmt.Errorf(\"target '%s' send error, retry in %s. 
err=%v\", t.Config.Name, t.Config.RetryTimer, err)\n\t\t\tcancel()\n\t\t\tgoto SUBSC\n\t\t}\n\n\t\tfor {\n\t\t\tif ctx.Err() != nil {\n\t\t\t\terrCh <- ctx.Err()\n\t\t\t\tcancel()\n\t\t\t\tgoto SUBSC\n\t\t\t}\n\t\t\tresponse, err := subscribeClient.Recv()\n\t\t\tif err != nil {\n\t\t\t\terrCh <- err\n\t\t\t\tcancel()\n\t\t\t\tgoto SUBSC\n\t\t\t}\n\t\t\tresponseCh <- response\n\t\t}\n\t}()\n\treturn responseCh, errCh\n}\n\nfunc (t *Target) SubscribeOnceChan(ctx context.Context, req *gnmi.SubscribeRequest) (chan *gnmi.SubscribeResponse, chan error) {\n\tresponseCh := make(chan *gnmi.SubscribeResponse)\n\terrCh := make(chan error)\n\tgo func() {\n\t\tnctx, cancel := context.WithCancel(ctx)\n\t\tdefer cancel()\n\n\t\tnctx = t.appendRequestMetadata(nctx)\n\t\tsubscribeClient, err := t.Client.Subscribe(nctx, t.callOpts()...)\n\t\tif err != nil {\n\t\t\terrCh <- err\n\t\t\treturn\n\t\t}\n\t\terr = subscribeClient.Send(req)\n\t\tif err != nil {\n\t\t\terrCh <- err\n\t\t\treturn\n\t\t}\n\t\tfor {\n\t\t\tresponse, err := subscribeClient.Recv()\n\t\t\tif err != nil {\n\t\t\t\terrCh <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresponseCh <- response\n\t\t}\n\t}()\n\n\treturn responseCh, errCh\n}\n\nfunc (t *Target) SubscribeOnce(ctx context.Context, req *gnmi.SubscribeRequest) ([]*gnmi.SubscribeResponse, error) {\n\tresponses := make([]*gnmi.SubscribeResponse, 0)\n\trspChan, errChan := t.SubscribeOnceChan(ctx, req)\nLOOP:\n\tfor {\n\t\tselect {\n\t\tcase r := <-rspChan:\n\t\t\tswitch r.Response.(type) {\n\t\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\t\tresponses = append(responses, r)\n\t\t\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\t\t\tbreak LOOP\n\t\t\t}\n\t\tcase err := <-errChan: // only non nil errors\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak LOOP\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn responses, nil\n}\n\nfunc (t *Target) SubscribePoll(ctx context.Context, subName string) error {\n\tt.m.Lock()\n\tstream, ok := 
t.SubscribeClients[subName]\n\tt.m.Unlock()\n\tif !ok {\n\t\treturn fmt.Errorf(\"unknown subscription name %q\", subName)\n\t}\n\treturn stream.Send(&gnmi.SubscribeRequest{\n\t\tRequest: &gnmi.SubscribeRequest_Poll{\n\t\t\tPoll: new(gnmi.Poll),\n\t\t},\n\t})\n}\n\nfunc (t *Target) ReadSubscriptions() (chan *SubscribeResponse, chan *TargetError) {\n\treturn t.subscribeResponses, t.errors\n}\n\nfunc (t *Target) NumberOfOnceSubscriptions() int {\n\tnum := 0\n\tt.m.Lock()\n\tdefer t.m.Unlock()\n\tfor _, sub := range t.Subscriptions {\n\t\tif strings.ToUpper(sub.Mode) == \"ONCE\" {\n\t\t\tnum++\n\t\t}\n\t}\n\treturn num\n}\n\nfunc (t *Target) DecodeProtoBytes(resp *gnmi.SubscribeResponse) error {\n\tif t.RootDesc == nil {\n\t\treturn nil\n\t}\n\tswitch resp := resp.Response.(type) {\n\tcase *gnmi.SubscribeResponse_Update:\n\t\tfor _, update := range resp.Update.Update {\n\t\t\tswitch update.Val.Value.(type) {\n\t\t\tcase *gnmi.TypedValue_ProtoBytes:\n\t\t\t\tm := dynamic.NewMessage(t.RootDesc.GetFile().FindMessage(\"Nokia.SROS.root\"))\n\t\t\t\terr := m.Unmarshal(update.Val.GetProtoBytes())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tjsondata, err := m.MarshalJSON()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tupdate.Val.Value = &gnmi.TypedValue_JsonVal{JsonVal: jsondata}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (t *Target) DeleteSubscription(name string) {\n\tt.m.Lock()\n\tdefer t.m.Unlock()\n\tif _, ok := t.subscribeCancelFn[name]; ok {\n\t\tt.subscribeCancelFn[name]()\n\t}\n\tdelete(t.subscribeCancelFn, name)\n\tdelete(t.SubscribeClients, name)\n\tdelete(t.Subscriptions, name)\n}\n\nfunc (t *Target) StopSubscription(name string) {\n\tt.m.Lock()\n\tdefer t.m.Unlock()\n\tcfn, ok := t.subscribeCancelFn[name]\n\tif ok {\n\t\tcfn()\n\t}\n\tdelete(t.subscribeCancelFn, name)\n\tdelete(t.SubscribeClients, name)\n}\n\nfunc (t *Target) listenPolls(ctx context.Context) {\n\tfor {\n\t\tselect {\n\t\tcase subName := 
<-t.pollChan:\n\t\t\terr := t.SubscribePoll(ctx, subName)\n\t\t\tif err != nil {\n\t\t\t\tt.errors <- &TargetError{\n\t\t\t\t\tSubscriptionName: subName,\n\t\t\t\t\tErr:              fmt.Errorf(\"failed to send PollRequest to subscription %s: %v\", subName, err),\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (t *Target) handleStreamSubscriptionRcv(ctx context.Context, stream gnmi.GNMI_SubscribeClient, subscriptionName string, subConfig *types.SubscriptionConfig, ch chan *SubscribeResponse) error {\n\tfor {\n\t\tif ctx.Err() != nil {\n\t\t\treturn nil\n\t\t}\n\t\tresponse, err := stream.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tselect {\n\t\tcase ch <- &SubscribeResponse{\n\t\t\tSubscriptionName:   subscriptionName,\n\t\t\tSubscriptionConfig: subConfig,\n\t\t\tResponse:           response,\n\t\t}:\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (t *Target) handleONCESubscriptionRcv(ctx context.Context, stream gnmi.GNMI_SubscribeClient, subscriptionName string, subConfig *types.SubscriptionConfig, ch chan *SubscribeResponse) error {\n\tfor {\n\t\tif ctx.Err() != nil {\n\t\t\treturn nil\n\t\t}\n\t\tresponse, err := stream.Recv()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\tcase ch <- &SubscribeResponse{\n\t\t\tSubscriptionName:   subscriptionName,\n\t\t\tSubscriptionConfig: subConfig,\n\t\t\tResponse:           response,\n\t\t}:\n\t\t}\n\n\t\tswitch response.Response.(type) {\n\t\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (t *Target) handlePollSubscriptionRcv(ctx context.Context, stream gnmi.GNMI_SubscribeClient, subscriptionName string, subConfig *types.SubscriptionConfig, ch chan *SubscribeResponse) error {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tresponse, err := stream.Recv()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase 
<-ctx.Done():\n\t\t\t\treturn nil\n\t\t\tcase ch <- &SubscribeResponse{\n\t\t\t\tSubscriptionName:   subscriptionName,\n\t\t\t\tSubscriptionConfig: subConfig,\n\t\t\t\tResponse:           response,\n\t\t\t}:\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/api/target/target.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage target\n\nimport (\n\t\"context\"\n\t\"encoding/base64\"\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/jhump/protoreflect/desc\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmi/proto/gnmi_ext\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"golang.org/x/net/proxy\"\n\t\"golang.org/x/oauth2\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/connectivity\"\n\t\"google.golang.org/grpc/credentials/oauth\"\n\t\"google.golang.org/grpc/metadata\"\n)\n\ntype TargetError struct {\n\tSubscriptionName string\n\tErr              error\n}\n\n// SubscribeResponse //\ntype SubscribeResponse struct {\n\tSubscriptionName   string\n\tSubscriptionConfig *types.SubscriptionConfig\n\tResponse           *gnmi.SubscribeResponse\n}\n\n// Target represents a gNMI enabled box\ntype Target struct {\n\tConfig        *types.TargetConfig                  `json:\"config,omitempty\"`\n\tSubscriptions map[string]*types.SubscriptionConfig `json:\"subscriptions,omitempty\"`\n\n\tm                 *sync.Mutex\n\tconn              *grpc.ClientConn\n\tClient            gnmi.GNMIClient                      `json:\"-\"`\n\tSubscribeClients  map[string]gnmi.GNMI_SubscribeClient `json:\"-\"` // subscription name to subscribeClient\n\tsubscribeCancelFn map[string]context.CancelFunc\n\n\tpollChan           chan string // subscription name to be polled\n\tsubscribeResponses chan *SubscribeResponse\n\terrors             chan *TargetError\n\n\tstopped  bool\n\tStopChan chan 
struct{}      `json:\"-\"`\n\tCfn      context.CancelFunc `json:\"-\"`\n\tRootDesc desc.Descriptor    `json:\"-\"`\n}\n\n// NewTarget //\nfunc NewTarget(c *types.TargetConfig) *Target {\n\tt := &Target{\n\t\tConfig:             c,\n\t\tSubscriptions:      make(map[string]*types.SubscriptionConfig),\n\t\tm:                  new(sync.Mutex),\n\t\tSubscribeClients:   make(map[string]gnmi.GNMI_SubscribeClient),\n\t\tsubscribeCancelFn:  make(map[string]context.CancelFunc),\n\t\tpollChan:           make(chan string),\n\t\tsubscribeResponses: make(chan *SubscribeResponse, c.BufferSize),\n\t\terrors:             make(chan *TargetError, c.BufferSize),\n\t\tStopChan:           make(chan struct{}),\n\t}\n\treturn t\n}\n\n// CreateGNMIClient //\nfunc (t *Target) CreateGNMIClient(ctx context.Context, opts ...grpc.DialOption) error {\n\ttOpts, err := t.Config.GrpcDialOptions()\n\tif err != nil {\n\t\treturn err\n\t}\n\topts = append(opts, tOpts...)\n\t// create a gRPC connection\n\taddrs := strings.Split(t.Config.Address, \",\")\n\tnumAddrs := len(addrs)\n\terrC := make(chan error, numAddrs)\n\tconnC := make(chan *grpc.ClientConn)\n\tdone := make(chan struct{})\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tfor _, addr := range addrs {\n\t\tgo func(addr string) {\n\t\t\t// copy opts\n\t\t\toptsCopy := make([]grpc.DialOption, len(opts))\n\t\t\tcopy(optsCopy, opts)\n\t\t\ttimeoutCtx, cancel := context.WithTimeout(ctx, t.Config.Timeout)\n\t\t\tdefer cancel()\n\n\t\t\t// add the local custom dialer only if the target is a not tunneled.\n\t\t\tif t.Config.TunnelTargetType == \"\" {\n\t\t\t\toptsCopy = append(optsCopy, grpc.WithContextDialer(t.createDialer(addr)))\n\t\t\t}\n\t\t\tconn, err := grpc.DialContext(timeoutCtx, addr, optsCopy...)\n\t\t\tif err != nil {\n\t\t\t\terrC <- fmt.Errorf(\"%s: %v\", addr, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase connC <- conn:\n\t\t\tcase <-done:\n\t\t\t\tif conn != nil 
{\n\t\t\t\t\tconn.Close()\n\t\t\t\t}\n\t\t\t}\n\t\t}(addr)\n\t}\n\terrs := make([]string, 0, numAddrs)\n\tfor {\n\t\tselect {\n\t\tcase conn := <-connC:\n\t\t\tclose(done)\n\t\t\tt.conn = conn\n\t\t\tt.Client = gnmi.NewGNMIClient(conn)\n\t\t\treturn nil\n\t\tcase err := <-errC:\n\t\t\terrs = append(errs, err.Error())\n\t\t\tif len(errs) == numAddrs {\n\t\t\t\treturn fmt.Errorf(\"%s\", strings.Join(errs, \", \"))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (t *Target) createDialer(addr string) func(context.Context, string) (net.Conn, error) {\n\t// socks5 proxy dialer\n\tif t.Config.Proxy != \"\" {\n\t\tif idx := strings.Index(t.Config.Proxy, \"://\"); idx >= 0 {\n\t\t\tproxyType := t.Config.Proxy[:idx]\n\t\t\tproxyAddress := t.Config.Proxy[idx+3:]\n\t\t\tif proxyType == \"socks5\" {\n\t\t\t\treturn t.createProxyDialer(proxyAddress)\n\t\t\t}\n\t\t}\n\t}\n\t// non socks5 proxy or non-proxied dialer\n\treturn t.createCustomDialer(addr)\n}\n\nfunc (t *Target) createProxyDialer(addr string) func(context.Context, string) (net.Conn, error) {\n\treturn func(_ context.Context, targetAddr string) (net.Conn, error) {\n\t\tdialer, err := proxy.SOCKS5(\"tcp\", addr, nil,\n\t\t\t&net.Dialer{\n\t\t\t\tTimeout:   t.Config.Timeout,\n\t\t\t\tKeepAlive: t.Config.TCPKeepalive,\n\t\t\t},\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn dialer.Dial(\"tcp\", targetAddr)\n\t}\n}\n\nfunc (t *Target) createCustomDialer(addr string) func(context.Context, string) (net.Conn, error) {\n\treturn func(ctx context.Context, _ string) (net.Conn, error) {\n\t\tdialer := net.Dialer{\n\t\t\tTimeout:   t.Config.Timeout,\n\t\t\tKeepAlive: t.Config.TCPKeepalive,\n\t\t}\n\t\tctx, cancel := context.WithTimeout(ctx, t.Config.Timeout)\n\t\tdefer cancel()\n\n\t\tvar networkType = \"tcp\"\n\t\tif indx := strings.Index(addr, \"://\"); indx > 0 {\n\t\t\tif addr[:indx] == \"unix\" {\n\t\t\t\tnetworkType = \"unix\"\n\t\t\t\taddr = addr[indx+3:]\n\t\t\t}\n\t\t}\n\t\treturn dialer.DialContext(ctx, 
networkType, addr)\n\t}\n}\n\nfunc (t *Target) callOpts() []grpc.CallOption {\n\tif t.Config.AuthScheme == \"\" {\n\t\treturn nil\n\t}\n\tcallOpts := make([]grpc.CallOption, 0, 1)\n\n\tvar auth string\n\tif t.Config.Username != nil {\n\t\tauth = *t.Config.Username\n\t}\n\tauth += \":\"\n\tif t.Config.Password != nil {\n\t\tauth += *t.Config.Password\n\t}\n\n\tcallOpts = append(callOpts,\n\t\tgrpc.PerRPCCredentials(\n\t\t\toauth.TokenSource{\n\t\t\t\tTokenSource: oauth2.StaticTokenSource(\n\t\t\t\t\t&oauth2.Token{\n\t\t\t\t\t\tAccessToken: base64.StdEncoding.EncodeToString([]byte(auth)),\n\t\t\t\t\t\tTokenType:   t.Config.AuthScheme,\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t))\n\n\treturn callOpts\n}\n\nfunc (t *Target) appendRequestMetadata(ctx context.Context) context.Context {\n\tctx = t.appendCredentials(ctx)\n\tctx = t.appendMetadata(ctx)\n\treturn ctx\n}\n\nfunc (t *Target) appendCredentials(ctx context.Context) context.Context {\n\tif t.Config.AuthScheme != \"\" {\n\t\treturn ctx\n\t}\n\n\tif t.Config.Username != nil && *t.Config.Username != \"\" {\n\t\tctx = metadata.AppendToOutgoingContext(ctx, \"username\", *t.Config.Username)\n\t}\n\tif t.Config.Password != nil && *t.Config.Password != \"\" {\n\t\tctx = metadata.AppendToOutgoingContext(ctx, \"password\", *t.Config.Password)\n\t}\n\treturn ctx\n}\n\nfunc (t *Target) appendMetadata(ctx context.Context) context.Context {\n\tvar pairs []string\n\tfor k, v := range t.Config.Metadata {\n\t\tpairs = append(pairs, k, v)\n\t}\n\treturn metadata.AppendToOutgoingContext(ctx, pairs...)\n}\n\n// Capabilities sends a gnmi.CapabilitiesRequest to the target *t and returns a gnmi.CapabilitiesResponse and an error\nfunc (t *Target) Capabilities(ctx context.Context, ext ...*gnmi_ext.Extension) (*gnmi.CapabilityResponse, error) {\n\treturn t.Client.Capabilities(t.appendRequestMetadata(ctx), &gnmi.CapabilityRequest{Extension: ext}, t.callOpts()...)\n}\n\n// Get sends a gnmi.GetRequest to the target *t and returns a 
gnmi.GetResponse and an error\nfunc (t *Target) Get(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) {\n\treturn t.Client.Get(t.appendRequestMetadata(ctx), req, t.callOpts()...)\n}\n\n// Set sends a gnmi.SetRequest to the target *t and returns a gnmi.SetResponse and an error\nfunc (t *Target) Set(ctx context.Context, req *gnmi.SetRequest) (*gnmi.SetResponse, error) {\n\treturn t.Client.Set(t.appendRequestMetadata(ctx), req, t.callOpts()...)\n}\n\nfunc (t *Target) StopSubscriptions() {\n\tt.m.Lock()\n\tdefer t.m.Unlock()\n\tfor _, cfn := range t.subscribeCancelFn {\n\t\tcfn()\n\t}\n\tif t.Cfn != nil {\n\t\tt.Cfn()\n\t}\n\tif !t.stopped {\n\t\tclose(t.StopChan)\n\t}\n\tt.stopped = true\n}\n\nfunc (t *Target) Close() error {\n\tt.StopSubscriptions()\n\tif t.conn != nil {\n\t\treturn t.conn.Close()\n\t}\n\treturn nil\n}\n\n// SubscribeClientStates returns current subscription states.\n// based on the SubscribeClients map.\nfunc (t *Target) SubscribeClientStates() map[string]bool {\n\tt.m.Lock()\n\tdefer t.m.Unlock()\n\tif len(t.Subscriptions) == 0 {\n\t\treturn nil\n\t}\n\tstates := make(map[string]bool, len(t.Subscriptions))\n\tfor name := range t.Subscriptions {\n\t\t_, ok := t.SubscribeClients[name]\n\t\tstates[name] = ok\n\t}\n\treturn states\n}\n\nfunc (t *Target) ConnState() string {\n\tif t.conn == nil {\n\t\treturn \"\"\n\t}\n\treturn t.conn.GetState().String()\n}\n\n// WaitForConnStateChange blocks until the gRPC connection state changes from\n// sourceState or ctx is done. Returns true if the state changed, false if\n// ctx expired. 
Returns false immediately if conn is nil.\nfunc (t *Target) WaitForConnStateChange(ctx context.Context, sourceState connectivity.State) bool {\n\tif t.conn == nil {\n\t\treturn false\n\t}\n\treturn t.conn.WaitForStateChange(ctx, sourceState)\n}\n\n// ConnectivityState returns the current gRPC connectivity state.\n// Returns connectivity.Shutdown if the connection is nil.\nfunc (t *Target) ConnectivityState() connectivity.State {\n\tif t.conn == nil {\n\t\treturn connectivity.Shutdown\n\t}\n\treturn t.conn.GetState()\n}\n"
  },
  {
    "path": "pkg/api/target.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage api\n\nimport (\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/AlekSi/pointer\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/target\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\nvar DefaultTargetTimeout = 10 * time.Second\n\ntype TargetOption func(*target.Target) error\n\nfunc NewTarget(opts ...TargetOption) (*target.Target, error) {\n\tt := target.NewTarget(&types.TargetConfig{})\n\tvar err error\n\tfor _, o := range opts {\n\t\terr = o(t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif t.Config.Address == \"\" {\n\t\treturn nil, errors.New(\"missing address\")\n\t}\n\tif t.Config.Name == \"\" {\n\t\tt.Config.Name = strings.Split(t.Config.Address, \",\")[0]\n\t}\n\tif t.Config.Timeout == 0 {\n\t\tt.Config.Timeout = DefaultTargetTimeout\n\t}\n\tif t.Config.Insecure == nil && t.Config.SkipVerify == nil {\n\t\tt.Config.Insecure = pointer.ToBool(false)\n\t\tt.Config.SkipVerify = pointer.ToBool(false)\n\t}\n\tif t.Config.SkipVerify == nil {\n\t\tt.Config.SkipVerify = pointer.ToBool(false)\n\t}\n\tif t.Config.Insecure == nil {\n\t\tt.Config.Insecure = pointer.ToBool(false)\n\t}\n\treturn t, nil\n}\n\n// Name sets the target name.\nfunc Name(name string) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.Name = name\n\t\treturn nil\n\t}\n}\n\n// Address sets the target address.\n// This Option can be set multiple times.\nfunc Address(addr string) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tif t.Config.Address != 
\"\" {\n\t\t\tt.Config.Address = strings.Join([]string{t.Config.Address, addr}, \",\")\n\t\t\treturn nil\n\t\t}\n\t\tt.Config.Address = addr\n\t\treturn nil\n\t}\n}\n\n// Username sets the target Username.\nfunc Username(username string) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.Username = pointer.ToString(username)\n\t\treturn nil\n\t}\n}\n\n// Password sets the target Password.\nfunc Password(password string) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.Password = pointer.ToString(password)\n\t\treturn nil\n\t}\n}\n\n// Timeout sets the gNMI client creation timeout.\nfunc Timeout(timeout time.Duration) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.Timeout = timeout\n\t\treturn nil\n\t}\n}\n\n// Insecure sets the option to create a gNMI client with an\n// insecure gRPC connection\nfunc Insecure(i bool) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.Insecure = pointer.ToBool(i)\n\t\treturn nil\n\t}\n}\n\n// SkipVerify sets the option to create a gNMI client with a\n// secure gRPC connection without verifying the target's certificates.\nfunc SkipVerify(i bool) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.SkipVerify = pointer.ToBool(i)\n\t\treturn nil\n\t}\n}\n\n// TLSCA sets that path towards the TLS certificate authority file.\nfunc TLSCA(tlsca string) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.TLSCA = pointer.ToString(tlsca)\n\t\treturn nil\n\t}\n}\n\n// TLSCert sets that path towards the TLS certificate file.\nfunc TLSCert(cert string) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.TLSCert = pointer.ToString(cert)\n\t\treturn nil\n\t}\n}\n\n// TLSKey sets that path towards the TLS key file.\nfunc TLSKey(key string) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.TLSKey = pointer.ToString(key)\n\t\treturn nil\n\t}\n}\n\n// TLSMinVersion sets the TLS minimum version used 
during the TLS handshake.\nfunc TLSMinVersion(v string) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.TLSMinVersion = v\n\t\treturn nil\n\t}\n}\n\n// TLSMaxVersion sets the TLS maximum version used during the TLS handshake.\nfunc TLSMaxVersion(v string) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.TLSMaxVersion = v\n\t\treturn nil\n\t}\n}\n\n// TLSVersion sets the desired TLS version used during the TLS handshake.\nfunc TLSVersion(v string) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.TLSVersion = v\n\t\treturn nil\n\t}\n}\n\n// TLSConfig\nfunc TLSConfig(tlsconfig *tls.Config) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.SetTLSConfig(tlsconfig)\n\t\treturn nil\n\t}\n}\n\n// LogTLSSecret, if set to true,\n// enables logging of the TLS master key.\nfunc LogTLSSecret(b bool) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.LogTLSSecret = pointer.ToBool(b)\n\t\treturn nil\n\t}\n}\n\n// Gzip, if set to true,\n// adds gzip compression to the gRPC connection.\nfunc Gzip(b bool) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.Gzip = pointer.ToBool(b)\n\t\treturn nil\n\t}\n}\n\n// Token sets the per RPC credentials for all RPC calls.\nfunc Token(token string) TargetOption {\n\treturn func(t *target.Target) error {\n\t\tt.Config.Token = pointer.ToString(token)\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "pkg/api/target_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage api\n\nimport (\n\t\"testing\"\n\n\t\"github.com/AlekSi/pointer\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\ntype input struct {\n\topts   []TargetOption\n\tconfig *types.TargetConfig\n}\n\nvar targetTestSet = map[string]input{\n\t\"address\": {\n\t\topts: []TargetOption{\n\t\t\tAddress(\"10.0.0.1:57400\"),\n\t\t\tInsecure(true),\n\t\t},\n\t\tconfig: &types.TargetConfig{\n\t\t\tName:       \"10.0.0.1:57400\",\n\t\t\tAddress:    \"10.0.0.1:57400\",\n\t\t\tInsecure:   pointer.ToBool(true),\n\t\t\tSkipVerify: pointer.ToBool(false),\n\t\t\tTimeout:    DefaultTargetTimeout,\n\t\t},\n\t},\n\t\"username\": {\n\t\topts: []TargetOption{\n\t\t\tAddress(\"10.0.0.1:57400\"),\n\t\t\tUsername(\"admin\"),\n\t\t},\n\t\tconfig: &types.TargetConfig{\n\t\t\tName:       \"10.0.0.1:57400\",\n\t\t\tAddress:    \"10.0.0.1:57400\",\n\t\t\tUsername:   pointer.ToString(\"admin\"),\n\t\t\tInsecure:   pointer.ToBool(false),\n\t\t\tSkipVerify: pointer.ToBool(false),\n\t\t\tTimeout:    DefaultTargetTimeout,\n\t\t},\n\t},\n\t\"two_addresses\": {\n\t\topts: []TargetOption{\n\t\t\tAddress(\"10.0.0.1:57400\"),\n\t\t\tAddress(\"10.0.0.2:57400\"),\n\t\t\tInsecure(true),\n\t\t},\n\t\tconfig: &types.TargetConfig{\n\t\t\tName:       \"10.0.0.1:57400\",\n\t\t\tAddress:    \"10.0.0.1:57400,10.0.0.2:57400\",\n\t\t\tInsecure:   pointer.ToBool(true),\n\t\t\tSkipVerify: pointer.ToBool(false),\n\t\t\tTimeout:    DefaultTargetTimeout,\n\t\t},\n\t},\n\t\"skip_verify\": {\n\t\topts: 
[]TargetOption{\n\t\t\tAddress(\"10.0.0.1:57400\"),\n\t\t\tSkipVerify(true),\n\t\t},\n\t\tconfig: &types.TargetConfig{\n\t\t\tName:       \"10.0.0.1:57400\",\n\t\t\tAddress:    \"10.0.0.1:57400\",\n\t\t\tInsecure:   pointer.ToBool(false),\n\t\t\tSkipVerify: pointer.ToBool(true),\n\t\t\tTimeout:    DefaultTargetTimeout,\n\t\t},\n\t},\n\t\"tlsca\": {\n\t\topts: []TargetOption{\n\t\t\tAddress(\"10.0.0.1:57400\"),\n\t\t\tTLSCA(\"tlsca_path\"),\n\t\t},\n\t\tconfig: &types.TargetConfig{\n\t\t\tName:       \"10.0.0.1:57400\",\n\t\t\tAddress:    \"10.0.0.1:57400\",\n\t\t\tInsecure:   pointer.ToBool(false),\n\t\t\tSkipVerify: pointer.ToBool(false),\n\t\t\tTimeout:    DefaultTargetTimeout,\n\t\t\tTLSCA:      pointer.ToString(\"tlsca_path\"),\n\t\t},\n\t},\n\t\"tls_key_cert\": {\n\t\topts: []TargetOption{\n\t\t\tAddress(\"10.0.0.1:57400\"),\n\t\t\tTLSKey(\"tlskey_path\"),\n\t\t\tTLSCert(\"tlscert_path\"),\n\t\t},\n\t\tconfig: &types.TargetConfig{\n\t\t\tName:       \"10.0.0.1:57400\",\n\t\t\tAddress:    \"10.0.0.1:57400\",\n\t\t\tInsecure:   pointer.ToBool(false),\n\t\t\tSkipVerify: pointer.ToBool(false),\n\t\t\tTimeout:    DefaultTargetTimeout,\n\t\t\tTLSKey:     pointer.ToString(\"tlskey_path\"),\n\t\t\tTLSCert:    pointer.ToString(\"tlscert_path\"),\n\t\t},\n\t},\n\t\"token\": {\n\t\topts: []TargetOption{\n\t\t\tAddress(\"10.0.0.1:57400\"),\n\t\t\tToken(\"token_value\"),\n\t\t},\n\t\tconfig: &types.TargetConfig{\n\t\t\tName:       \"10.0.0.1:57400\",\n\t\t\tAddress:    \"10.0.0.1:57400\",\n\t\t\tInsecure:   pointer.ToBool(false),\n\t\t\tSkipVerify: pointer.ToBool(false),\n\t\t\tTimeout:    DefaultTargetTimeout,\n\t\t\tToken:      pointer.ToString(\"token_value\"),\n\t\t},\n\t},\n\t\"gzip\": {\n\t\topts: []TargetOption{\n\t\t\tAddress(\"10.0.0.1:57400\"),\n\t\t\tGzip(true),\n\t\t},\n\t\tconfig: &types.TargetConfig{\n\t\t\tName:       \"10.0.0.1:57400\",\n\t\t\tAddress:    \"10.0.0.1:57400\",\n\t\t\tInsecure:   pointer.ToBool(false),\n\t\t\tSkipVerify: 
pointer.ToBool(false),\n\t\t\tTimeout:    DefaultTargetTimeout,\n\t\t\tGzip:       pointer.ToBool(true),\n\t\t},\n\t},\n}\n\nfunc TestNewTarget(t *testing.T) {\n\tfor name, item := range targetTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\ttg, err := NewTarget(item.opts...)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed at %q: %v\", name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif tg.Config.String() != item.config.String() {\n\t\t\t\tt.Errorf(\"failed at %q\", name)\n\t\t\t\tt.Errorf(\"expected %+v\", item.config)\n\t\t\t\tt.Errorf(\"     got %+v\", tg.Config)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/api/testutils/utils.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage testutils\n\nimport (\n\t\"bytes\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmi/proto/gnmi_ext\"\n\ttpb \"github.com/openconfig/grpctunnel/proto/tunnel\"\n)\n\nfunc CapabilitiesResponsesEqual(rsp1, rsp2 *gnmi.CapabilityResponse) bool {\n\tif rsp1 == nil && rsp2 == nil {\n\t\treturn true\n\t}\n\tif rsp1 == nil || rsp2 == nil {\n\t\treturn false\n\t}\n\tif rsp1.GNMIVersion != rsp2.GNMIVersion {\n\t\treturn false\n\t}\n\tif len(rsp1.SupportedEncodings) != len(rsp2.SupportedEncodings) {\n\t\treturn false\n\t}\n\tif len(rsp1.SupportedModels) != len(rsp2.SupportedModels) {\n\t\treturn false\n\t}\n\tfor i := range rsp1.SupportedEncodings {\n\t\tif rsp1.SupportedEncodings[i] != rsp2.SupportedEncodings[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor i := range rsp1.SupportedModels {\n\t\tif !cmp.Equal(rsp1.SupportedModels[i], rsp2.SupportedModels[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc GetRequestsEqual(req1, req2 *gnmi.GetRequest) bool {\n\tif req1 == nil && req2 == nil {\n\t\treturn true\n\t}\n\tif req1 == nil || req2 == nil {\n\t\treturn false\n\t}\n\tif req1.Encoding != req2.Encoding ||\n\t\treq1.Type != req2.Type {\n\t\treturn false\n\t}\n\tif !GnmiPathsEqual(req1.Prefix, req2.Prefix) {\n\t\treturn false\n\t}\n\tif len(req1.Path) != len(req2.Path) {\n\t\treturn false\n\t}\n\tfor i := range req1.Path {\n\t\tif !GnmiPathsEqual(req1.Path[i], req2.Path[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\tif len(req1.Extension) 
!= len(req2.Extension) {\n\t\treturn false\n\t}\n\tif len(req1.UseModels) != len(req2.UseModels) {\n\t\treturn false\n\t}\n\tfor i := range req1.UseModels {\n\t\tif req1.UseModels[i].Name != req2.UseModels[i].Name {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc SetRequestsEqual(req1, req2 *gnmi.SetRequest) bool {\n\tif req1 == nil && req2 == nil {\n\t\treturn true\n\t}\n\tif req1 == nil || req2 == nil {\n\t\treturn false\n\t}\n\tif len(req1.GetDelete()) != len(req2.GetDelete()) ||\n\t\tlen(req1.GetReplace()) != len(req2.GetReplace()) ||\n\t\tlen(req1.GetUpdate()) != len(req2.GetUpdate()) {\n\t\treturn false\n\t}\n\tif !GnmiPathsEqual(req1.GetPrefix(), req2.GetPrefix()) {\n\t\treturn false\n\t}\n\tfor i := range req1.GetDelete() {\n\t\tif !GnmiPathsEqual(req1.GetDelete()[i], req2.GetDelete()[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor i := range req1.GetUpdate() {\n\t\tif !GnmiPathsEqual(req1.GetUpdate()[i].GetPath(), req2.GetUpdate()[i].GetPath()) {\n\t\t\treturn false\n\t\t}\n\t\tif !cmp.Equal(req1.GetUpdate()[i].GetVal().GetValue(), req2.GetUpdate()[i].GetVal().GetValue()) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor i := range req1.GetReplace() {\n\t\tif !GnmiPathsEqual(req1.GetReplace()[i].GetPath(), req2.GetReplace()[i].GetPath()) {\n\t\t\treturn false\n\t\t}\n\t\tif !cmp.Equal(req1.GetReplace()[i].GetVal().GetValue(), req2.GetReplace()[i].GetVal().GetValue()) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc SubscribeRequestsEqual(req1, req2 *gnmi.SubscribeRequest) bool {\n\tif req1 == nil && req2 == nil {\n\t\treturn true\n\t}\n\tif req1 == nil || req2 == nil {\n\t\treturn false\n\t}\n\tif len(req1.GetExtension()) != len(req2.GetExtension()) {\n\t\treturn false\n\t}\n\t// only checks if extensions are of the same type\n\tfor i, ext := range req1.GetExtension() {\n\t\tswitch ext.Ext.(type) {\n\t\tcase *gnmi_ext.Extension_RegisteredExt:\n\t\t\tswitch req2.GetExtension()[i].Ext.(type) {\n\t\t\tcase 
*gnmi_ext.Extension_RegisteredExt:\n\t\t\tdefault:\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase *gnmi_ext.Extension_History:\n\t\t\tswitch req2.GetExtension()[i].Ext.(type) {\n\t\t\tcase *gnmi_ext.Extension_History:\n\t\t\tdefault:\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase *gnmi_ext.Extension_MasterArbitration:\n\t\t\tswitch req2.GetExtension()[i].Ext.(type) {\n\t\t\tcase *gnmi_ext.Extension_MasterArbitration:\n\t\t\tdefault:\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\tswitch req1.Request.(type) {\n\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\tswitch req2.Request.(type) {\n\t\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.SubscribeRequest_Poll:\n\t\tswitch req2.Request.(type) {\n\t\tcase *gnmi.SubscribeRequest_Poll:\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\t// compare subscribe request subscribe\n\tswitch req1 := req1.Request.(type) {\n\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\tswitch req2 := req2.Request.(type) {\n\t\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\t\tif req1.Subscribe.GetEncoding() != req2.Subscribe.GetEncoding() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif req1.Subscribe.GetMode() != req2.Subscribe.GetMode() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif req1.Subscribe.GetQos().GetMarking() != req2.Subscribe.GetQos().GetMarking() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif len(req1.Subscribe.GetSubscription()) != len(req2.Subscribe.GetSubscription()) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif req1.Subscribe.GetUpdatesOnly() != req2.Subscribe.GetUpdatesOnly() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif req1.Subscribe.GetAllowAggregation() != req2.Subscribe.GetAllowAggregation() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !GnmiPathsEqual(req1.Subscribe.Prefix, req2.Subscribe.Prefix) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif len(req1.Subscribe.GetUseModels()) != len(req2.Subscribe.GetUseModels()) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tfor i := range req1.Subscribe.GetUseModels() {\n\t\t\t\tif 
req1.Subscribe.GetUseModels()[i].Name != req2.Subscribe.GetUseModels()[i].Name {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor i, sub := range req1.Subscribe.GetSubscription() {\n\t\t\t\tif !GnmiSubscriptionEqual(sub, req2.Subscribe.GetSubscription()[i]) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\nfunc GetResponsesEqual(rsp1, rsp2 *gnmi.GetResponse) bool {\n\tif rsp1 == nil && rsp2 == nil {\n\t\treturn true\n\t}\n\tif rsp1 == nil || rsp2 == nil {\n\t\treturn false\n\t}\n\tif len(rsp1.GetNotification()) != len(rsp2.GetNotification()) {\n\t\treturn false\n\t}\n\tfor i := range rsp1.GetNotification() {\n\t\tif !GnmiNotificationsEqual(rsp1.GetNotification()[i], rsp2.GetNotification()[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc SetResponsesEqual(rsp1, rsp2 *gnmi.SetResponse) bool {\n\tif rsp1 == nil && rsp2 == nil {\n\t\treturn true\n\t}\n\tif rsp1 == nil || rsp2 == nil {\n\t\treturn false\n\t}\n\tif len(rsp1.GetResponse()) != len(rsp2.GetResponse()) {\n\t\treturn false\n\t}\n\tfor i := range rsp1.GetResponse() {\n\t\tif !GnmiUpdateResultEqual(rsp1.GetResponse()[i], rsp2.GetResponse()[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc SubscribeResponsesEqual(rsp1, rsp2 *gnmi.SubscribeResponse) bool {\n\tif rsp1 == nil && rsp2 == nil {\n\t\treturn true\n\t}\n\tif rsp1 == nil || rsp2 == nil {\n\t\treturn false\n\t}\n\n\tswitch rsp1.GetResponse().(type) {\n\tcase *gnmi.SubscribeResponse_Update:\n\t\tswitch rsp2.GetResponse().(type) {\n\t\tcase *gnmi.SubscribeResponse_Update:\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\tswitch rsp2.GetResponse().(type) {\n\t\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\n\tswitch rsp1 := rsp1.GetResponse().(type) {\n\tcase *gnmi.SubscribeResponse_Update:\n\t\tswitch rsp2 := rsp2.GetResponse().(type) {\n\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\treturn 
GnmiNotificationsEqual(rsp1.Update, rsp2.Update)\n\t\t}\n\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\tswitch rsp2 := rsp2.GetResponse().(type) {\n\t\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\t\tif rsp1.SyncResponse != rsp2.SyncResponse {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc GnmiPathsEqual(p1, p2 *gnmi.Path) bool {\n\tif p1 == nil && p2 == nil {\n\t\treturn true\n\t}\n\tif p1 == nil || p2 == nil {\n\t\treturn false\n\t}\n\tif p1.Origin != p2.Origin {\n\t\treturn false\n\t}\n\tif p1.Target != p2.Target {\n\t\treturn false\n\t}\n\tif len(p1.Elem) != len(p2.Elem) {\n\t\treturn false\n\t}\n\tfor i, e := range p1.Elem {\n\t\tif e.Name != p2.Elem[i].Name {\n\t\t\treturn false\n\t\t}\n\t\tif !cmp.Equal(e.Key, p2.Elem[i].Key) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc GnmiSubscriptionEqual(s1, s2 *gnmi.Subscription) bool {\n\tif s1 == nil && s2 == nil {\n\t\treturn true\n\t}\n\tif s1 == nil || s2 == nil {\n\t\treturn false\n\t}\n\tif s1.Mode != s2.Mode {\n\t\treturn false\n\t}\n\tif s1.SampleInterval != s2.SampleInterval {\n\t\treturn false\n\t}\n\tif s1.SuppressRedundant != s2.SuppressRedundant {\n\t\treturn false\n\t}\n\tif !GnmiPathsEqual(s1.Path, s2.Path) {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc GnmiUpdatesEqual(u1, u2 *gnmi.Update) bool {\n\tif u1 == nil && u2 == nil {\n\t\treturn true\n\t}\n\tif u1 == nil || u2 == nil {\n\t\treturn false\n\t}\n\tif u1.GetDuplicates() != u2.GetDuplicates() {\n\t\treturn false\n\t}\n\tif !GnmiPathsEqual(u1.GetPath(), u2.GetPath()) {\n\t\treturn false\n\t}\n\treturn cmp.Equal(u1.GetVal().GetValue(), u2.GetVal().GetValue())\n}\n\nfunc GnmiNotificationsEqual(n1, n2 *gnmi.Notification) bool {\n\tif n1.GetAtomic() != n2.GetAtomic() {\n\t\treturn false\n\t}\n\t// compare timestamps\n\tif n1.GetTimestamp() != n2.GetTimestamp() {\n\t\treturn false\n\t}\n\t// compare prefixes\n\tif !GnmiPathsEqual(n1.GetPrefix(), n2.GetPrefix()) {\n\t\treturn false\n\t}\n\t// compare 
updates\n\tif len(n1.GetUpdate()) != len(n2.GetUpdate()) {\n\t\treturn false\n\t}\n\tfor j := range n1.GetUpdate() {\n\t\tif !GnmiUpdatesEqual(n1.GetUpdate()[j], n2.GetUpdate()[j]) {\n\t\t\treturn false\n\t\t}\n\t}\n\t// compare deletes\n\tif len(n1.GetDelete()) != len(n2.GetDelete()) {\n\t\treturn false\n\t}\n\tfor j := range n1.GetDelete() {\n\t\tif !GnmiPathsEqual(n1.GetDelete()[j], n2.GetDelete()[j]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc GnmiUpdateResultEqual(u1, u2 *gnmi.UpdateResult) bool {\n\tif u1 == nil && u2 == nil {\n\t\treturn true\n\t}\n\tif u1 == nil || u2 == nil {\n\t\treturn false\n\t}\n\tif u1.GetOp() != u2.GetOp() {\n\t\treturn false\n\t}\n\tif !GnmiPathsEqual(u1.GetPath(), u2.GetPath()) {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc GnmiValuesEqual(v1, v2 *gnmi.TypedValue) bool {\n\tif v1 == nil && v2 == nil {\n\t\treturn true\n\t}\n\tif v1 == nil || v2 == nil {\n\t\treturn false\n\t}\n\tswitch v1 := v1.GetValue().(type) {\n\tcase *gnmi.TypedValue_AnyVal:\n\t\tswitch v2 := v2.GetValue().(type) {\n\t\tcase *gnmi.TypedValue_AnyVal:\n\t\t\tif v1 == nil && v2 == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1 == nil || v2 == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif v1.AnyVal == nil && v2.AnyVal == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1.AnyVal == nil || v2.AnyVal == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif v1.AnyVal.GetTypeUrl() != v2.AnyVal.GetTypeUrl() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn bytes.Equal(v1.AnyVal.GetValue(), v2.AnyVal.GetValue())\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.TypedValue_AsciiVal:\n\t\tswitch v2 := v2.GetValue().(type) {\n\t\tcase *gnmi.TypedValue_AsciiVal:\n\t\t\tif v1 == nil && v2 == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1 == nil || v2 == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn v1.AsciiVal == v2.AsciiVal\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.TypedValue_BoolVal:\n\t\tswitch v2 := v2.GetValue().(type) {\n\t\tcase *gnmi.TypedValue_BoolVal:\n\t\t\tif v1 == nil && v2 == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1 == nil || v2 == nil {\n\t\t\t\treturn 
false\n\t\t\t}\n\t\t\treturn v1.BoolVal == v2.BoolVal\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.TypedValue_BytesVal:\n\t\tswitch v2 := v2.GetValue().(type) {\n\t\tcase *gnmi.TypedValue_BytesVal:\n\t\t\tif v1 == nil && v2 == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1 == nil || v2 == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn bytes.Equal(v1.BytesVal, v2.BytesVal)\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.TypedValue_DecimalVal:\n\t\tswitch v2 := v2.GetValue().(type) {\n\t\tcase *gnmi.TypedValue_DecimalVal:\n\t\t\tif v1 == nil && v2 == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1 == nil || v2 == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t//lint:ignore SA1019 still need DecimalVal for backward compatibility\n\t\t\tif v1.DecimalVal.GetDigits() != v2.DecimalVal.GetDigits() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t//lint:ignore SA1019 still need DecimalVal for backward compatibility\n\t\t\treturn v1.DecimalVal.GetPrecision() == v2.DecimalVal.GetPrecision()\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.TypedValue_FloatVal:\n\t\tswitch v2 := v2.GetValue().(type) {\n\t\tcase *gnmi.TypedValue_FloatVal:\n\t\t\tif v1 == nil && v2 == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1 == nil || v2 == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t//lint:ignore SA1019 still need FloatVal for backward compatibility\n\t\t\treturn v1.FloatVal == v2.FloatVal\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.TypedValue_IntVal:\n\t\tswitch v2 := v2.GetValue().(type) {\n\t\tcase *gnmi.TypedValue_IntVal:\n\t\t\tif v1 == nil && v2 == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1 == nil || v2 == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn v1.IntVal == v2.IntVal\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.TypedValue_JsonIetfVal:\n\t\tswitch v2 := v2.GetValue().(type) {\n\t\tcase *gnmi.TypedValue_JsonIetfVal:\n\t\t\tif v1 == nil && v2 == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1 == nil || v2 == nil 
{\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn bytes.Equal(v1.JsonIetfVal, v2.JsonIetfVal)\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.TypedValue_JsonVal:\n\t\tswitch v2 := v2.GetValue().(type) {\n\t\tcase *gnmi.TypedValue_JsonVal:\n\t\t\tif v1 == nil && v2 == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1 == nil || v2 == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn bytes.Equal(v1.JsonVal, v2.JsonVal)\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.TypedValue_LeaflistVal:\n\t\tswitch v2 := v2.GetValue().(type) {\n\t\tcase *gnmi.TypedValue_LeaflistVal:\n\t\t\tif v1 == nil && v2 == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1 == nil || v2 == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif len(v1.LeaflistVal.GetElement()) != len(v2.LeaflistVal.GetElement()) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tfor i := range v1.LeaflistVal.GetElement() {\n\t\t\t\tif !GnmiValuesEqual(v1.LeaflistVal.Element[i], v2.LeaflistVal.Element[i]) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.TypedValue_ProtoBytes:\n\t\tswitch v2 := v2.GetValue().(type) {\n\t\tcase *gnmi.TypedValue_ProtoBytes:\n\t\t\tif v1 == nil && v2 == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1 == nil || v2 == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn bytes.Equal(v1.ProtoBytes, v2.ProtoBytes)\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.TypedValue_StringVal:\n\t\tswitch v2 := v2.GetValue().(type) {\n\t\tcase *gnmi.TypedValue_StringVal:\n\t\t\tif v1 == nil && v2 == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1 == nil || v2 == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn v1.StringVal == v2.StringVal\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *gnmi.TypedValue_UintVal:\n\t\tswitch v2 := v2.GetValue().(type) {\n\t\tcase *gnmi.TypedValue_UintVal:\n\t\t\tif v1 == nil && v2 == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1 == nil || v2 == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn v1.UintVal == 
v2.UintVal\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc RegisterOpEqual(r1, r2 *tpb.RegisterOp) bool {\n\tif r1 == nil && r2 == nil {\n\t\treturn true\n\t}\n\tif r1 == nil || r2 == nil {\n\t\treturn false\n\t}\n\tswitch r1 := r1.GetRegistration().(type) {\n\tcase *tpb.RegisterOp_Target:\n\t\tswitch r2 := r2.GetRegistration().(type) {\n\t\tcase *tpb.RegisterOp_Target:\n\t\t\tif r1.Target.GetAccept() != r2.Target.GetAccept() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r1.Target.GetOp() != r2.Target.GetOp() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r1.Target.GetTarget() != r2.Target.GetTarget() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r1.Target.GetError() != r2.Target.GetError() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r1.Target.GetTargetType() != r2.Target.GetTargetType() {\n\t\t\t\treturn false\n\t\t\t}\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *tpb.RegisterOp_Session:\n\t\tswitch r2 := r2.GetRegistration().(type) {\n\t\tcase *tpb.RegisterOp_Session:\n\t\t\tif r1.Session.GetAccept() != r2.Session.GetAccept() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r1.Session.GetTarget() != r2.Session.GetTarget() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r1.Session.GetError() != r2.Session.GetError() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r1.Session.GetTargetType() != r2.Session.GetTargetType() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r1.Session.GetTag() != r2.Session.GetTag() {\n\t\t\t\treturn false\n\t\t\t}\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\tcase *tpb.RegisterOp_Subscription:\n\t\tswitch r2 := r2.GetRegistration().(type) {\n\t\tcase *tpb.RegisterOp_Subscription:\n\t\t\tif r1.Subscription.GetAccept() != r2.Subscription.GetAccept() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r1.Subscription.GetOp() != r2.Subscription.GetOp() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r1.Subscription.GetError() != r2.Subscription.GetError() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif r1.Subscription.GetTargetType() != 
r2.Subscription.GetTargetType() {\n\t\t\t\treturn false\n\t\t\t}\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc TunnelDataEqual(r1, r2 *tpb.Data) bool {\n\tif r1 == nil && r2 == nil {\n\t\treturn true\n\t}\n\tif r1 == nil || r2 == nil {\n\t\treturn false\n\t}\n\tif r1.GetClose() != r2.GetClose() {\n\t\treturn false\n\t}\n\tif !bytes.Equal(r1.GetData(), r2.GetData()) {\n\t\treturn false\n\t}\n\tif r1.GetTag() != r2.GetTag() {\n\t\treturn false\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "pkg/api/tunnel.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage api\n\nimport (\n\t\"fmt\"\n\n\ttpb \"github.com/openconfig/grpctunnel/proto/tunnel\"\n\t\"google.golang.org/protobuf/proto\"\n)\n\n// TunnelOption is a function that acts on the supplied proto.Message.\n// The message is expected to be one of the protobuf defined gRPC tunnel messages\n// exchanged by the RPCs or any of the nested messages.\ntype TunnelOption func(proto.Message) error\n\n// apply is a helper function that simply applies the options to the proto.Message.\n// It returns an error if any of the options fails.\nfunc applyTunnelOpts(m proto.Message, opts ...TunnelOption) error {\n\tfor _, o := range opts {\n\t\tif err := o(m); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc NewRegisterOpTarget(opts ...TunnelOption) (*tpb.RegisterOp, error) {\n\tm := &tpb.RegisterOp{\n\t\tRegistration: new(tpb.RegisterOp_Target),\n\t}\n\terr := applyTunnelOpts(m, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc NewRegisterOpSession(opts ...TunnelOption) (*tpb.RegisterOp, error) {\n\tm := &tpb.RegisterOp{\n\t\tRegistration: new(tpb.RegisterOp_Session),\n\t}\n\terr := applyTunnelOpts(m, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc NewRegisterOpSubscription(opts ...TunnelOption) (*tpb.RegisterOp, error) {\n\tm := &tpb.RegisterOp{\n\t\tRegistration: new(tpb.RegisterOp_Subscription),\n\t}\n\terr := applyTunnelOpts(m, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc NewData(opts 
...TunnelOption) (*tpb.Data, error) {\n\tm := new(tpb.Data)\n\terr := applyTunnelOpts(m, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// Messages options\n\nfunc TunnelTarget(opts ...TunnelOption) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.RegisterOp:\n\t\t\tswitch msg := msg.Registration.(type) {\n\t\t\tcase *tpb.RegisterOp_Target:\n\t\t\t\ttarget := new(tpb.Target)\n\t\t\t\terr := applyTunnelOpts(target, opts...)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tmsg.Target = target\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option TunnelTarget: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc TunnelSession(opts ...TunnelOption) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.RegisterOp:\n\t\t\tswitch msg := msg.Registration.(type) {\n\t\t\tcase *tpb.RegisterOp_Session:\n\t\t\t\tsession := new(tpb.Session)\n\t\t\t\terr := applyTunnelOpts(session, opts...)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tmsg.Session = session\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option TunnelSession: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc TunnelSubscription(opts ...TunnelOption) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.RegisterOp:\n\t\t\tswitch msg := msg.Registration.(type) {\n\t\t\tcase *tpb.RegisterOp_Subscription:\n\t\t\t\tsubscription := new(tpb.Subscription)\n\t\t\t\terr := applyTunnelOpts(subscription, opts...)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tmsg.Subscription = subscription\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option TunnelSubscription: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// Common Options\nfunc TargetOpRemove() func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.Target:\n\t\t\tmsg.Op = tpb.Target_REMOVE\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option TargetOpRemove: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc Accept(b bool) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.Target:\n\t\t\tmsg.Accept = b\n\t\tcase *tpb.Session:\n\t\t\tmsg.Accept = b\n\t\tcase *tpb.Subscription:\n\t\t\tmsg.Accept = b\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Accept: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc TargetName(n string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.Target:\n\t\t\tmsg.Target = n\n\t\tcase *tpb.Session:\n\t\t\tmsg.Target = n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option TargetName: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc TargetType(typ string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.Target:\n\t\t\tmsg.TargetType = typ\n\t\tcase *tpb.Session:\n\t\t\tmsg.TargetType = typ\n\t\tcase *tpb.Subscription:\n\t\t\tmsg.TargetType = typ\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option 
TargetType: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc Error(e string) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.Target:\n\t\t\tmsg.Error = e\n\t\tcase *tpb.Session:\n\t\t\tmsg.Error = e\n\t\tcase *tpb.Subscription:\n\t\t\tmsg.Error = e\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Error: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// Target Options\n\nfunc TargetOpAdd() func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.Target:\n\t\t\tmsg.Op = tpb.Target_ADD\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option TargetOpAdd: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc Tag(t int32) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.Session:\n\t\t\tmsg.Tag = t\n\t\tcase *tpb.Data:\n\t\t\tmsg.Tag = t\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Tag: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// Subscription Options\n\nfunc SubscriptionOpSubscribe() func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.Subscription:\n\t\t\tmsg.Op = tpb.Subscription_SUBCRIBE\n\t\t\t//\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option SubscriptionOpSubscribe: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc SubscriptionOpUnsubscribe() func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil 
{\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.Subscription:\n\t\t\tmsg.Op = tpb.Subscription_UNSUBCRIBE\n\t\t\t//\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option SubscriptionOpUnsubscribe: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n\n// Data Options\n\nfunc Data(d []byte) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.Data:\n\t\t\tmsg.Data = d\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Data: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\nfunc Close(b bool) func(msg proto.Message) error {\n\treturn func(msg proto.Message) error {\n\t\tif msg == nil {\n\t\t\treturn ErrInvalidMsgType\n\t\t}\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *tpb.Data:\n\t\t\tmsg.Close = b\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"option Close: %w: %T\", ErrInvalidMsgType, msg)\n\t\t}\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "pkg/api/tunnel_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage api\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\ttpb \"github.com/openconfig/grpctunnel/proto/tunnel\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/testutils\"\n)\n\ntype registerOpInput struct {\n\topts []TunnelOption\n\tmsg  *tpb.RegisterOp\n\terr  error\n}\n\nvar registerOpTargetTestSet = map[string]registerOpInput{\n\t\"target_add\": {\n\t\topts: []TunnelOption{\n\t\t\tTunnelTarget(\n\t\t\t\tTargetOpAdd(),\n\t\t\t\tAccept(true),\n\t\t\t\tTargetName(\"target1\"),\n\t\t\t\tTargetType(\"target_type1\"),\n\t\t\t),\n\t\t},\n\t\tmsg: &tpb.RegisterOp{\n\t\t\tRegistration: &tpb.RegisterOp_Target{\n\t\t\t\tTarget: &tpb.Target{\n\t\t\t\t\tOp:         tpb.Target_ADD,\n\t\t\t\t\tAccept:     true,\n\t\t\t\t\tTarget:     \"target1\",\n\t\t\t\t\tTargetType: \"target_type1\",\n\t\t\t\t},\n\t\t\t}},\n\t\terr: nil,\n\t},\n\t\"target_remove\": {\n\t\topts: []TunnelOption{\n\t\t\tTunnelTarget(\n\t\t\t\tTargetOpRemove(),\n\t\t\t\tAccept(true),\n\t\t\t\tTargetName(\"target1\"),\n\t\t\t\tTargetType(\"target_type1\"),\n\t\t\t),\n\t\t},\n\t\tmsg: &tpb.RegisterOp{\n\t\t\tRegistration: &tpb.RegisterOp_Target{\n\t\t\t\tTarget: &tpb.Target{\n\t\t\t\t\tOp:         tpb.Target_REMOVE,\n\t\t\t\t\tAccept:     true,\n\t\t\t\t\tTarget:     \"target1\",\n\t\t\t\t\tTargetType: \"target_type1\",\n\t\t\t\t},\n\t\t\t}},\n\t\terr: nil,\n\t},\n\t\"target_error\": {\n\t\topts: 
[]TunnelOption{\n\t\t\tTunnelTarget(\n\t\t\t\tTargetOpRemove(),\n\t\t\t\tAccept(true),\n\t\t\t\tTargetName(\"target1\"),\n\t\t\t\tTargetType(\"target_type1\"),\n\t\t\t\tError(\"err1\"),\n\t\t\t),\n\t\t},\n\t\tmsg: &tpb.RegisterOp{\n\t\t\tRegistration: &tpb.RegisterOp_Target{\n\t\t\t\tTarget: &tpb.Target{\n\t\t\t\t\tOp:         tpb.Target_REMOVE,\n\t\t\t\t\tAccept:     true,\n\t\t\t\t\tTarget:     \"target1\",\n\t\t\t\t\tTargetType: \"target_type1\",\n\t\t\t\t\tError:      \"err1\",\n\t\t\t\t},\n\t\t\t}},\n\t\terr: nil,\n\t},\n\t\"target_nok\": {\n\t\topts: []TunnelOption{\n\t\t\tTunnelTarget(\n\t\t\t\tTag(42),\n\t\t\t\tAccept(true),\n\t\t\t\tTargetName(\"target1\"),\n\t\t\t\tTargetType(\"target_type1\"),\n\t\t\t),\n\t\t},\n\t\tmsg: nil,\n\t\terr: ErrInvalidMsgType,\n\t},\n}\nvar registerOpSessionTestSet = map[string]registerOpInput{\n\t\"session_ok\": {\n\t\topts: []TunnelOption{\n\t\t\tTunnelSession(\n\t\t\t\tTag(42),\n\t\t\t\tAccept(true),\n\t\t\t\tTargetName(\"target1\"),\n\t\t\t\tTargetType(\"target_type1\"),\n\t\t\t),\n\t\t},\n\t\tmsg: &tpb.RegisterOp{\n\t\t\tRegistration: &tpb.RegisterOp_Session{\n\t\t\t\tSession: &tpb.Session{\n\t\t\t\t\tTag:        42,\n\t\t\t\t\tAccept:     true,\n\t\t\t\t\tTarget:     \"target1\",\n\t\t\t\t\tTargetType: \"target_type1\",\n\t\t\t\t},\n\t\t\t}},\n\t\terr: nil,\n\t},\n\t\"session_nok\": {\n\t\topts: []TunnelOption{\n\t\t\tTunnelSession(\n\t\t\t\tTargetOpAdd(),\n\t\t\t\tAccept(true),\n\t\t\t\tTargetName(\"target1\"),\n\t\t\t\tTargetType(\"target_type1\"),\n\t\t\t),\n\t\t},\n\t\tmsg: nil,\n\t\terr: ErrInvalidMsgType,\n\t},\n\t\"session_err\": {\n\t\topts: []TunnelOption{\n\t\t\tTunnelSession(\n\t\t\t\tTag(42),\n\t\t\t\tAccept(true),\n\t\t\t\tTargetName(\"target1\"),\n\t\t\t\tTargetType(\"target_type1\"),\n\t\t\t\tError(\"err1\"),\n\t\t\t),\n\t\t},\n\t\tmsg: &tpb.RegisterOp{\n\t\t\tRegistration: &tpb.RegisterOp_Session{\n\t\t\t\tSession: &tpb.Session{\n\t\t\t\t\tTag:        42,\n\t\t\t\t\tAccept:     true,\n\t\t\t\t\tTarget:    
 \"target1\",\n\t\t\t\t\tTargetType: \"target_type1\",\n\t\t\t\t\tError:      \"err1\",\n\t\t\t\t},\n\t\t\t}},\n\t\terr: nil,\n\t},\n}\nvar registerOpSubscriptionTestSet = map[string]registerOpInput{\n\t\"subscription_op_subscribe\": {\n\t\topts: []TunnelOption{\n\t\t\tTunnelSubscription(\n\t\t\t\tSubscriptionOpSubscribe(),\n\t\t\t\tAccept(true),\n\t\t\t\tTargetType(\"target_type1\"),\n\t\t\t),\n\t\t},\n\t\tmsg: &tpb.RegisterOp{\n\t\t\tRegistration: &tpb.RegisterOp_Subscription{\n\t\t\t\tSubscription: &tpb.Subscription{\n\t\t\t\t\tOp:         tpb.Subscription_SUBCRIBE,\n\t\t\t\t\tAccept:     true,\n\t\t\t\t\tTargetType: \"target_type1\",\n\t\t\t\t},\n\t\t\t}},\n\t\terr: nil,\n\t},\n\t\"subscription_op_unsubscribe\": {\n\t\topts: []TunnelOption{\n\t\t\tTunnelSubscription(\n\t\t\t\tSubscriptionOpUnsubscribe(),\n\t\t\t\tAccept(true),\n\t\t\t\tTargetType(\"target_type1\"),\n\t\t\t),\n\t\t},\n\t\tmsg: &tpb.RegisterOp{\n\t\t\tRegistration: &tpb.RegisterOp_Subscription{\n\t\t\t\tSubscription: &tpb.Subscription{\n\t\t\t\t\tOp:         tpb.Subscription_UNSUBCRIBE,\n\t\t\t\t\tAccept:     true,\n\t\t\t\t\tTargetType: \"target_type1\",\n\t\t\t\t},\n\t\t\t}},\n\t\terr: nil,\n\t},\n\t\"subscription_nok\": {\n\t\topts: []TunnelOption{\n\t\t\tTunnelSubscription(\n\t\t\t\tSubscriptionOpSubscribe(),\n\t\t\t\tAccept(true),\n\t\t\t\tTargetName(\"target1\"),\n\t\t\t\tTargetType(\"target_type1\"),\n\t\t\t),\n\t\t},\n\t\tmsg: nil,\n\t\terr: ErrInvalidMsgType,\n\t},\n\t\"subscription_err\": {\n\t\topts: []TunnelOption{\n\t\t\tTunnelSubscription(\n\t\t\t\tSubscriptionOpUnsubscribe(),\n\t\t\t\tAccept(true),\n\t\t\t\tTargetType(\"target_type1\"),\n\t\t\t\tError(\"err1\"),\n\t\t\t),\n\t\t},\n\t\tmsg: &tpb.RegisterOp{\n\t\t\tRegistration: &tpb.RegisterOp_Subscription{\n\t\t\t\tSubscription: &tpb.Subscription{\n\t\t\t\t\tOp:         tpb.Subscription_UNSUBCRIBE,\n\t\t\t\t\tAccept:     true,\n\t\t\t\t\tTargetType: \"target_type1\",\n\t\t\t\t\tError:      \"err1\",\n\t\t\t\t},\n\t\t\t}},\n\t\terr: 
nil,\n\t},\n}\n\nfunc TestNewRegister(t *testing.T) {\n\tfor name, item := range registerOpTargetTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tnreq, err := NewRegisterOpTarget(item.opts...)\n\t\t\tif err != nil {\n\t\t\t\tuerr := errors.Unwrap(err)\n\t\t\t\tif !errors.Is(uerr, item.err) {\n\t\t\t\t\tt.Errorf(\"%q failed\", name)\n\t\t\t\t\tt.Errorf(\"%q expected err : %v\", name, item.err)\n\t\t\t\t\tt.Errorf(\"%q got err      : %v\", name, err)\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !testutils.RegisterOpEqual(nreq, item.msg) {\n\t\t\t\tt.Errorf(\"%q failed\", name)\n\t\t\t\tt.Errorf(\"%q expected result : %+v\", name, item.msg)\n\t\t\t\tt.Errorf(\"%q got result      : %+v\", name, nreq)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n\tfor name, item := range registerOpSessionTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tnreq, err := NewRegisterOpSession(item.opts...)\n\t\t\tif err != nil {\n\t\t\t\tuerr := errors.Unwrap(err)\n\t\t\t\tif !errors.Is(uerr, item.err) {\n\t\t\t\t\tt.Errorf(\"%q failed\", name)\n\t\t\t\t\tt.Errorf(\"%q expected err : %v\", name, item.err)\n\t\t\t\t\tt.Errorf(\"%q got err      : %v\", name, err)\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !testutils.RegisterOpEqual(nreq, item.msg) {\n\t\t\t\tt.Errorf(\"%q failed\", name)\n\t\t\t\tt.Errorf(\"%q expected result : %+v\", name, item.msg)\n\t\t\t\tt.Errorf(\"%q got result      : %+v\", name, nreq)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n\tfor name, item := range registerOpSubscriptionTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tnreq, err := NewRegisterOpSubscription(item.opts...)\n\t\t\tif err != nil {\n\t\t\t\tuerr := errors.Unwrap(err)\n\t\t\t\tif !errors.Is(uerr, item.err) {\n\t\t\t\t\tt.Errorf(\"%q failed\", name)\n\t\t\t\t\tt.Errorf(\"%q expected err : %v\", name, item.err)\n\t\t\t\t\tt.Errorf(\"%q got err      : %v\", name, err)\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif 
!testutils.RegisterOpEqual(nreq, item.msg) {\n\t\t\t\tt.Errorf(\"%q failed\", name)\n\t\t\t\tt.Errorf(\"%q expected result : %+v\", name, item.msg)\n\t\t\t\tt.Errorf(\"%q got result      : %+v\", name, nreq)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\ntype dataInput struct {\n\topts []TunnelOption\n\tmsg  *tpb.Data\n\terr  error\n}\n\nvar dataTestSet = map[string]dataInput{\n\t\"data_ok\": {\n\t\topts: []TunnelOption{\n\t\t\tTag(42),\n\t\t\tData([]byte(\"foo\")),\n\t\t\tClose(true),\n\t\t},\n\t\tmsg: &tpb.Data{\n\t\t\tTag:   42,\n\t\t\tData:  []byte(\"foo\"),\n\t\t\tClose: true,\n\t\t},\n\t\terr: nil,\n\t},\n\t\"data_nok\": {\n\t\topts: []TunnelOption{\n\t\t\tTargetName(\"bar\"),\n\t\t\tTag(42),\n\t\t\tData([]byte(\"foo\")),\n\t\t\tClose(true),\n\t\t},\n\t\tmsg: nil,\n\t\terr: ErrInvalidMsgType,\n\t},\n}\n\nfunc TestNewData(t *testing.T) {\n\tfor name, item := range dataTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tnreq, err := NewData(item.opts...)\n\t\t\tif err != nil {\n\t\t\t\tuerr := errors.Unwrap(err)\n\t\t\t\tif !errors.Is(uerr, item.err) {\n\t\t\t\t\tt.Errorf(\"%q failed\", name)\n\t\t\t\t\tt.Errorf(\"%q expected err : %v\", name, item.err)\n\t\t\t\t\tt.Errorf(\"%q got err      : %v\", name, err)\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !testutils.TunnelDataEqual(nreq, item.msg) {\n\t\t\t\tt.Errorf(\"%q failed\", name)\n\t\t\t\tt.Errorf(\"%q expected result : %+v\", name, item.msg)\n\t\t\t\tt.Errorf(\"%q got result      : %+v\", name, nreq)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/api/types/sasl.go",
    "content": "package types\n\ntype SASL struct {\n\tUser      string `mapstructure:\"user,omitempty\"`\n\tPassword  string `mapstructure:\"password,omitempty\"`\n\tMechanism string `mapstructure:\"mechanism,omitempty\"`\n\tTokenURL  string `mapstructure:\"token-url,omitempty\"`\n}\n"
  },
  {
    "path": "pkg/api/types/subscription.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage types\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tnotApplicable = \"NA\"\n)\n\n// SubscriptionConfig //\ntype SubscriptionConfig struct {\n\tName                string                `mapstructure:\"name,omitempty\" json:\"name,omitempty\"`\n\tModels              []string              `mapstructure:\"models,omitempty\" json:\"models,omitempty\"`\n\tPrefix              string                `mapstructure:\"prefix,omitempty\" json:\"prefix,omitempty\"`\n\tTarget              string                `mapstructure:\"target,omitempty\" json:\"target,omitempty\"`\n\tSetTarget           bool                  `mapstructure:\"set-target,omitempty\" json:\"set-target,omitempty\"`\n\tPaths               []string              `mapstructure:\"paths,omitempty\" json:\"paths,omitempty\"`\n\tMode                string                `mapstructure:\"mode,omitempty\" json:\"mode,omitempty\"`\n\tStreamMode          string                `mapstructure:\"stream-mode,omitempty\" json:\"stream-mode,omitempty\"`\n\tEncoding            *string               `mapstructure:\"encoding,omitempty\" json:\"encoding,omitempty\"`\n\tQos                 *uint32               `mapstructure:\"qos,omitempty\" json:\"qos,omitempty\"`\n\tSampleInterval      *time.Duration        `mapstructure:\"sample-interval,omitempty\" json:\"sample-interval,omitempty\"`\n\tHeartbeatInterval   *time.Duration        `mapstructure:\"heartbeat-interval,omitempty\" 
json:\"heartbeat-interval,omitempty\"`\n\tSuppressRedundant   bool                  `mapstructure:\"suppress-redundant,omitempty\" json:\"suppress-redundant,omitempty\"`\n\tUpdatesOnly         bool                  `mapstructure:\"updates-only,omitempty\" json:\"updates-only,omitempty\"`\n\tHistory             *HistoryConfig        `mapstructure:\"history,omitempty\" json:\"history,omitempty\"`\n\tStreamSubscriptions []*SubscriptionConfig `mapstructure:\"stream-subscriptions,omitempty\" json:\"stream-subscriptions,omitempty\"`\n\tOutputs             []string              `mapstructure:\"outputs,omitempty\" json:\"outputs,omitempty\"`\n\tDepth               uint32                `mapstructure:\"depth,omitempty\" json:\"depth,omitempty\"`\n}\n\ntype HistoryConfig struct {\n\tSnapshot time.Time `mapstructure:\"snapshot,omitempty\" json:\"snapshot,omitempty\"`\n\tStart    time.Time `mapstructure:\"start,omitempty\" json:\"start,omitempty\"`\n\tEnd      time.Time `mapstructure:\"end,omitempty\" json:\"end,omitempty\"`\n}\n\n// String //\nfunc (sc *SubscriptionConfig) String() string {\n\tb, err := json.Marshal(sc)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (sc *SubscriptionConfig) PathsString() string {\n\treturn fmt.Sprintf(\"- %s\", strings.Join(sc.Paths, \"\\n- \"))\n}\n\nfunc (sc *SubscriptionConfig) PrefixString() string {\n\tif sc.Prefix == \"\" {\n\t\treturn notApplicable\n\t}\n\treturn sc.Prefix\n}\n\nfunc (sc *SubscriptionConfig) ModeString() string {\n\tif strings.ToLower(sc.Mode) == \"stream\" {\n\t\treturn fmt.Sprintf(\"%s/%s\", strings.ToLower(sc.Mode), strings.ToLower(sc.StreamMode))\n\t}\n\treturn strings.ToLower(sc.Mode)\n}\n\nfunc (sc *SubscriptionConfig) SampleIntervalString() string {\n\tif strings.ToLower(sc.Mode) == \"stream\" && strings.ToLower(sc.StreamMode) == \"sample\" {\n\t\tif sc.SampleInterval == nil {\n\t\t\treturn \"0s\"\n\t\t}\n\t\treturn sc.SampleInterval.String()\n\t}\n\treturn notApplicable\n}\n\nfunc (sc 
*SubscriptionConfig) ModelsString() string {\n\treturn fmt.Sprintf(\"- %s\", strings.Join(sc.Models, \"\\n- \"))\n}\n\nfunc (sc *SubscriptionConfig) QosString() string {\n\tif sc.Qos == nil {\n\t\treturn notApplicable\n\t}\n\treturn fmt.Sprintf(\"%d\", *sc.Qos)\n}\n\nfunc (sc *SubscriptionConfig) HeartbeatIntervalString() string {\n\tif sc.HeartbeatInterval == nil {\n\t\treturn \"0s\"\n\t}\n\treturn sc.HeartbeatInterval.String()\n}\n\nfunc (sc *SubscriptionConfig) SuppressRedundantString() string {\n\treturn fmt.Sprintf(\"%t\", sc.SuppressRedundant)\n}\n\nfunc (sc *SubscriptionConfig) UpdatesOnlyString() string {\n\treturn fmt.Sprintf(\"%t\", sc.UpdatesOnly)\n}\n"
  },
  {
    "path": "pkg/api/types/target.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage types\n\nimport (\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"maps\"\n\t\"os\"\n\t\"reflect\"\n\t\"slices\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org/x/oauth2\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/credentials/insecure\"\n\t\"google.golang.org/grpc/credentials/oauth\"\n\t\"google.golang.org/grpc/encoding/gzip\"\n\t\"google.golang.org/grpc/keepalive\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n)\n\n// map of supported cipher suites\nfunc ciphersMap() map[string]uint16 {\n\treturn map[string]uint16{\n\t\t// secure, up to tls1.2\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA\": tls.TLS_RSA_WITH_AES_128_CBC_SHA,\n\t\t\"TLS_RSA_WITH_AES_256_CBC_SHA\": tls.TLS_RSA_WITH_AES_256_CBC_SHA,\n\t\t// secure, only tls1.2\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\": tls.TLS_RSA_WITH_AES_128_GCM_SHA256,\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\": tls.TLS_RSA_WITH_AES_256_GCM_SHA384,\n\t\t// secure, tls1.3\n\t\t\"TLS_AES_128_GCM_SHA256\":       tls.TLS_AES_128_GCM_SHA256,\n\t\t\"TLS_AES_256_GCM_SHA384\":       tls.TLS_AES_256_GCM_SHA384,\n\t\t\"TLS_CHACHA20_POLY1305_SHA256\": tls.TLS_CHACHA20_POLY1305_SHA256,\n\t\t// secure, ECDHE\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\":          tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\":          tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\":            
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\":            tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\":       tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\":       tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\":         tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\":         tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\":   tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,\n\t\t// insecure\n\t\t\"TLS_RSA_WITH_RC4_128_SHA\":                tls.TLS_RSA_WITH_RC4_128_SHA,\n\t\t\"TLS_RSA_WITH_3DES_EDE_CBC_SHA\":           tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA256\":         tls.TLS_RSA_WITH_AES_128_CBC_SHA256,\n\t\t\"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\":        tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,\n\t\t\"TLS_ECDHE_RSA_WITH_RC4_128_SHA\":          tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,\n\t\t\"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\":     tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\":   tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,\n\t}\n}\n\nvar cipherSuitesPreferenceOrder = []uint16{\n\t// AEADs w/ ECDHE\n\ttls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,\n\ttls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,\n\ttls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,\n\n\t// CBC w/ ECDHE\n\ttls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,\n\ttls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,\n\n\t// AEADs w/o ECDHE\n\ttls.TLS_RSA_WITH_AES_128_GCM_SHA256,\n\ttls.TLS_RSA_WITH_AES_256_GCM_SHA384,\n\n\t// CBC w/o ECDHE\n\ttls.TLS_RSA_WITH_AES_128_CBC_SHA,\n\ttls.TLS_RSA_WITH_AES_256_CBC_SHA,\n\n\t// 3DES\n\ttls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,\n\ttls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,\n\n\t// disabled cipher suites\n\t// CBC_SHA256\n\ttls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,\n\ttls.TLS_RSA_WITH_AES_128_CBC_SHA256,\n\n\t// RC4\n\ttls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,\n\ttls.TLS_RSA_WITH_RC4_128_SHA,\n}\n\nvar disabledCipherSuites = []uint16{\n\t// CBC_SHA256\n\ttls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,\n\ttls.TLS_RSA_WITH_AES_128_CBC_SHA256,\n\n\t// RC4\n\ttls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,\n\ttls.TLS_RSA_WITH_RC4_128_SHA,\n}\n\nvar (\n\tdefaultCipherSuitesLen = len(cipherSuitesPreferenceOrder) - len(disabledCipherSuites)\n\tdefaultCipherSuites    = cipherSuitesPreferenceOrder[:defaultCipherSuitesLen]\n)\n\nvar defaultCipherSuitesTLS13 = []uint16{\n\ttls.TLS_AES_128_GCM_SHA256,\n\ttls.TLS_AES_256_GCM_SHA384,\n\ttls.TLS_CHACHA20_POLY1305_SHA256,\n}\n\n// TargetConfig //\ntype TargetConfig struct {\n\tName                       string            `mapstructure:\"name,omitempty\" yaml:\"name,omitempty\" json:\"name,omitempty\"`\n\tAddress                    string            `mapstructure:\"address,omitempty\" yaml:\"address,omitempty\" json:\"address,omitempty\"`\n\tUsername                   *string           `mapstructure:\"username,omitempty\" yaml:\"username,omitempty\" json:\"username,omitempty\"`\n\tPassword                   *string           `mapstructure:\"password,omitempty\" yaml:\"password,omitempty\" json:\"password,omitempty\"`\n\tAuthScheme                 string            `mapstructure:\"auth-scheme,omitempty\" 
yaml:\"auth-scheme,omitempty\" json:\"auth-scheme,omitempty\"`\n\tTimeout                    time.Duration     `mapstructure:\"timeout,omitempty\" yaml:\"timeout,omitempty\" json:\"timeout,omitempty\"`\n\tInsecure                   *bool             `mapstructure:\"insecure,omitempty\" yaml:\"insecure,omitempty\" json:\"insecure,omitempty\"`\n\tTLSCA                      *string           `mapstructure:\"tls-ca,omitempty\" yaml:\"tls-ca,omitempty\" json:\"tls-ca,omitempty\"`\n\tTLSCert                    *string           `mapstructure:\"tls-cert,omitempty\" yaml:\"tls-cert,omitempty\" json:\"tls-cert,omitempty\"`\n\tTLSKey                     *string           `mapstructure:\"tls-key,omitempty\" yaml:\"tls-key,omitempty\" json:\"tls-key,omitempty\"`\n\tSkipVerify                 *bool             `mapstructure:\"skip-verify,omitempty\" yaml:\"skip-verify,omitempty\" json:\"skip-verify,omitempty\"`\n\tTLSServerName              string            `mapstructure:\"tls-server-name,omitempty\" yaml:\"tls-server-name,omitempty\" json:\"tls-server-name,omitempty\"`\n\tSubscriptions              []string          `mapstructure:\"subscriptions,omitempty\" yaml:\"subscriptions,omitempty\" json:\"subscriptions,omitempty\"`\n\tOutputs                    []string          `mapstructure:\"outputs,omitempty\" yaml:\"outputs,omitempty\" json:\"outputs,omitempty\"`\n\tBufferSize                 uint              `mapstructure:\"buffer-size,omitempty\" yaml:\"buffer-size,omitempty\" json:\"buffer-size,omitempty\"`\n\tGRPCReadBufferSize         *int              `mapstructure:\"grpc-read-buffer-size,omitempty\" yaml:\"grpc-read-buffer-size,omitempty\" json:\"grpc-read-buffer-size,omitempty\"`\n\tGRPCWriteBufferSize        *int              `mapstructure:\"grpc-write-buffer-size,omitempty\" yaml:\"grpc-write-buffer-size,omitempty\" json:\"grpc-write-buffer-size,omitempty\"`\n\tGRPCConnWindowSize         *int              `mapstructure:\"grpc-conn-window-size,omitempty\" 
yaml:\"grpc-conn-window-size,omitempty\" json:\"grpc-conn-window-size,omitempty\"`\n\tGRPCWindowSize             *int              `mapstructure:\"grpc-window-size,omitempty\" yaml:\"grpc-window-size,omitempty\" json:\"grpc-window-size,omitempty\"`\n\tGRPCStaticConnWindowSize   *int              `mapstructure:\"grpc-static-conn-window-size,omitempty\" yaml:\"grpc-static-conn-window-size,omitempty\" json:\"grpc-static-conn-window-size,omitempty\"`\n\tGRPCStaticStreamWindowSize *int              `mapstructure:\"grpc-static-stream-window-size,omitempty\" yaml:\"grpc-static-stream-window-size,omitempty\" json:\"grpc-static-stream-window-size,omitempty\"`\n\tRetryTimer                 time.Duration     `mapstructure:\"retry-timer,omitempty\" yaml:\"retry-timer,omitempty\" json:\"retry-timer,omitempty\"`\n\tTLSMinVersion              string            `mapstructure:\"tls-min-version,omitempty\" yaml:\"tls-min-version,omitempty\" json:\"tls-min-version,omitempty\"`\n\tTLSMaxVersion              string            `mapstructure:\"tls-max-version,omitempty\" yaml:\"tls-max-version,omitempty\" json:\"tls-max-version,omitempty\"`\n\tTLSVersion                 string            `mapstructure:\"tls-version,omitempty\" yaml:\"tls-version,omitempty\" json:\"tls-version,omitempty\"`\n\tLogTLSSecret               *bool             `mapstructure:\"log-tls-secret,omitempty\" yaml:\"log-tls-secret,omitempty\" json:\"log-tls-secret,omitempty\"`\n\tProtoFiles                 []string          `mapstructure:\"proto-files,omitempty\" yaml:\"proto-files,omitempty\" json:\"proto-files,omitempty\"`\n\tProtoDirs                  []string          `mapstructure:\"proto-dirs,omitempty\" yaml:\"proto-dirs,omitempty\" json:\"proto-dirs,omitempty\"`\n\tTags                       []string          `mapstructure:\"tags,omitempty\" yaml:\"tags,omitempty\" json:\"tags,omitempty\"`\n\tEventTags                  map[string]string `mapstructure:\"event-tags,omitempty\" yaml:\"event-tags,omitempty\" 
json:\"event-tags,omitempty\"`\n\tGzip                       *bool             `mapstructure:\"gzip,omitempty\" yaml:\"gzip,omitempty\" json:\"gzip,omitempty\"`\n\tToken                      *string           `mapstructure:\"token,omitempty\" yaml:\"token,omitempty\" json:\"token,omitempty\"`\n\tProxy                      string            `mapstructure:\"proxy,omitempty\" yaml:\"proxy,omitempty\" json:\"proxy,omitempty\"`\n\t//\n\tTunnelTargetType string            `mapstructure:\"-\" yaml:\"tunnel-target-type,omitempty\" json:\"tunnel-target-type,omitempty\"`\n\tEncoding         *string           `mapstructure:\"encoding,omitempty\" yaml:\"encoding,omitempty\" json:\"encoding,omitempty\"`\n\tMetadata         map[string]string `mapstructure:\"metadata,omitempty\" yaml:\"metadata,omitempty\" json:\"metadata,omitempty\"`\n\tCipherSuites     []string          `mapstructure:\"cipher-suites,omitempty\" yaml:\"cipher-suites,omitempty\" json:\"cipher-suites,omitempty\"`\n\tTCPKeepalive     time.Duration     `mapstructure:\"tcp-keepalive,omitempty\" yaml:\"tcp-keepalive,omitempty\" json:\"tcp-keepalive,omitempty\"`\n\tGRPCKeepalive    *ClientKeepalive  `mapstructure:\"grpc-keepalive,omitempty\" yaml:\"grpc-keepalive,omitempty\" json:\"grpc-keepalive,omitempty\"`\n\n\ttlsConfig *tls.Config\n}\n\ntype ClientKeepalive struct {\n\tTime                time.Duration `mapstructure:\"time,omitempty\"`\n\tTimeout             time.Duration `mapstructure:\"timeout,omitempty\"`\n\tPermitWithoutStream bool          `mapstructure:\"permit-without-stream,omitempty\"`\n}\n\nfunc (tc TargetConfig) String() string {\n\tif tc.Password != nil {\n\t\tpwd := \"****\"\n\t\ttc.Password = &pwd\n\t}\n\n\tb, err := json.Marshal(tc)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn string(b)\n}\n\nfunc clonePtr[T any](p *T) *T {\n\tif p == nil {\n\t\treturn nil\n\t}\n\tv := *p\n\treturn &v\n}\n\nfunc (tc *TargetConfig) DeepCopy() *TargetConfig {\n\tif tc == nil {\n\t\treturn nil\n\t}\n\tntc := 
&TargetConfig{\n\t\tName:                       tc.Name,\n\t\tAddress:                    tc.Address,\n\t\tUsername:                   clonePtr(tc.Username),\n\t\tPassword:                   clonePtr(tc.Password),\n\t\tAuthScheme:                 tc.AuthScheme,\n\t\tTimeout:                    tc.Timeout,\n\t\tInsecure:                   clonePtr(tc.Insecure),\n\t\tTLSCA:                      clonePtr(tc.TLSCA),\n\t\tTLSCert:                    clonePtr(tc.TLSCert),\n\t\tTLSKey:                     clonePtr(tc.TLSKey),\n\t\tSkipVerify:                 clonePtr(tc.SkipVerify),\n\t\tTLSServerName:              tc.TLSServerName,\n\t\tSubscriptions:              make([]string, 0, len(tc.Subscriptions)),\n\t\tOutputs:                    make([]string, 0, len(tc.Outputs)),\n\t\tBufferSize:                 tc.BufferSize,\n\t\tGRPCReadBufferSize:         clonePtr(tc.GRPCReadBufferSize),\n\t\tGRPCWriteBufferSize:        clonePtr(tc.GRPCWriteBufferSize),\n\t\tGRPCConnWindowSize:         clonePtr(tc.GRPCConnWindowSize),\n\t\tGRPCWindowSize:             clonePtr(tc.GRPCWindowSize),\n\t\tGRPCStaticConnWindowSize:   clonePtr(tc.GRPCStaticConnWindowSize),\n\t\tGRPCStaticStreamWindowSize: clonePtr(tc.GRPCStaticStreamWindowSize),\n\t\tRetryTimer:                 tc.RetryTimer,\n\t\tTLSMinVersion:              tc.TLSMinVersion,\n\t\tTLSMaxVersion:              tc.TLSMaxVersion,\n\t\tTLSVersion:                 tc.TLSVersion,\n\t\tLogTLSSecret:               clonePtr(tc.LogTLSSecret),\n\t\tProtoFiles:                 make([]string, 0, len(tc.ProtoFiles)),\n\t\tProtoDirs:                  make([]string, 0, len(tc.ProtoDirs)),\n\t\tTags:                       make([]string, 0, len(tc.Tags)),\n\t\tEventTags:                  make(map[string]string, len(tc.EventTags)),\n\t\tGzip:                       clonePtr(tc.Gzip),\n\t\tToken:                      clonePtr(tc.Token),\n\t\tProxy:                      tc.Proxy,\n\t\tTunnelTargetType:           tc.TunnelTargetType,\n\t\tEncoding:       
            clonePtr(tc.Encoding),\n\t\tMetadata:                   make(map[string]string, len(tc.Metadata)),\n\t\tCipherSuites:               make([]string, 0, len(tc.CipherSuites)),\n\t\tTCPKeepalive:               tc.TCPKeepalive,\n\t}\n\tntc.Subscriptions = append(ntc.Subscriptions, tc.Subscriptions...)\n\tntc.Outputs = append(ntc.Outputs, tc.Outputs...)\n\tntc.ProtoFiles = append(ntc.ProtoFiles, tc.ProtoFiles...)\n\tntc.ProtoDirs = append(ntc.ProtoDirs, tc.ProtoDirs...)\n\tntc.Tags = append(ntc.Tags, tc.Tags...)\n\tntc.CipherSuites = append(ntc.CipherSuites, tc.CipherSuites...)\n\n\tmaps.Copy(ntc.EventTags, tc.EventTags)\n\tmaps.Copy(ntc.Metadata, tc.Metadata)\n\n\tif tc.GRPCKeepalive != nil {\n\t\tntc.GRPCKeepalive = &ClientKeepalive{\n\t\t\tTime:                tc.GRPCKeepalive.Time,\n\t\t\tTimeout:             tc.GRPCKeepalive.Timeout,\n\t\t\tPermitWithoutStream: tc.GRPCKeepalive.PermitWithoutStream,\n\t\t}\n\t}\n\treturn ntc\n}\n\nfunc (tc *TargetConfig) SetTLSConfig(tlsConfig *tls.Config) {\n\ttc.tlsConfig = tlsConfig\n}\n\n// NewTLSConfig //\nfunc (tc *TargetConfig) NewTLSConfig() (*tls.Config, error) {\n\tif tc.tlsConfig != nil {\n\t\treturn tc.tlsConfig, nil\n\t}\n\tvar ca, cert, key string\n\tif tc.TLSCA != nil {\n\t\tca = *tc.TLSCA\n\t}\n\tif tc.TLSCert != nil {\n\t\tcert = *tc.TLSCert\n\t}\n\tif tc.TLSKey != nil {\n\t\tkey = *tc.TLSKey\n\t}\n\tvar skipVerify bool\n\tif tc.SkipVerify != nil {\n\t\tskipVerify = *tc.SkipVerify\n\t}\n\ttlsConfig, err := utils.NewTLSConfig(ca, cert, key, \"\", skipVerify, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif tlsConfig == nil {\n\t\treturn nil, nil\n\t}\n\tif tc.LogTLSSecret != nil && *tc.LogTLSSecret {\n\t\tlogPath := tc.Name + \".tlssecret.log\"\n\t\tw, err := os.Create(logPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttlsConfig.KeyLogWriter = w\n\t}\n\n\ttlsConfig.MaxVersion = tc.getTLSMaxVersion()\n\ttlsConfig.MinVersion = tc.getTLSMinVersion()\n\ttlsConfig.ServerName = 
tc.TLSServerName\n\n\t// tc.cipher-suites is not set\n\tif len(tlsConfig.CipherSuites) == 0 && len(tc.CipherSuites) == 0 {\n\t\tcs := make([]uint16, len(defaultCipherSuites), len(defaultCipherSuites)+len(defaultCipherSuitesTLS13))\n\t\tcopy(cs, defaultCipherSuites)\n\t\tif tlsConfig.MaxVersion == tls.VersionTLS13 || tlsConfig.MaxVersion == 0 {\n\t\t\tcs = append(cs, defaultCipherSuitesTLS13...)\n\t\t}\n\t\ttlsConfig.CipherSuites = cs\n\t}\n\t// tc.cipher-suites is set\n\tif len(tlsConfig.CipherSuites) == 0 && len(tc.CipherSuites) != 0 {\n\t\ttlsConfig.CipherSuites = make([]uint16, 0, len(tc.CipherSuites))\n\t\tcmap := ciphersMap()\n\t\tfor _, cs := range tc.CipherSuites {\n\t\t\tif _, ok := cmap[cs]; !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unknown cipher suite %q\", cs)\n\t\t\t}\n\t\t\ttlsConfig.CipherSuites = append(tlsConfig.CipherSuites, cmap[cs])\n\t\t}\n\t}\n\treturn tlsConfig, nil\n}\n\n// GrpcDialOptions creates the grpc.dialOption list from the target's configuration\nfunc (tc *TargetConfig) GrpcDialOptions() ([]grpc.DialOption, error) {\n\ttOpts := make([]grpc.DialOption, 0, 1)\n\t// gzip\n\tif tc.Gzip != nil && *tc.Gzip {\n\t\ttOpts = append(tOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))\n\t}\n\t// gRPC keepalive\n\tif tc.GRPCKeepalive != nil {\n\t\ttOpts = append(tOpts, grpc.WithKeepaliveParams(keepalive.ClientParameters{\n\t\t\tTime:                tc.GRPCKeepalive.Time,\n\t\t\tTimeout:             tc.GRPCKeepalive.Timeout,\n\t\t\tPermitWithoutStream: tc.GRPCKeepalive.PermitWithoutStream,\n\t\t}))\n\t}\n\n\tif tc.GRPCReadBufferSize != nil {\n\t\ttOpts = append(tOpts,\n\t\t\tgrpc.WithReadBufferSize(*tc.GRPCReadBufferSize))\n\t}\n\n\tif tc.GRPCWriteBufferSize != nil {\n\t\ttOpts = append(tOpts,\n\t\t\tgrpc.WithWriteBufferSize(*tc.GRPCWriteBufferSize))\n\t}\n\n\tif tc.GRPCConnWindowSize != nil {\n\t\ttOpts = append(tOpts,\n\t\t\tgrpc.WithInitialConnWindowSize(int32(*tc.GRPCConnWindowSize)))\n\t}\n\n\tif tc.GRPCWindowSize != nil 
{\n\t\ttOpts = append(tOpts,\n\t\t\tgrpc.WithInitialWindowSize(int32(*tc.GRPCWindowSize)))\n\t}\n\n\tif tc.GRPCStaticConnWindowSize != nil {\n\t\ttOpts = append(tOpts,\n\t\t\tgrpc.WithStaticConnWindowSize(int32(*tc.GRPCStaticConnWindowSize)))\n\t}\n\n\tif tc.GRPCStaticStreamWindowSize != nil {\n\t\ttOpts = append(tOpts,\n\t\t\tgrpc.WithStaticStreamWindowSize(int32(*tc.GRPCStaticStreamWindowSize)))\n\t}\n\n\t// insecure\n\tif tc.Insecure != nil && *tc.Insecure {\n\t\ttOpts = append(tOpts,\n\t\t\tgrpc.WithTransportCredentials(\n\t\t\t\tinsecure.NewCredentials(),\n\t\t\t),\n\t\t)\n\t\treturn tOpts, nil\n\t}\n\t// secure\n\ttlsConfig, err := tc.NewTLSConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttOpts = append(tOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))\n\t// token credentials\n\tif tc.Token != nil && *tc.Token != \"\" {\n\t\ttOpts = append(tOpts,\n\t\t\tgrpc.WithPerRPCCredentials(\n\t\t\t\toauth.TokenSource{\n\t\t\t\t\tTokenSource: oauth2.StaticTokenSource(\n\t\t\t\t\t\t&oauth2.Token{\n\t\t\t\t\t\t\tAccessToken: *tc.Token,\n\t\t\t\t\t\t},\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t))\n\t}\n\treturn tOpts, nil\n}\n\nfunc (tc *TargetConfig) UsernameString() string {\n\tif tc.Username == nil {\n\t\treturn notApplicable\n\t}\n\treturn *tc.Username\n}\n\nfunc (tc *TargetConfig) PasswordString() string {\n\tif tc.Password == nil {\n\t\treturn notApplicable\n\t}\n\treturn *tc.Password\n}\n\nfunc (tc *TargetConfig) InsecureString() string {\n\tif tc.Insecure == nil {\n\t\treturn notApplicable\n\t}\n\treturn fmt.Sprintf(\"%t\", *tc.Insecure)\n}\n\nfunc (tc *TargetConfig) TLSCAString() string {\n\tif tc.TLSCA == nil || *tc.TLSCA == \"\" {\n\t\treturn notApplicable\n\t}\n\treturn *tc.TLSCA\n}\n\nfunc (tc *TargetConfig) TLSKeyString() string {\n\tif tc.TLSKey == nil || *tc.TLSKey == \"\" {\n\t\treturn notApplicable\n\t}\n\treturn *tc.TLSKey\n}\n\nfunc (tc *TargetConfig) TLSCertString() string {\n\tif tc.TLSCert == nil || *tc.TLSCert == \"\" {\n\t\treturn 
notApplicable\n\t}\n\treturn *tc.TLSCert\n}\n\nfunc (tc *TargetConfig) SkipVerifyString() string {\n\tif tc.SkipVerify == nil {\n\t\treturn notApplicable\n\t}\n\treturn fmt.Sprintf(\"%t\", *tc.SkipVerify)\n}\n\nfunc (tc *TargetConfig) SubscriptionString() string {\n\treturn fmt.Sprintf(\"- %s\", strings.Join(tc.Subscriptions, \"\\n\"))\n}\n\nfunc (tc *TargetConfig) OutputsString() string {\n\treturn strings.Join(tc.Outputs, \"\\n\")\n}\n\nfunc (tc *TargetConfig) BufferSizeString() string {\n\treturn fmt.Sprintf(\"%d\", tc.BufferSize)\n}\n\nfunc (tc *TargetConfig) getTLSMinVersion() uint16 {\n\tv := tlsVersionStringToUint(tc.TLSVersion)\n\tif v > 0 {\n\t\treturn v\n\t}\n\treturn tlsVersionStringToUint(tc.TLSMinVersion)\n}\n\nfunc (tc *TargetConfig) getTLSMaxVersion() uint16 {\n\tv := tlsVersionStringToUint(tc.TLSVersion)\n\tif v > 0 {\n\t\treturn v\n\t}\n\treturn tlsVersionStringToUint(tc.TLSMaxVersion)\n}\n\nfunc tlsVersionStringToUint(v string) uint16 {\n\tswitch v {\n\tdefault:\n\t\treturn 0\n\tcase \"1.3\":\n\t\treturn tls.VersionTLS13\n\tcase \"1.2\":\n\t\treturn tls.VersionTLS12\n\tcase \"1.1\":\n\t\treturn tls.VersionTLS11\n\tcase \"1.0\", \"1\":\n\t\treturn tls.VersionTLS10\n\t}\n}\n\nfunc (tc *TargetConfig) Equal(other *TargetConfig) bool {\n\tif tc == other {\n\t\treturn true\n\t}\n\tif tc == nil || other == nil {\n\t\treturn false\n\t}\n\n\tptrEq := func(a, b any) bool {\n\t\tif a == nil && b == nil {\n\t\t\treturn true\n\t\t}\n\t\tif a == nil || b == nil {\n\t\t\treturn false\n\t\t}\n\t\treturn reflect.DeepEqual(a, b)\n\t}\n\n\treturn tc.Name == other.Name &&\n\t\ttc.Address == other.Address &&\n\t\tptrEq(tc.Username, other.Username) &&\n\t\tptrEq(tc.Password, other.Password) &&\n\t\ttc.AuthScheme == other.AuthScheme &&\n\t\ttc.Timeout == other.Timeout &&\n\t\tptrEq(tc.Insecure, other.Insecure) &&\n\t\tptrEq(tc.TLSCA, other.TLSCA) &&\n\t\tptrEq(tc.TLSCert, other.TLSCert) &&\n\t\tptrEq(tc.TLSKey, other.TLSKey) &&\n\t\tptrEq(tc.SkipVerify, 
other.SkipVerify) &&\n\t\ttc.TLSServerName == other.TLSServerName &&\n\t\tslices.Equal(tc.Subscriptions, other.Subscriptions) &&\n\t\tslices.Equal(tc.Outputs, other.Outputs) &&\n\t\ttc.BufferSize == other.BufferSize &&\n\t\ttc.RetryTimer == other.RetryTimer &&\n\t\ttc.TLSMinVersion == other.TLSMinVersion &&\n\t\ttc.TLSMaxVersion == other.TLSMaxVersion &&\n\t\ttc.TLSVersion == other.TLSVersion &&\n\t\tptrEq(tc.LogTLSSecret, other.LogTLSSecret) &&\n\t\tslices.Equal(tc.ProtoFiles, other.ProtoFiles) &&\n\t\tslices.Equal(tc.ProtoDirs, other.ProtoDirs) &&\n\t\tslices.Equal(tc.Tags, other.Tags) &&\n\t\tmaps.Equal(tc.EventTags, other.EventTags) &&\n\t\tptrEq(tc.Gzip, other.Gzip) &&\n\t\tptrEq(tc.Token, other.Token) &&\n\t\ttc.Proxy == other.Proxy &&\n\t\ttc.TunnelTargetType == other.TunnelTargetType &&\n\t\tptrEq(tc.Encoding, other.Encoding) &&\n\t\tmaps.Equal(tc.Metadata, other.Metadata) &&\n\t\tslices.Equal(tc.CipherSuites, other.CipherSuites) &&\n\t\ttc.TCPKeepalive == other.TCPKeepalive &&\n\t\treflect.DeepEqual(tc.GRPCKeepalive, other.GRPCKeepalive) &&\n\t\tptrEq(tc.GRPCReadBufferSize, other.GRPCReadBufferSize) &&\n\t\tptrEq(tc.GRPCWriteBufferSize, other.GRPCWriteBufferSize) &&\n\t\tptrEq(tc.GRPCConnWindowSize, other.GRPCConnWindowSize) &&\n\t\tptrEq(tc.GRPCWindowSize, other.GRPCWindowSize) &&\n\t\tptrEq(tc.GRPCStaticConnWindowSize, other.GRPCStaticConnWindowSize) &&\n\t\tptrEq(tc.GRPCStaticStreamWindowSize, other.GRPCStaticStreamWindowSize)\n}\n"
  },
  {
    "path": "pkg/api/types/tls.go",
    "content": "package types\n\nimport \"fmt\"\n\ntype TLSConfig struct {\n\tCaFile     string `mapstructure:\"ca-file,omitempty\"`\n\tKeyFile    string `mapstructure:\"key-file,omitempty\"`\n\tCertFile   string `mapstructure:\"cert-file,omitempty\"`\n\tSkipVerify bool   `mapstructure:\"skip-verify,omitempty\"`\n\tClientAuth string `mapstructure:\"client-auth,omitempty\"`\n}\n\nfunc (t *TLSConfig) Validate() error {\n\tif t == nil {\n\t\treturn nil\n\t}\n\tswitch t.ClientAuth {\n\tcase \"\", \"request\":\n\tcase \"require\", \"verify-if-given\", \"require-verify\":\n\t\tif t.CaFile == \"\" {\n\t\t\treturn fmt.Errorf(\"ca-file is required when `client-auth` is %q\", t.ClientAuth)\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown `client-auth` mode: %s\", t.ClientAuth)\n\t}\n\treturn nil\n}\n\nfunc (t *TLSConfig) Equal(other *TLSConfig) bool {\n\tif t == nil && other == nil {\n\t\treturn true\n\t}\n\tif t == nil || other == nil {\n\t\treturn false\n\t}\n\treturn t.CaFile == other.CaFile &&\n\t\tt.CertFile == other.CertFile &&\n\t\tt.KeyFile == other.KeyFile &&\n\t\tt.SkipVerify == other.SkipVerify &&\n\t\tt.ClientAuth == other.ClientAuth\n}\n"
  },
  {
    "path": "pkg/api/utils/tls.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage utils\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"crypto/x509/pkix\"\n\t\"encoding/pem\"\n\t\"fmt\"\n\t\"io\"\n\t\"math/big\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sync\"\n\t\"time\"\n)\n\n// NewTLSConfig generates a *tls.Config based on given CA, certificate, key files and skipVerify flag\n// if certificate and key are missing a self signed key pair is generated.\n// The certificates paths can be local or remote, http(s) and (s)ftp are supported for remote files.\nfunc NewTLSConfig(ca, cert, key, clientAuth string, skipVerify, genSelfSigned bool) (*tls.Config, error) {\n\tif !(skipVerify || ca != \"\" || (cert != \"\" && key != \"\")) {\n\t\treturn nil, nil\n\t}\n\n\ttlsConfig := &tls.Config{\n\t\tInsecureSkipVerify: skipVerify,\n\t}\n\n\t// set clientAuth\n\tswitch clientAuth {\n\tcase \"\":\n\t\tif ca != \"\" {\n\t\t\ttlsConfig.ClientAuth = tls.RequireAndVerifyClientCert\n\t\t}\n\tcase \"request\":\n\t\ttlsConfig.ClientAuth = tls.RequestClientCert\n\tcase \"require\":\n\t\ttlsConfig.ClientAuth = tls.RequireAnyClientCert\n\tcase \"verify-if-given\":\n\t\ttlsConfig.ClientAuth = tls.VerifyClientCertIfGiven\n\tcase \"require-verify\":\n\t\ttlsConfig.ClientAuth = tls.RequireAndVerifyClientCert\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown client-auth mode: %s\", clientAuth)\n\t}\n\tif cert != \"\" && key != \"\" {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\t\tdefer 
cancel()\n\n\t\tvar certBytes, keyBytes []byte\n\n\t\terrCh := make(chan error, 2)\n\t\twg := new(sync.WaitGroup)\n\t\twg.Add(2)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tvar err error\n\t\t\tcertBytes, err = ReadLocalFile(ctx, cert)\n\t\t\tif err != nil {\n\t\t\t\terrCh <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tvar err error\n\t\t\tkeyBytes, err = ReadLocalFile(ctx, key)\n\t\t\tif err != nil {\n\t\t\t\terrCh <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}()\n\t\twg.Wait()\n\t\tclose(errCh)\n\t\tfor err := range errCh {\n\t\t\treturn nil, err\n\t\t}\n\t\tcertificate, err := tls.X509KeyPair(certBytes, keyBytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttlsConfig.Certificates = []tls.Certificate{certificate}\n\t} else if genSelfSigned {\n\t\tcert, err := SelfSignedCerts()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttlsConfig.Certificates = []tls.Certificate{cert}\n\t}\n\tif ca != \"\" {\n\t\tcertPool, err := LoadCACertificates(ca)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttlsConfig.RootCAs = certPool\n\t\ttlsConfig.ClientCAs = certPool\n\t}\n\treturn tlsConfig, nil\n}\n\nfunc SelfSignedCerts() (tls.Certificate, error) {\n\tnotBefore := time.Now()\n\tnotAfter := notBefore.Add(365 * 24 * time.Hour)\n\n\tserialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))\n\tif err != nil {\n\t\treturn tls.Certificate{}, err\n\t}\n\tcertTemplate := &x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"openconfig.net\"},\n\t\t},\n\t\tDNSNames:              []string{\"openconfig.net\"},\n\t\tNotBefore:             notBefore,\n\t\tNotAfter:              notAfter,\n\t\tKeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t}\n\tpriv, err := rsa.GenerateKey(rand.Reader, 4096)\n\tif err 
!= nil {\n\t\treturn tls.Certificate{}, err\n\t}\n\tderBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, &priv.PublicKey, priv)\n\tif err != nil {\n\t\treturn tls.Certificate{}, err\n\t}\n\tcertBuff := new(bytes.Buffer)\n\tkeyBuff := new(bytes.Buffer)\n\tpem.Encode(certBuff, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes})\n\tpem.Encode(keyBuff, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(priv)})\n\treturn tls.X509KeyPair(certBuff.Bytes(), keyBuff.Bytes())\n}\n\n// ReadLocalFile reads a file from the local file system\n// (or from stdin when path is \"-\")\n// and returns its raw content\nfunc ReadLocalFile(ctx context.Context, path string) ([]byte, error) {\n\t// read from stdin\n\tif path == \"-\" {\n\t\treturn readFromStdin(ctx)\n\t}\n\n\t// local file\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tst, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif st.IsDir() {\n\t\treturn nil, fmt.Errorf(\"%q is a directory\", path)\n\t}\n\tdata := make([]byte, st.Size())\n\n\trd := bufio.NewReader(f)\n\t_, err = io.ReadFull(rd, data)\n\tif err != nil && err != io.EOF {\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}\n\n// read bytes from stdin\nfunc readFromStdin(ctx context.Context) ([]byte, error) {\n\t// read from stdin\n\tdata := make([]byte, 0, 128)\n\trd := bufio.NewReader(os.Stdin)\n\tbuf := make([]byte, 128)\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\tdefault:\n\t\t\tn, err := rd.Read(buf)\n\t\t\tif err == io.EOF {\n\t\t\t\tdata = append(data, buf[:n]...)\n\t\t\t\treturn data, nil\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdata = append(data, buf[:n]...)\n\t\t}\n\t}\n}\n\n// LoadCACertificates reads PEM-encoded CA certificates from a file and adds them to a CertPool.\n// It returns the CertPool and any error encountered.\nfunc LoadCACertificates(caPath string) 
(*x509.CertPool, error) {\n\tst, err := os.Stat(caPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to stat the cert file: %s: %w\", caPath, err)\n\t}\n\tif st.IsDir() {\n\t\tfiles, err := os.ReadDir(caPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to read the cert directory: %s: %w\", caPath, err)\n\t\t}\n\t\tcertPool := x509.NewCertPool()\n\n\t\tfor _, file := range files {\n\t\t\tif file.IsDir() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = loadCACertificatesToPool(filepath.Join(caPath, file.Name()), certPool)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to load the cert file: %s: %w\", filepath.Join(caPath, file.Name()), err)\n\t\t\t}\n\t\t}\n\t\treturn certPool, nil\n\t}\n\t// caPath is a single cert file\n\tcertPool := x509.NewCertPool()\n\terr = loadCACertificatesToPool(caPath, certPool)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load the cert file: %s: %w\", caPath, err)\n\t}\n\treturn certPool, nil\n}\n\nfunc loadCACertificatesToPool(filePath string, certPool *x509.CertPool) error {\n\tcertPEMBlock, err := os.ReadFile(filePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read the cert file: %s: %w\", filePath, err)\n\t}\n\n\tfor {\n\t\tblock, rest := pem.Decode(certPEMBlock)\n\t\tif block == nil {\n\t\t\tbreak\n\t\t}\n\t\tcertPEMBlock = rest\n\n\t\tcert, err := x509.ParseCertificate(block.Bytes)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to parse certificate: %w\", err)\n\t\t}\n\n\t\tif !cert.IsCA {\n\t\t\treturn fmt.Errorf(\"file %s contains a certificate that is not a CA\", filePath)\n\t\t}\n\t\tcertPool.AddCert(cert)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/api/utils/utils.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage utils\n\nimport (\n\t\"log\"\n\t\"net\"\n\t\"reflect\"\n)\n\nconst (\n\tDefaultLoggingFlags = log.LstdFlags | log.Lmicroseconds | log.Lmsgprefix\n)\n\nfunc MergeMaps(dst, src map[string]any) map[string]any {\n\tif dst == nil {\n\t\tdst = make(map[string]any)\n\t}\n\tif src == nil {\n\t\treturn dst\n\t}\n\tfor key, srcVal := range src {\n\t\tif dstVal, ok := dst[key]; ok {\n\t\t\tsrcMap, srcMapOk := mapify(srcVal)\n\t\t\tdstMap, dstMapOk := mapify(dstVal)\n\t\t\tif srcMapOk && dstMapOk {\n\t\t\t\tsrcVal = MergeMaps(dstMap, srcMap)\n\t\t\t}\n\t\t}\n\t\tdst[key] = srcVal\n\t}\n\treturn dst\n}\n\nfunc mapify(i interface{}) (map[string]interface{}, bool) {\n\tvalue := reflect.ValueOf(i)\n\tif value.Kind() == reflect.Map {\n\t\tm := map[string]interface{}{}\n\t\tfor _, k := range value.MapKeys() {\n\t\t\tm[k.String()] = value.MapIndex(k).Interface()\n\t\t}\n\t\treturn m, true\n\t}\n\treturn map[string]interface{}{}, false\n}\n\nfunc GetHost(hostport string) string {\n\th, _, err := net.SplitHostPort(hostport)\n\tif err != nil {\n\t\treturn hostport\n\t}\n\treturn h\n}\n\nfunc Convert(i interface{}) interface{} {\n\tswitch x := i.(type) {\n\tcase map[interface{}]interface{}:\n\t\tnm := map[string]interface{}{}\n\t\tfor k, v := range x {\n\t\t\tnm[k.(string)] = Convert(v)\n\t\t}\n\t\treturn nm\n\tcase map[string]interface{}:\n\t\tfor k, v := range x {\n\t\t\tx[k] = Convert(v)\n\t\t}\n\tcase []interface{}:\n\t\tfor k, v := range x {\n\t\t\tx[k] = Convert(v)\n\t\t}\n\t}\n\treturn i\n}\n"
  },
  {
    "path": "pkg/api/utils/utils_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage utils\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n)\n\nvar convertTestSet = []struct {\n\tname string\n\tin   interface{}\n\tout  interface{}\n}{\n\t{\n\t\tname: \"string\",\n\t\tin:   \"test1\",\n\t\tout:  \"test1\",\n\t},\n\t{\n\t\tname: \"map[interface{}]interface{}\",\n\t\tin: map[interface{}]interface{}{\n\t\t\t\"a\": \"b\",\n\t\t},\n\t\tout: map[string]interface{}{\n\t\t\t\"a\": \"b\",\n\t\t},\n\t},\n\t{\n\t\tname: \"map[string]interface{}\",\n\t\tin: map[string]interface{}{\n\t\t\t\"a\": map[interface{}]interface{}{\n\t\t\t\t\"b\": \"c\",\n\t\t\t},\n\t\t},\n\t\tout: map[string]interface{}{\n\t\t\t\"a\": map[string]interface{}{\n\t\t\t\t\"b\": \"c\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname: \"[]interface{}\",\n\t\tin: []interface{}{\n\t\t\t\"a\",\n\t\t},\n\t\tout: []interface{}{\n\t\t\t\"a\",\n\t\t},\n\t},\n}\n\nfunc TestConvert(t *testing.T) {\n\tfor _, item := range convertTestSet {\n\t\tt.Run(item.name, func(t *testing.T) {\n\t\t\to := Convert(item.in)\n\t\t\tif !cmp.Equal(o, item.out) {\n\t\t\t\tt.Logf(\"%q failed\", item.name)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestMergeMaps(t *testing.T) {\n\ttests := []struct {\n\t\tname string // description of this test case\n\t\t// Named input parameters for target function.\n\t\tdst  map[string]interface{}\n\t\tsrc  map[string]interface{}\n\t\twant map[string]interface{}\n\t}{\n\t\t{\n\t\t\tname: \"empty\",\n\t\t\tdst:  nil,\n\t\t\tsrc:  nil,\n\t\t\twant: 
map[string]interface{}{},\n\t\t},\n\t\t{\n\t\t\tname: \"empty_dst\",\n\t\t\tdst:  nil,\n\t\t\tsrc:  map[string]interface{}{\"a\": \"b\"},\n\t\t\twant: map[string]interface{}{\"a\": \"b\"},\n\t\t},\n\t\t{\n\t\t\tname: \"empty_src\",\n\t\t\tdst:  map[string]interface{}{\"a\": \"b\"},\n\t\t\tsrc:  nil,\n\t\t\twant: map[string]interface{}{\"a\": \"b\"},\n\t\t},\n\t\t{\n\t\t\tname: \"merge\",\n\t\t\tdst:  map[string]interface{}{\"a\": \"b\"},\n\t\t\tsrc:  map[string]interface{}{\"a\": \"c\"},\n\t\t\twant: map[string]interface{}{\"a\": \"c\"},\n\t\t},\n\t\t{\n\t\t\tname: \"merge_with_map\",\n\t\t\tdst:  map[string]interface{}{\"a\": \"b\"},\n\t\t\tsrc:  map[string]interface{}{\"a\": map[string]interface{}{\"c\": \"d\"}},\n\t\t\twant: map[string]interface{}{\"a\": map[string]interface{}{\"c\": \"d\"}},\n\t\t},\n\t\t{\n\t\t\tname: \"merge_with_map_and_slice\",\n\t\t\tdst:  map[string]interface{}{\"a\": \"b\"},\n\t\t\tsrc:  map[string]interface{}{\"a\": map[string]interface{}{\"c\": \"d\"}},\n\t\t\twant: map[string]interface{}{\"a\": map[string]interface{}{\"c\": \"d\"}},\n\t\t},\n\t\t{\n\t\t\tname: \"merge_with_slice\",\n\t\t\tdst:  map[string]interface{}{\"a\": \"b\"},\n\t\t\tsrc:  map[string]interface{}{\"a\": []interface{}{\"c\", \"d\"}},\n\t\t\twant: map[string]interface{}{\"a\": []interface{}{\"c\", \"d\"}},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := MergeMaps(tt.dst, tt.src)\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Logf(\"%q failed\", tt.name)\n\t\t\t\tt.Logf(\"got: %v\", got)\n\t\t\t\tt.Logf(\"want: %v\", tt.want)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/app/api.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/pprof\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/AlekSi/pointer\"\n\t\"github.com/gorilla/handlers\"\n\t\"github.com/gorilla/mux\"\n\t\"github.com/prometheus/client_golang/prometheus/collectors\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n)\n\nfunc (a *App) newAPIServer() (*http.Server, error) {\n\ta.routes()\n\tvar tlscfg *tls.Config\n\tvar err error\n\tif a.Config.APIServer.TLS != nil {\n\t\ttlscfg, err = utils.NewTLSConfig(\n\t\t\ta.Config.APIServer.TLS.CaFile,\n\t\t\ta.Config.APIServer.TLS.CertFile,\n\t\t\ta.Config.APIServer.TLS.KeyFile,\n\t\t\ta.Config.APIServer.TLS.ClientAuth,\n\t\t\tfalse, // skip-verify\n\t\t\ttrue,  // genSelfSigned\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif a.Config.APIServer.EnableProfiling {\n\t\ta.router.HandleFunc(\"/debug/pprof/\", pprof.Index)\n\t\ta.router.HandleFunc(\"/debug/pprof/cmdline\", pprof.Cmdline)\n\t\ta.router.HandleFunc(\"/debug/pprof/profile\", pprof.Profile)\n\t\ta.router.HandleFunc(\"/debug/pprof/symbol\", pprof.Symbol)\n\t\ta.router.Path(\"/debug/pprof/symbol\").Methods(\"POST\", \"GET\").HandlerFunc(pprof.Symbol)\n\t\ta.router.HandleFunc(\"/debug/pprof/trace\", 
pprof.Trace)\n\t\ta.router.Handle(\"/debug/pprof/heap\", pprof.Handler(\"heap\"))\n\t\ta.router.Handle(\"/debug/pprof/mutex\", pprof.Handler(\"mutex\"))\n\t\ta.router.Handle(\"/debug/pprof/threadcreate\", pprof.Handler(\"threadcreate\"))\n\t\ta.router.Handle(\"/debug/pprof/goroutine\", pprof.Handler(\"goroutine\"))\n\t\ta.router.Handle(\"/debug/pprof/allocs\", pprof.Handler(\"allocs\"))\n\t\ta.router.Handle(\"/debug/pprof/block\", pprof.Handler(\"block\"))\n\t}\n\n\tif a.Config.APIServer.EnableMetrics {\n\t\ta.router.Handle(\"/metrics\", promhttp.HandlerFor(a.reg, promhttp.HandlerOpts{}))\n\t\ta.reg.MustRegister(collectors.NewGoCollector())\n\t\ta.reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))\n\t\ta.reg.MustRegister(subscribeResponseReceivedCounter)\n\t\ta.reg.MustRegister(subscribeResponseFailedCounter)\n\t\ta.registerTargetMetrics()\n\t\tgo a.startClusterMetrics()\n\t}\n\ts := &http.Server{\n\t\tAddr:         a.Config.APIServer.Address,\n\t\tHandler:      a.router,\n\t\tReadTimeout:  a.Config.APIServer.Timeout / 2,\n\t\tWriteTimeout: a.Config.APIServer.Timeout / 2,\n\t}\n\n\tif tlscfg != nil {\n\t\ts.TLSConfig = tlscfg\n\t}\n\n\treturn s, nil\n}\n\ntype APIErrors struct {\n\tErrors []string `json:\"errors,omitempty\"`\n}\n\nfunc (a *App) handleConfigTargetsGet(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tvar err error\n\ta.configLock.RLock()\n\tdefer a.configLock.RUnlock()\n\tif id == \"\" {\n\t\t// copy targets map\n\t\ttargets := make(map[string]*types.TargetConfig, len(a.Config.Targets))\n\t\tfor n, tc := range a.Config.Targets {\n\t\t\tntc := tc.DeepCopy()\n\t\t\tntc.Password = pointer.ToString(\"****\")\n\t\t\ttargets[n] = ntc\n\t\t}\n\t\terr = json.NewEncoder(w).Encode(targets)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t}\n\t\treturn\n\t}\n\tif t, ok := 
a.Config.Targets[id]; ok {\n\t\ttc := t.DeepCopy()\n\t\ttc.Password = pointer.ToString(\"****\")\n\t\terr = json.NewEncoder(w).Encode(tc)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t}\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusNotFound)\n\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"target %q not found\", id)}})\n}\n\nfunc (a *App) handleConfigTargetsPost(w http.ResponseWriter, r *http.Request) {\n\tbody, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\ttc := new(types.TargetConfig)\n\terr = json.Unmarshal(body, tc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\ta.AddTargetConfig(tc)\n}\n\nfunc (a *App) handleConfigTargetsSubscriptions(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif !a.targetConfigExists(id) {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"target %q not found\", id)}})\n\t\treturn\n\t}\n\tbody, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\tvar data map[string][]string\n\terr = json.Unmarshal(body, &data)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tsubs, ok := data[\"subscriptions\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"subscriptions not found\"}})\n\t\treturn\n\t}\n\terr = 
a.UpdateTargetSubscription(a.ctx, id, subs)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n\nfunc (a *App) handleConfigTargetsDelete(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\terr := a.DeleteTarget(r.Context(), id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n\nfunc (a *App) handleConfigSubscriptions(w http.ResponseWriter, r *http.Request) {\n\ta.handlerCommonGet(w, a.Config.Subscriptions)\n}\n\nfunc (a *App) handleConfigOutputs(w http.ResponseWriter, r *http.Request) {\n\ta.handlerCommonGet(w, a.Config.Outputs)\n}\n\nfunc (a *App) handleConfigClustering(w http.ResponseWriter, r *http.Request) {\n\ta.handlerCommonGet(w, a.Config.Clustering)\n}\n\nfunc (a *App) handleConfigAPIServer(w http.ResponseWriter, r *http.Request) {\n\ta.handlerCommonGet(w, a.Config.APIServer)\n}\n\nfunc (a *App) handleConfigGNMIServer(w http.ResponseWriter, r *http.Request) {\n\ta.handlerCommonGet(w, a.Config.GnmiServer)\n}\n\nfunc (a *App) handleConfigInputs(w http.ResponseWriter, r *http.Request) {\n\ta.handlerCommonGet(w, a.Config.Inputs)\n}\n\nfunc (a *App) handleConfigProcessors(w http.ResponseWriter, r *http.Request) {\n\ta.handlerCommonGet(w, a.Config.Processors)\n}\n\nfunc (a *App) handleConfig(w http.ResponseWriter, r *http.Request) {\n\tnc := &config.Config{\n\t\tGlobalFlags:   a.Config.GlobalFlags,\n\t\tLocalFlags:    a.Config.LocalFlags,\n\t\tFileConfig:    a.Config.FileConfig,\n\t\tTargets:       make(map[string]*types.TargetConfig, len(a.Config.Targets)),\n\t\tSubscriptions: a.Config.Subscriptions,\n\t\tOutputs:       a.Config.Outputs,\n\t\tInputs:        a.Config.Inputs,\n\t\tProcessors:    a.Config.Processors,\n\t\tClustering:    a.Config.Clustering,\n\t\tGnmiServer:    a.Config.GnmiServer,\n\t\tAPIServer:    
 a.Config.APIServer,\n\t\tLoader:        a.Config.Loader,\n\t\tActions:       a.Config.Actions,\n\t\tTunnelServer:  a.Config.TunnelServer,\n\t}\n\tfor n, t := range a.Config.Targets {\n\t\ttc := t.DeepCopy()\n\t\ttc.Password = pointer.ToString(\"****\")\n\t\tnc.Targets[n] = tc\n\t}\n\ta.handlerCommonGet(w, nc)\n}\n\nfunc (a *App) handleTargetsGet(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif id == \"\" {\n\t\ta.handlerCommonGet(w, a.Targets)\n\t\treturn\n\t}\n\tif t, ok := a.Targets[id]; ok {\n\t\ta.handlerCommonGet(w, t)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusNotFound)\n\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"no targets found\"}})\n}\n\nfunc (a *App) handleTargetsPost(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif id == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\ttc, ok := a.Config.Targets[id]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"target %q not found\", id)}})\n\t\treturn\n\t}\n\tgo a.TargetSubscribeStream(a.ctx, tc)\n}\n\nfunc (a *App) handleTargetsDelete(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif id == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tif _, ok := a.Targets[id]; !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"target %q not found\", id)}})\n\t\treturn\n\t}\n\terr := a.DeleteTarget(a.ctx, id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n\ntype clusteringResponse struct {\n\tClusterName           string          `json:\"name,omitempty\"`\n\tNumberOfLockedTargets int             `json:\"number-of-locked-targets\"`\n\tLeader                string          
`json:\"leader,omitempty\"`\n\tMembers               []clusterMember `json:\"members,omitempty\"`\n}\n\ntype clusterMember struct {\n\tName                  string   `json:\"name,omitempty\"`\n\tAPIEndpoint           string   `json:\"api-endpoint,omitempty\"`\n\tIsLeader              bool     `json:\"is-leader,omitempty\"`\n\tNumberOfLockedTargets int      `json:\"number-of-locked-nodes\"`\n\tLockedTargets         []string `json:\"locked-targets,omitempty\"`\n}\n\nfunc (a *App) handleClusteringGet(w http.ResponseWriter, r *http.Request) {\n\tif a.Config.Clustering == nil {\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithCancel(r.Context())\n\tdefer cancel()\n\n\tresp := new(clusteringResponse)\n\tresp.ClusterName = a.Config.ClusterName\n\n\tvar err error\n\tresp.Leader, err = a.getLeaderName(ctx)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tservices, err := a.locker.GetServices(ctx, fmt.Sprintf(\"%s-gnmic-api\", a.Config.ClusterName), nil)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tinstanceNodes, err := a.getInstanceToTargetsMapping(ctx)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tfor _, v := range instanceNodes {\n\t\tresp.NumberOfLockedTargets += len(v)\n\t}\n\n\tresp.Members = make([]clusterMember, len(services))\n\tfor i, s := range services {\n\t\tscheme := getServiceScheme(s)\n\t\tresp.Members[i].APIEndpoint = fmt.Sprintf(\"%s%s\", scheme, s.Address)\n\t\tresp.Members[i].Name = strings.TrimSuffix(s.ID, \"-api\")\n\t\tresp.Members[i].IsLeader = resp.Leader == resp.Members[i].Name\n\t\tresp.Members[i].NumberOfLockedTargets = 
len(instanceNodes[resp.Members[i].Name])\n\t\tresp.Members[i].LockedTargets = instanceNodes[resp.Members[i].Name]\n\t}\n\tb, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\t_, err = w.Write(b)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t}\n}\n\nfunc (a *App) handleHealthzGet(w http.ResponseWriter, r *http.Request) {\n\ts := map[string]string{\"status\": \"healthy\"}\n\tb, err := json.Marshal(s)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\t_, err = w.Write(b)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t}\n}\n\nfunc (a *App) handleAdminShutdown(w http.ResponseWriter, r *http.Request) {\n\ta.Logger.Printf(\"shutting down due to user request\")\n\ta.Cfn()\n}\n\nfunc (a *App) handleClusteringMembersGet(w http.ResponseWriter, r *http.Request) {\n\tif a.Config.Clustering == nil {\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithCancel(r.Context())\n\tdefer cancel()\n\t// get leader\n\tleader, err := a.getLeaderName(ctx)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tservices, err := a.locker.GetServices(ctx, fmt.Sprintf(\"%s-gnmic-api\", a.Config.ClusterName), nil)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tinstanceNodes, err := a.getInstanceToTargetsMapping(ctx)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = 
json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tmembers := make([]clusterMember, len(services))\n\tfor i, s := range services {\n\t\tscheme := getServiceScheme(s)\n\t\tmembers[i].APIEndpoint = fmt.Sprintf(\"%s%s\", scheme, s.Address)\n\t\tmembers[i].Name = strings.TrimSuffix(s.ID, \"-api\")\n\t\tmembers[i].IsLeader = leader == members[i].Name\n\t\tmembers[i].NumberOfLockedTargets = len(instanceNodes[members[i].Name])\n\t\tmembers[i].LockedTargets = instanceNodes[members[i].Name]\n\t}\n\tb, err := json.Marshal(members)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\t_, err = w.Write(b)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t}\n}\n\nfunc (a *App) handleClusteringLeaderGet(w http.ResponseWriter, r *http.Request) {\n\tif a.Config.Clustering == nil {\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithCancel(r.Context())\n\tdefer cancel()\n\t// get leader\n\tleader, err := a.getLeaderName(ctx)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tservices, err := a.locker.GetServices(ctx, fmt.Sprintf(\"%s-gnmic-api\", a.Config.ClusterName), nil)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tinstanceNodes, err := a.getInstanceToTargetsMapping(ctx)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tmembers := make([]clusterMember, 1)\n\tfor _, s := range services {\n\t\tif strings.TrimSuffix(s.ID, \"-api\") != leader {\n\t\t\tcontinue\n\t\t}\n\t\tscheme := 
getServiceScheme(s)\n\t\t// add the leader as a member then break from loop\n\t\tmembers[0].APIEndpoint = fmt.Sprintf(\"%s%s\", scheme, s.Address)\n\t\tmembers[0].Name = strings.TrimSuffix(s.ID, \"-api\")\n\t\tmembers[0].IsLeader = true\n\t\tmembers[0].NumberOfLockedTargets = len(instanceNodes[members[0].Name])\n\t\tmembers[0].LockedTargets = instanceNodes[members[0].Name]\n\t\tbreak\n\t}\n\tb, err := json.Marshal(members)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tw.Write(b)\n}\n\nfunc (a *App) handleClusteringLeaderDelete(w http.ResponseWriter, r *http.Request) {\n\tif a.Config.Clustering == nil {\n\t\treturn\n\t}\n\n\tif !a.isLeader {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"not leader\"}})\n\t\treturn\n\t}\n\n\terr := a.locker.Unlock(r.Context(), a.leaderKey())\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n\nfunc (a *App) handleClusteringDrainInstance(w http.ResponseWriter, r *http.Request) {\n\tif a.Config.Clustering == nil {\n\t\treturn\n\t}\n\n\tif !a.isLeader {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"not leader\"}})\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif id == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tctx := r.Context()\n\tservices, err := a.locker.GetServices(ctx, fmt.Sprintf(\"%s-gnmic-api\", a.Config.ClusterName),\n\t\t[]string{\n\t\t\tfmt.Sprintf(\"instance-name=%s\", id),\n\t\t})\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif len(services) == 0 {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = 
json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"unknown instance: \" + id}})\n\t\treturn\n\t}\n\ttargets, err := a.getInstanceTargets(ctx, id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tgo func() {\n\t\ta.dispatchLock.Lock()\n\t\tdefer a.dispatchLock.Unlock()\n\n\t\tfor _, t := range targets {\n\t\t\terr = a.unassignTarget(a.ctx, t, services[0].ID)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed to unassign target %s: %v\", t, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttc, ok := a.Config.Targets[t]\n\t\t\tif !ok {\n\t\t\t\ta.Logger.Printf(\"could not find target %s config\", t)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = a.dispatchTarget(a.ctx, tc, id+\"-api\")\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed to dispatch target %s: %v\", t, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (a *App) handleClusterRebalance(w http.ResponseWriter, r *http.Request) {\n\tif a.Config.Clustering == nil {\n\t\treturn\n\t}\n\n\tif !a.isLeader {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"not leader\"}})\n\t\treturn\n\t}\n\n\tgo func() {\n\t\terr := a.clusterRebalanceTargets()\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"failed to rebalance: %v\", err)\n\t\t}\n\t}()\n}\n\n// helpers\nfunc headersMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tnext.ServeHTTP(w, r)\n\t})\n}\n\nfunc (a *App) loggingMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif (!a.Config.APIServer.HealthzDisableLogging && r.URL.Path == \"/api/v1/healthz\") || r.URL.Path != \"/api/v1/healthz\" {\n\t\t\tnext = handlers.LoggingHandler(a.Logger.Writer(), next)\n\t\t}\n\t\tnext.ServeHTTP(w, 
r)\n\t})\n}\n\nfunc (a *App) handlerCommonGet(w http.ResponseWriter, i interface{}) {\n\ta.configLock.RLock()\n\tdefer a.configLock.RUnlock()\n\tb, err := json.Marshal(i)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\t_, err = w.Write(b)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t}\n}\n\nfunc (a *App) getLeaderName(ctx context.Context) (string, error) {\n\tleaderKey := fmt.Sprintf(\"gnmic/%s/leader\", a.Config.ClusterName)\n\tleader, err := a.locker.List(ctx, leaderKey)\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\treturn leader[leaderKey], nil\n}\n\nfunc (a *App) getInstanceTargets(ctx context.Context, instance string) ([]string, error) {\n\tlocks, err := a.locker.List(ctx, fmt.Sprintf(\"gnmic/%s/targets\", a.Config.Clustering.ClusterName))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif a.Config.Debug {\n\t\ta.Logger.Println(\"current locks:\", locks)\n\t}\n\ttargets := make([]string, 0)\n\tfor k, v := range locks {\n\t\tif v == instance {\n\t\t\ttargets = append(targets, filepath.Base(k))\n\t\t}\n\t}\n\tsort.Strings(targets)\n\treturn targets, nil\n}\n\n// getServiceScheme returns the scheme of the service based on the protocol tag\n// the tag is expected to be in the format \"protocol=<scheme>\"\n// if the tag is not found, the scheme is \"http\"\nfunc getServiceScheme(service *lockers.Service) string {\n\tscheme := \"http\"\n\tfor _, t := range service.Tags {\n\t\tif strings.HasPrefix(t, \"protocol=\") {\n\t\t\tif strings.Split(t, \"=\")[1] != \"\" {\n\t\t\t\tscheme = strings.Split(t, \"=\")[1]\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn scheme\n}\n"
  },
  {
    "path": "pkg/app/app.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/fsnotify/fsnotify\"\n\t\"github.com/fullstorydev/grpcurl\"\n\t\"github.com/gorilla/mux\"\n\tgrpc_prometheus \"github.com/grpc-ecosystem/go-grpc-prometheus\"\n\t\"github.com/jhump/protoreflect/desc\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/goyang/pkg/yang\"\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n\t\"golang.org/x/sync/semaphore\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/encoding/gzip\"\n\t\"google.golang.org/grpc/grpclog\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/target\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/cache\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/formatters/plugin_manager\"\n\t\"github.com/openconfig/gnmic/pkg/inputs\"\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\t\"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/openconfig/gnmic/pkg/version\"\n\t\"github.com/zestor-dev/zestor/store\"\n\t\"github.com/zestor-dev/zestor/store/gomap\"\n)\n\nconst (\n\tdefaultHTTPClientTimeout = 5 * time.Second\n)\n\nvar 
obscuredAttrs = []string{\n\t\"password\",\n}\n\ntype App struct {\n\tctx     context.Context\n\tCfn     context.CancelFunc\n\tRootCmd *cobra.Command\n\n\tsem *semaphore.Weighted\n\t//\n\tconfigLock *sync.RWMutex\n\tConfig     *config.Config\n\tStore      store.Store[any]\n\t// collector\n\tdialOpts      []grpc.DialOption\n\toperLock      *sync.RWMutex\n\tOutputs       map[string]outputs.Output\n\tInputs        map[string]inputs.Input\n\tTargets       map[string]*target.Target\n\ttargetsChan   chan *target.Target\n\tactiveTargets map[string]struct{}\n\ttargetsLockFn map[string]context.CancelFunc\n\trootDesc      desc.Descriptor\n\t// end collector\n\trouter           *mux.Router\n\tlocker           lockers.Locker\n\tclusteringClient *http.Client\n\t// api\n\tapiServices  map[string]*lockers.Service\n\tisLeader     bool\n\tdispatchLock *sync.Mutex\n\t// prometheus registry\n\treg *prometheus.Registry\n\t//\n\tLogger *log.Logger\n\tout    io.Writer\n\t// prompt mode\n\tPromptMode    bool\n\tPromptHistory []string\n\tSchemaTree    *yang.Entry\n\t// yang\n\tmodules *yang.Modules\n\t//\n\twg        *sync.WaitGroup\n\tprintLock *sync.Mutex\n\terrCh     chan error\n\t// gNMI cache, used if a gnmi-server is configured\n\t// with subscribe or proxy commands.\n\tc cache.Cache\n\t// tunnel server\n\t// gRPC server where the tunnel service will be registered\n\tgrpcTunnelSrv *grpc.Server\n\ttunServer     *tunnel.Server\n\tttm           *sync.RWMutex\n\ttunTargets    map[tunnel.Target]struct{}\n\ttunTargetCfn  map[tunnel.Target]context.CancelFunc\n\t// processors plugin manager\n\tpm *plugin_manager.PluginManager\n\n\t// pprof\n\tpprof *pprofServer\n}\n\nfunc New() *App {\n\tctx, cancel := context.WithCancel(context.Background())\n\ta := &App{\n\t\tctx:        ctx,\n\t\tCfn:        cancel,\n\t\tRootCmd:    new(cobra.Command),\n\t\tsem:        semaphore.NewWeighted(1),\n\t\tconfigLock: new(sync.RWMutex),\n\t\tStore:      gomap.NewMemStore(store.StoreOptions[any]{}),\n\t\tConfig: 
    config.New(),\n\t\treg:        prometheus.NewRegistry(),\n\t\t//\n\t\toperLock:      new(sync.RWMutex),\n\t\tTargets:       make(map[string]*target.Target),\n\t\tOutputs:       make(map[string]outputs.Output),\n\t\tInputs:        make(map[string]inputs.Input),\n\t\ttargetsChan:   make(chan *target.Target),\n\t\tactiveTargets: make(map[string]struct{}),\n\t\ttargetsLockFn: make(map[string]context.CancelFunc),\n\t\t//\n\t\trouter:       mux.NewRouter(),\n\t\tapiServices:  make(map[string]*lockers.Service),\n\t\tdispatchLock: new(sync.Mutex),\n\n\t\tLogger:        log.New(io.Discard, \"[gnmic] \", log.LstdFlags|log.Lmsgprefix),\n\t\tout:           os.Stdout,\n\t\tPromptHistory: make([]string, 0, 128),\n\t\tSchemaTree: &yang.Entry{\n\t\t\tDir: make(map[string]*yang.Entry),\n\t\t},\n\n\t\twg:        new(sync.WaitGroup),\n\t\tprintLock: new(sync.Mutex),\n\t\t// tunnel server\n\t\tttm:          new(sync.RWMutex),\n\t\ttunTargets:   make(map[tunnel.Target]struct{}),\n\t\ttunTargetCfn: make(map[tunnel.Target]context.CancelFunc),\n\n\t\t// pprof\n\t\tpprof: newPprofServer(),\n\t}\n\ta.router.StrictSlash(true)\n\ta.router.Use(headersMiddleware, a.loggingMiddleware)\n\treturn a\n}\n\nfunc (a *App) Context() context.Context {\n\tif a.ctx == nil {\n\t\treturn context.Background()\n\t}\n\treturn a.ctx\n}\n\nfunc (a *App) InitGlobalFlags() {\n\ta.RootCmd.ResetFlags()\n\n\ta.RootCmd.PersistentFlags().StringVar(&a.Config.CfgFile, \"config\", \"\", \"main config file\")\n\ta.RootCmd.PersistentFlags().StringSliceVarP(&a.Config.GlobalFlags.Address, \"address\", \"a\", []string{}, \"comma separated gnmi targets addresses\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.Username, \"username\", \"u\", \"\", \"username\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.Password, \"password\", \"p\", \"\", \"password\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.Port, \"port\", \"\", defaultGrpcPort, \"gRPC 
port\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.Encoding, \"encoding\", \"e\", \"json\", fmt.Sprintf(\"one of %q. Case insensitive\", encodingNames))\n\ta.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.Insecure, \"insecure\", \"\", false, \"insecure connection\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSCa, \"tls-ca\", \"\", \"\", \"tls certificate authority\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSCert, \"tls-cert\", \"\", \"\", \"tls certificate\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSKey, \"tls-key\", \"\", \"\", \"tls key\")\n\ta.RootCmd.PersistentFlags().DurationVarP(&a.Config.GlobalFlags.Timeout, \"timeout\", \"\", 10*time.Second, \"grpc timeout, valid formats: 10s, 1m30s, 1h\")\n\ta.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.Debug, \"debug\", \"d\", false, \"debug mode\")\n\ta.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.EnablePprof, \"enable-pprof\", \"\", false, \"enable go pprof\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.PprofAddr, \"pprof-addr\", \"\", defaultPprofAddr, \"pprof host/IP and port to listen on\")\n\ta.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.SkipVerify, \"skip-verify\", \"\", false, \"skip verify tls connection\")\n\ta.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.NoPrefix, \"no-prefix\", \"\", false, \"do not add [ip:port] prefix to print output in case of multiple targets\")\n\ta.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.ProxyFromEnv, \"proxy-from-env\", \"\", false, \"use proxy from environment\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.Format, \"format\", \"\", \"\", fmt.Sprintf(\"output format, one of: %q\", formatNames))\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.LogFile, \"log-file\", \"\", \"\", \"log file path\")\n\ta.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.Log, 
\"log\", \"\", false, \"write log messages to stderr\")\n\ta.RootCmd.PersistentFlags().IntVarP(&a.Config.GlobalFlags.MaxMsgSize, \"max-msg-size\", \"\", msgSize, \"max grpc msg size\")\n\ta.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.PrintRequest, \"print-request\", \"\", false, \"print request as well as the response(s)\")\n\ta.RootCmd.PersistentFlags().DurationVarP(&a.Config.GlobalFlags.Retry, \"retry\", \"\", defaultRetryTimer, \"retry timer for RPCs\")\n\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSMinVersion, \"tls-min-version\", \"\", \"\", fmt.Sprintf(\"minimum TLS supported version, one of %q\", tlsVersions))\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSMaxVersion, \"tls-max-version\", \"\", \"\", fmt.Sprintf(\"maximum TLS supported version, one of %q\", tlsVersions))\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSVersion, \"tls-version\", \"\", \"\", fmt.Sprintf(\"set TLS version. Overwrites --tls-min-version and --tls-max-version, one of %q\", tlsVersions))\n\ta.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.LogTLSSecret, \"log-tls-secret\", \"\", false, \"enable logging of a TLS pre-master secret to a file\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSServerName, \"tls-server-name\",\n\t\t\"\", \"\", \"sets the server name to be used when verifying the hostname on the returned certificates unless --skip-verify is set\")\n\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.ClusterName, \"cluster-name\", \"\", defaultClusterName, \"cluster name the gnmic instance belongs to, this is used for target loadsharing via a locker\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.InstanceName, \"instance-name\", \"\", \"\", \"gnmic instance name\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.API, \"api\", \"\", \"\", \"gnmic api 
address\")\n\ta.RootCmd.PersistentFlags().StringArrayVarP(&a.Config.GlobalFlags.ProtoFile, \"proto-file\", \"\", nil, \"proto file(s) name(s)\")\n\ta.RootCmd.PersistentFlags().StringArrayVarP(&a.Config.GlobalFlags.ProtoDir, \"proto-dir\", \"\", nil, \"directory to look for proto files specified with --proto-file\")\n\ta.RootCmd.PersistentFlags().StringArrayVarP(&a.Config.GlobalFlags.RegisteredExtensions, \"registered-extensions\", \"\", nil, \"registered (custom) extensions\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.RequestExtensions, \"request-extensions\", \"\", \"\", \"add registered (custom) extensions to request\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TargetsFile, \"targets-file\", \"\", \"\", \"path to file with targets configuration\")\n\ta.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.Gzip, \"gzip\", \"\", false, \"enable gzip compression on gRPC connections\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.Token, \"token\", \"\", \"\", \"token value, used for gRPC token based authentication\")\n\n\ta.RootCmd.PersistentFlags().StringArrayVarP(&a.Config.GlobalFlags.File, \"file\", \"\", nil, \"YANG file(s)\")\n\ta.RootCmd.PersistentFlags().StringArrayVarP(&a.Config.GlobalFlags.Dir, \"dir\", \"\", nil, \"YANG dir(s)\")\n\ta.RootCmd.PersistentFlags().StringArrayVarP(&a.Config.GlobalFlags.Exclude, \"exclude\", \"\", nil, \"YANG module names to be excluded\")\n\n\ta.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.UseTunnelServer, \"use-tunnel-server\", \"\", false, \"use tunnel server to dial targets\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.AuthScheme, \"auth-scheme\", \"\", \"\", \"authentication scheme to use for the target's username/password\")\n\ta.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.CalculateLatency, \"calculate-latency\", \"\", false, \"calculate the delta between each message timestamp and the receive timestamp. 
JSON format only\")\n\ta.RootCmd.PersistentFlags().StringToStringP(\"metadata\", \"H\", a.Config.GlobalFlags.Metadata, \"add metadata to gRPC requests (`key=value`)\")\n\ta.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.PluginProcessorsPath, \"processors-plugins-path\", \"P\", \"\", \"filesystem path where gNMIc will look for even_plugin processors to initialize\")\n\ta.RootCmd.PersistentFlags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(flag.Name, flag)\n\t})\n}\n\nfunc (a *App) PreRunE(cmd *cobra.Command, args []string) error {\n\terr := a.Config.ToStore(a.Store)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif a.Config.Debug {\n\t\tfmt.Println(a.Store.Dump())\n\t}\n\tif a.Config.EnablePprof {\n\t\t_, _, err := net.SplitHostPort(a.Config.GlobalFlags.PprofAddr)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"pprof error %v\", err)\n\t\t}\n\t\ta.pprof.Start(a.Config.GlobalFlags.PprofAddr)\n\t\ta.Logger.Printf(\"pprof server started at %s/debug/pprof\", a.Config.GlobalFlags.PprofAddr)\n\t\tgo func() {\n\t\t\terr := <-a.pprof.ErrChan()\n\t\t\ta.Logger.Printf(\"pprof server failed: %v\", err)\n\t\t}()\n\t}\n\n\ta.Config.SetGlobalsFromEnv(a.RootCmd)\n\ta.Config.SetPersistentFlagsFromFile(a.RootCmd)\n\n\tlogOutput, flags, err := a.Config.SetLogger()\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.Logger.SetOutput(logOutput)\n\ta.Logger.SetFlags(flags)\n\ta.Config.Address = config.ParseAddressField(a.Config.Address)\n\ta.Logger.Printf(\"version=%s, commit=%s, date=%s, gitURL=%s, docs=https://gnmic.openconfig.net\", version.Version, version.Commit, version.Date, version.GitURL)\n\n\tif a.Config.Debug {\n\t\tgrpclog.SetLogger(a.Logger) //lint:ignore SA1019 see https://github.com/karimra/gnmic/issues/59\n\t}\n\ta.Logger.Printf(\"using config file %q\", a.Config.FileConfig.ConfigFileUsed())\n\ta.logConfigKVs()\n\treturn a.validateGlobals()\n}\n\nfunc (a *App) validateGlobals() error {\n\tif a.Config.Insecure {\n\t\tif a.Config.SkipVerify 
{\n\t\t\treturn errors.New(\"flags --insecure and --skip-verify are mutually exclusive\")\n\t\t}\n\t\tif a.Config.TLSCa != \"\" {\n\t\t\treturn errors.New(\"flags --insecure and --tls-ca are mutually exclusive\")\n\t\t}\n\t\tif a.Config.TLSCert != \"\" {\n\t\t\treturn errors.New(\"flags --insecure and --tls-cert are mutually exclusive\")\n\t\t}\n\t\tif a.Config.TLSKey != \"\" {\n\t\t\treturn errors.New(\"flags --insecure and --tls-key are mutually exclusive\")\n\t\t}\n\t\tif a.Config.TLSVersion != \"\" {\n\t\t\treturn errors.New(\"flags --insecure and --tls-version are mutually exclusive\")\n\t\t}\n\t\tif a.Config.TLSMaxVersion != \"\" {\n\t\t\treturn errors.New(\"flags --insecure and --tls-max-version are mutually exclusive\")\n\t\t}\n\t\tif a.Config.TLSMinVersion != \"\" {\n\t\t\treturn errors.New(\"flags --insecure and --tls-min-version are mutually exclusive\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *App) logConfigKVs() {\n\tif a.Config.Debug {\n\t\tkeys := a.Config.FileConfig.AllKeys()\n\t\tsort.Strings(keys)\n\n\t\tfor _, k := range keys {\n\t\t\tif !a.Config.FileConfig.IsSet(k) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tv := a.Config.FileConfig.Get(k)\n\t\t\tfor _, obsc := range obscuredAttrs {\n\t\t\t\tif strings.HasSuffix(k, obsc) {\n\t\t\t\t\tv = \"***\"\n\t\t\t\t}\n\t\t\t}\n\t\t\ta.Logger.Printf(\"%s='%v'(%T)\", k, v, v)\n\t\t}\n\t}\n}\n\nfunc (a *App) PrintMsg(address string, msgName string, msg proto.Message) error {\n\ta.printLock.Lock()\n\tdefer a.printLock.Unlock()\n\tif a.Config.PrintRequest {\n\t\tfmt.Fprint(os.Stderr, msgName)\n\t\tfmt.Fprintln(os.Stderr, \"\")\n\t}\n\tprintPrefix := \"\"\n\tif len(a.Config.TargetsList()) > 1 && !a.Config.NoPrefix {\n\t\tprintPrefix = fmt.Sprintf(\"[%s] \", address)\n\t}\n\n\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\tcase *gnmi.CapabilityResponse:\n\t\tif len(a.Config.Format) == 0 {\n\t\t\ta.printCapResponse(printPrefix, msg)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tregisteredExtensions, err := 
utils.ParseRegisteredExtensions(a.Config.RegisteredExtensions)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmo := formatters.MarshalOptions{\n\t\tMultiline:            true,\n\t\tIndent:               \"  \",\n\t\tFormat:               a.Config.Format,\n\t\tValuesOnly:           a.Config.GetValuesOnly,\n\t\tCalculateLatency:     a.Config.CalculateLatency,\n\t\tProtoFiles:           a.Config.ProtoFile,\n\t\tProtoDir:             a.Config.ProtoDir,\n\t\tRegisteredExtensions: registeredExtensions,\n\t}\n\tb, err := mo.Marshal(msg, map[string]string{\"source\": address})\n\tif err != nil {\n\t\ta.Logger.Printf(\"error marshaling message: %v\", err)\n\t\tif !a.Config.Log {\n\t\t\tfmt.Printf(\"error marshaling message: %v\", err)\n\t\t}\n\t\treturn err\n\t}\n\tsb := strings.Builder{}\n\tsb.Write(b)\n\tfmt.Fprintf(a.out, \"%s\\n\", indent(printPrefix, sb.String()))\n\treturn nil\n}\n\nfunc (a *App) createCollectorDialOpts() {\n\t// append gRPC userAgent name\n\topts := []grpc.DialOption{grpc.WithUserAgent(fmt.Sprintf(\"gNMIc/%s\", version.Version))}\n\t// add maxMsgSize\n\tif a.Config.MaxMsgSize > 0 {\n\t\topts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(a.Config.MaxMsgSize)))\n\t}\n\t// Set NoProxy\n\tif !a.Config.ProxyFromEnv {\n\t\topts = append(opts, grpc.WithNoProxy())\n\t}\n\t// add gzip compressor\n\tif a.Config.Gzip {\n\t\topts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))\n\t}\n\t// enable metrics\n\tif a.Config.APIServer != nil && a.Config.APIServer.EnableMetrics && a.reg != nil {\n\t\tgrpcClientMetrics := grpc_prometheus.NewClientMetrics()\n\t\topts = append(opts,\n\t\t\tgrpc.WithUnaryInterceptor(grpcClientMetrics.UnaryClientInterceptor()),\n\t\t\tgrpc.WithStreamInterceptor(grpcClientMetrics.StreamClientInterceptor()),\n\t\t)\n\t\ta.reg.MustRegister(grpcClientMetrics)\n\t}\n\ta.dialOpts = opts\n}\n\nfunc (a *App) watchConfig() {\n\ta.Logger.Printf(\"watching 
config...\")\n\ta.Config.FileConfig.OnConfigChange(a.loadTargets)\n\ta.Config.FileConfig.WatchConfig()\n}\n\nfunc (a *App) loadTargets(e fsnotify.Event) {\n\ta.Logger.Printf(\"got config change notification: %v\", e)\n\tctx, cancel := context.WithCancel(a.ctx)\n\tdefer cancel()\n\terr := a.sem.Acquire(ctx, 1)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to acquire target loading semaphore: %v\", err)\n\t\treturn\n\t}\n\tdefer a.sem.Release(1)\n\tswitch e.Op {\n\tcase fsnotify.Write, fsnotify.Create:\n\t\tnewTargets, err := a.Config.GetTargets()\n\t\tif err != nil && !errors.Is(err, config.ErrNoTargetsFound) {\n\t\t\ta.Logger.Printf(\"failed getting targets from new config: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tif !a.inCluster() {\n\t\t\tcurrentTargets := a.Targets\n\t\t\t// delete targets\n\t\t\tfor n := range currentTargets {\n\t\t\t\tif _, ok := newTargets[n]; !ok {\n\t\t\t\t\tif a.Config.Debug {\n\t\t\t\t\t\ta.Logger.Printf(\"target %q deleted from config\", n)\n\t\t\t\t\t}\n\t\t\t\t\terr = a.DeleteTarget(a.ctx, n)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ta.Logger.Printf(\"failed to delete target %q: %v\", n, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// add targets\n\t\t\tvar limiter *time.Ticker\n\t\t\tif a.Config.LocalFlags.SubscribeBackoff > 0 {\n\t\t\t\tlimiter = time.NewTicker(a.Config.LocalFlags.SubscribeBackoff)\n\t\t\t}\n\t\t\tfor n, tc := range newTargets {\n\t\t\t\tif _, ok := currentTargets[n]; !ok {\n\t\t\t\t\tif a.Config.Debug {\n\t\t\t\t\t\ta.Logger.Printf(\"target %q added to config\", n)\n\t\t\t\t\t}\n\t\t\t\t\ta.AddTargetConfig(tc)\n\t\t\t\t\ta.wg.Add(1)\n\t\t\t\t\tgo a.TargetSubscribeStream(a.ctx, tc)\n\t\t\t\t\tif limiter != nil {\n\t\t\t\t\t\t<-limiter.C\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif limiter != nil {\n\t\t\t\tlimiter.Stop()\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\t// in a cluster\n\t\tif !a.isLeader {\n\t\t\treturn\n\t\t}\n\t\t// in cluster && leader\n\t\tdist, err := a.getTargetToInstanceMapping(a.ctx)\n\t\tif err != nil 
{\n\t\t\ta.Logger.Printf(\"failed to get target to instance mapping: %v\", err)\n\t\t\treturn\n\t\t}\n\t\t// delete targets\n\t\tfor t := range dist {\n\t\t\tif _, ok := newTargets[t]; !ok {\n\t\t\t\terr = a.deleteTarget(ctx, t)\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.Logger.Printf(\"failed to delete target %q: %v\", t, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// add new targets to cluster\n\t\ta.configLock.Lock()\n\t\tfor _, tc := range newTargets {\n\t\t\tif _, ok := dist[tc.Name]; !ok {\n\t\t\t\terr = a.dispatchTarget(a.ctx, tc)\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.Logger.Printf(\"failed to add target %q: %v\", tc.Name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ta.configLock.Unlock()\n\t}\n}\n\nfunc (a *App) startAPIServer() {\n\tif a.Config.APIServer == nil {\n\t\treturn\n\t}\n\ts, err := a.newAPIServer()\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to create a new API server: %v\", err)\n\t\treturn\n\t}\n\tgo func() {\n\t\tvar err error\n\t\tif s.TLSConfig != nil {\n\t\t\terr = s.ListenAndServeTLS(\"\", \"\")\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"API server err: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\terr = s.ListenAndServe()\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"API server err: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (a *App) LoadProtoFiles() (desc.Descriptor, error) {\n\tif len(a.Config.ProtoFile) == 0 {\n\t\treturn nil, nil\n\t}\n\ta.Logger.Printf(\"loading proto files...\")\n\tdescSource, err := grpcurl.DescriptorSourceFromProtoFiles(a.Config.ProtoDir, a.Config.ProtoFile...)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to load proto files: %v\", err)\n\t\treturn nil, err\n\t}\n\trootDesc, err := descSource.FindSymbol(\"Nokia.SROS.root\")\n\tif err != nil {\n\t\ta.Logger.Printf(\"could not get symbol 'Nokia.SROS.root': %v\", err)\n\t\treturn nil, err\n\t}\n\ta.Logger.Printf(\"loaded proto files\")\n\ta.rootDesc = rootDesc\n\treturn rootDesc, nil\n}\n\n// GetTargets reads the 
targets configuration from flags or config file.\n// If enabled it will load targets from a configured tunnel server.\nfunc (a *App) GetTargets() (map[string]*types.TargetConfig, error) {\n\ttargetsConfig, err := a.Config.GetTargets()\n\tif errors.Is(err, config.ErrNoTargetsFound) {\n\t\tif a.Config.UseTunnelServer {\n\t\t\ta.Logger.Printf(\"waiting %s for targets to register with the tunnel server...\", a.Config.TunnelServer.TargetWaitTime)\n\t\t\ttime.Sleep(a.Config.TunnelServer.TargetWaitTime)\n\t\t\ta.ttm.RLock()\n\t\t\tdefer a.ttm.RUnlock()\n\t\t\tfor tt := range a.tunTargets {\n\t\t\t\ttc := a.getTunnelTargetMatch(tt)\n\t\t\t\tif tc == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\terr = a.Config.SetTargetConfigDefaults(tc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\ttc.Address = tc.Name\n\t\t\t\ta.AddTargetConfig(tc)\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"failed reading targets config: %v\", err)\n\t\t}\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn targetsConfig, nil\n}\n\nfunc (a *App) CreateGNMIClient(ctx context.Context, t *target.Target) error {\n\tif t.Client != nil {\n\t\treturn nil\n\t}\n\ttargetDialOpts := a.dialOpts\n\tif a.Config.UseTunnelServer {\n\t\ttargetDialOpts = append(targetDialOpts,\n\t\t\tgrpc.WithContextDialer(a.tunDialerFn(ctx, t.Config)),\n\t\t)\n\t\tt.Config.Address = t.Config.Name\n\t}\n\ta.Logger.Printf(\"creating gRPC client for target %q\", t.Config.Name)\n\tif err := t.CreateGNMIClient(ctx, targetDialOpts...); err != nil {\n\t\tif errors.Is(err, context.DeadlineExceeded) {\n\t\t\treturn fmt.Errorf(\"failed to create a gRPC client for target %q, timeout (%s) reached\", t.Config.Name, t.Config.Timeout)\n\t\t}\n\t\treturn fmt.Errorf(\"failed to create a gRPC client for target %q : %w\", t.Config.Name, err)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/app/capabilities.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmi/proto/gnmi_ext\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n)\n\nfunc (a *App) CapPreRunE(cmd *cobra.Command, _ []string) error {\n\ta.Config.SetLocalFlagsFromFile(cmd)\n\ta.createCollectorDialOpts()\n\treturn a.initTunnelServer(tunnel.ServerConfig{\n\t\tAddTargetHandler:    a.tunServerAddTargetHandler,\n\t\tDeleteTargetHandler: a.tunServerDeleteTargetHandler,\n\t\tRegisterHandler:     a.tunServerRegisterHandler,\n\t\tHandler:             a.tunServerHandler,\n\t})\n}\n\nfunc (a *App) CapRunE(cmd *cobra.Command, args []string) error {\n\tdefer a.InitCapabilitiesFlags(cmd)\n\n\tif a.Config.Format == formatEvent {\n\t\treturn fmt.Errorf(\"format event not supported for Capabilities RPC\")\n\t}\n\tctx, cancel := context.WithCancel(a.ctx)\n\tdefer cancel()\n\t//\n\ttargetsConfig, err := a.GetTargets()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif a.PromptMode {\n\t\t// prompt mode\n\t\tfor _, tc := range targetsConfig {\n\t\t\ta.AddTargetConfig(tc)\n\t\t}\n\t}\n\tnumTargets := len(a.Config.Targets)\n\ta.errCh = make(chan error, numTargets*2)\n\ta.wg.Add(numTargets)\n\tfor _, tc := range a.Config.Targets {\n\t\tgo a.ReqCapabilities(ctx, tc)\n\t}\n\ta.wg.Wait()\n\treturn a.checkErrors()\n}\n\nfunc (a *App) ReqCapabilities(ctx context.Context, tc *types.TargetConfig) 
{\n\tdefer a.wg.Done()\n\text := make([]*gnmi_ext.Extension, 0) //\n\tif a.Config.PrintRequest {\n\t\terr := a.PrintMsg(tc.Name, \"Capabilities Request:\", &gnmi.CapabilityRequest{\n\t\t\tExtension: ext,\n\t\t})\n\t\tif err != nil {\n\t\t\ta.logError(fmt.Errorf(\"target %q: %v\", tc.Name, err))\n\t\t}\n\t}\n\n\ta.Logger.Printf(\"sending gNMI CapabilityRequest: gnmi_ext.Extension='%v' to %s\", ext, tc.Name)\n\tresponse, err := a.ClientCapabilities(ctx, tc, ext...)\n\tif err != nil {\n\t\ta.logError(fmt.Errorf(\"target %q, capabilities request failed: %v\", tc.Name, err))\n\t\treturn\n\t}\n\n\terr = a.PrintMsg(tc.Name, \"Capabilities Response:\", response)\n\tif err != nil {\n\t\ta.logError(fmt.Errorf(\"target %q: %v\", tc.Name, err))\n\t}\n}\n\nfunc (a *App) InitCapabilitiesFlags(cmd *cobra.Command) {\n\tcmd.ResetFlags()\n\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.CapabilitiesVersion, \"version\", \"\", false, \"show gnmi version only\")\n\tcmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", cmd.Name(), flag.Name), flag)\n\t})\n}\n"
  },
  {
    "path": "pkg/app/clustering.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n)\n\nconst (\n\tdefaultClusterName = \"default-cluster\"\n\tretryTimer         = 10 * time.Second\n\tlockWaitTime       = 100 * time.Millisecond\n\tapiServiceName     = \"gnmic-api\"\n\tprotocolTagName    = \"__protocol\"\n\tmaxRebalanceLoop   = 100\n)\n\nvar (\n\terrNoMoreSuitableServices = errors.New(\"no more suitable services for this target\")\n\terrNotFound               = errors.New(\"not found\")\n)\n\nfunc (a *App) InitLocker() error {\n\tif a.Config.Clustering == nil {\n\t\treturn nil\n\t}\n\tif a.Config.Clustering.Locker == nil {\n\t\treturn errors.New(\"missing locker config under clustering key\")\n\t}\n\n\tif lockerType, ok := a.Config.Clustering.Locker[\"type\"]; ok {\n\t\ta.Logger.Printf(\"starting locker type %q\", lockerType)\n\t\tif initializer, ok := lockers.Lockers[lockerType.(string)]; ok {\n\t\t\tlock := initializer()\n\t\t\terr := lock.Init(a.ctx, a.Config.Clustering.Locker, lockers.WithLogger(a.Logger))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ta.locker = lock\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"unknown locker type %q\", lockerType)\n\t}\n\treturn errors.New(\"missing locker type 
field\")\n}\n\nfunc (a *App) leaderKey() string {\n\treturn fmt.Sprintf(\"gnmic/%s/leader\", a.Config.Clustering.ClusterName)\n}\n\nfunc (a *App) inCluster() bool {\n\tif a.Config == nil {\n\t\treturn false\n\t}\n\treturn !(a.Config.Clustering == nil)\n}\n\nfunc (a *App) apiServiceRegistration() {\n\taddr, port, _ := net.SplitHostPort(a.Config.APIServer.Address)\n\tp, _ := strconv.Atoi(port)\n\n\ttags := make([]string, 0, 2+len(a.Config.Clustering.Tags))\n\ttags = append(tags, fmt.Sprintf(\"cluster-name=%s\", a.Config.Clustering.ClusterName))\n\ttags = append(tags, fmt.Sprintf(\"instance-name=%s\", a.Config.Clustering.InstanceName))\n\tif a.Config.APIServer.TLS != nil {\n\t\ttags = append(tags, protocolTagName+\"=https\")\n\t} else {\n\t\ttags = append(tags, protocolTagName+\"=http\")\n\t}\n\ttags = append(tags, a.Config.Clustering.Tags...)\n\n\tserviceReg := &lockers.ServiceRegistration{\n\t\tID:      a.Config.Clustering.InstanceName + \"-api\",\n\t\tName:    fmt.Sprintf(\"%s-%s\", a.Config.Clustering.ClusterName, apiServiceName),\n\t\tAddress: a.Config.Clustering.ServiceAddress,\n\t\tPort:    p,\n\t\tTags:    tags,\n\t\tTTL:     5 * time.Second,\n\t}\n\tif serviceReg.Address == \"\" {\n\t\tserviceReg.Address = addr\n\t}\n\tvar err error\n\ta.Logger.Printf(\"registering service %+v\", serviceReg)\n\tfor {\n\t\tselect {\n\t\tcase <-a.ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\terr = a.locker.Register(a.ctx, serviceReg)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"api service registration failed: %v\", err)\n\t\t\t\ttime.Sleep(retryTimer)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (a *App) startCluster() {\n\tif a.locker == nil || a.Config.Clustering == nil {\n\t\treturn\n\t}\n\n\t// register api service\n\tgo a.apiServiceRegistration()\n\n\tleaderKey := a.leaderKey()\n\tvar err error\nSTART:\n\t// acquire leader key lock\n\tfor {\n\t\ta.isLeader = false\n\t\terr = nil\n\t\ta.isLeader, err = a.locker.Lock(a.ctx, leaderKey, 
[]byte(a.Config.Clustering.InstanceName))\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"failed to acquire leader lock: %v\", err)\n\t\t\ttime.Sleep(retryTimer)\n\t\t\tcontinue\n\t\t}\n\t\tif !a.isLeader {\n\t\t\ttime.Sleep(retryTimer)\n\t\t\tcontinue\n\t\t}\n\t\ta.isLeader = true\n\t\ta.Logger.Printf(\"%q became the leader\", a.Config.Clustering.InstanceName)\n\t\tbreak\n\t}\n\tctx, cancel := context.WithCancel(a.ctx)\n\tdefer cancel()\n\tgo func() {\n\t\tgo a.watchMembers(ctx)\n\t\ta.Logger.Printf(\"leader waiting %s before dispatching targets\", a.Config.Clustering.LeaderWaitTimer)\n\t\ttime.Sleep(a.Config.Clustering.LeaderWaitTimer)\n\t\ta.Logger.Printf(\"leader done waiting, starting loader and dispatching targets\")\n\t\tgo a.startLoader(ctx)\n\t\tgo a.dispatchTargets(ctx)\n\t}()\n\n\tdoneCh, errCh := a.locker.KeepLock(ctx, leaderKey)\n\tselect {\n\tcase <-doneCh:\n\t\ta.Logger.Printf(\"%q lost leader role\", a.Config.Clustering.InstanceName)\n\t\tcancel()\n\t\ta.isLeader = false\n\t\ttime.Sleep(retryTimer)\n\t\tgoto START\n\tcase err := <-errCh:\n\t\ta.Logger.Printf(\"%q failed to maintain the leader key: %v\", a.Config.Clustering.InstanceName, err)\n\t\tcancel()\n\t\ta.isLeader = false\n\t\ttime.Sleep(retryTimer)\n\t\tgoto START\n\tcase <-a.ctx.Done():\n\t\treturn\n\t}\n}\n\nfunc (a *App) watchMembers(ctx context.Context) {\n\tserviceName := fmt.Sprintf(\"%s-%s\", a.Config.Clustering.ClusterName, apiServiceName)\nSTART:\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tdefault:\n\t\tmembersChan := make(chan []*lockers.Service)\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase srvs, ok := <-membersChan:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\ta.updateServices(srvs)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\terr := a.locker.WatchServices(ctx, serviceName, []string{\"cluster-name=\" + a.Config.Clustering.ClusterName}, membersChan, a.Config.Clustering.ServicesWatchTimer)\n\t\tif err != nil 
{\n\t\t\ta.Logger.Printf(\"failed getting services: %v\", err)\n\t\t\ttime.Sleep(retryTimer)\n\t\t\tgoto START\n\t\t}\n\t}\n}\n\nfunc (a *App) updateServices(srvs []*lockers.Service) {\n\ta.configLock.Lock()\n\tdefer a.configLock.Unlock()\n\n\tnumNewSrv := len(srvs)\n\tnumCurrentSrv := len(a.apiServices)\n\n\ta.Logger.Printf(\"received service update with %d service(s)\", numNewSrv)\n\t// no new services and no current services, continue\n\tif numNewSrv == 0 && numCurrentSrv == 0 {\n\t\treturn\n\t}\n\n\t// no new services and having some services, delete all\n\tif numNewSrv == 0 && numCurrentSrv != 0 {\n\t\ta.Logger.Printf(\"deleting all services\")\n\t\ta.apiServices = make(map[string]*lockers.Service)\n\t\treturn\n\t}\n\t// no current services, add all new services\n\tif numCurrentSrv == 0 {\n\t\tfor _, s := range srvs {\n\t\t\ta.Logger.Printf(\"adding service id %q\", s.ID)\n\t\t\ta.apiServices[s.ID] = s\n\t\t}\n\t\treturn\n\t}\n\t//\n\tnewSrvs := make(map[string]*lockers.Service)\n\tfor _, s := range srvs {\n\t\tnewSrvs[s.ID] = s\n\t}\n\t// delete removed services\n\tfor n := range a.apiServices {\n\t\tif _, ok := newSrvs[n]; !ok {\n\t\t\ta.Logger.Printf(\"deleting service id %q\", n)\n\t\t\tdelete(a.apiServices, n)\n\t\t}\n\t}\n\t// add new services\n\tfor n, s := range newSrvs {\n\t\ta.Logger.Printf(\"adding service id %q\", n)\n\t\ta.apiServices[n] = s\n\t}\n}\n\nfunc (a *App) dispatchTargets(ctx context.Context) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tif len(a.apiServices) == 0 {\n\t\t\t\ta.Logger.Printf(\"no services found, waiting...\")\n\t\t\t\ttime.Sleep(a.Config.Clustering.TargetsWatchTimer)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ta.dispatchLock.Lock()\n\t\t\ta.dispatchTargetsOnce(ctx)\n\t\t\ta.dispatchLock.Unlock()\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(a.Config.Clustering.TargetsWatchTimer)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (a *App) dispatchTargetsOnce(ctx 
context.Context) {\n\tdctx, cancel := context.WithTimeout(ctx, a.Config.Clustering.TargetsWatchTimer)\n\tdefer cancel()\n\tfor _, tc := range a.Config.Targets {\n\t\terr := a.dispatchTarget(dctx, tc)\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"failed to dispatch target %q: %v\", tc.Name, err)\n\t\t}\n\t\tif err == errNotFound {\n\t\t\t// no registered services,\n\t\t\t// no need to continue with other targets,\n\t\t\t// break from the targets loop\n\t\t\tbreak\n\t\t}\n\t\tif err == errNoMoreSuitableServices {\n\t\t\t// target has no suitable matching services,\n\t\t\t// continue to next target without wait\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc (a *App) dispatchTarget(ctx context.Context, tc *types.TargetConfig, denied ...string) error {\n\tif a.Config.Debug {\n\t\ta.Logger.Printf(\"checking if %q is locked\", tc.Name)\n\t}\n\tkey := fmt.Sprintf(\"gnmic/%s/targets/%s\", a.Config.Clustering.ClusterName, tc.Name)\n\tlocked, err := a.locker.IsLocked(ctx, key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif a.Config.Debug {\n\t\ta.Logger.Printf(\"target %q is locked: %v\", tc.Name, locked)\n\t}\n\tif locked {\n\t\treturn nil\n\t}\n\ta.Logger.Printf(\"dispatching target %q\", tc.Name)\n\tif denied == nil {\n\t\tdenied = make([]string, 0)\n\t}\nSELECTSERVICE:\n\tservice, err := a.selectService(tc.Tags, denied...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif service == nil {\n\t\tgoto SELECTSERVICE\n\t}\n\ta.Logger.Printf(\"selected service %+v\", service)\n\t// assign target to selected service\n\terr = a.assignTarget(ctx, tc, service)\n\tif err != nil {\n\t\t// add service to denied list and reselect\n\t\ta.Logger.Printf(\"failed assigning target %q to service %q: %v\", tc.Name, service.ID, err)\n\t\tdenied = append(denied, service.ID)\n\t\tgoto SELECTSERVICE\n\t}\n\t// wait for lock to be acquired\n\tinstanceName := \"\"\n\tfor _, tag := range service.Tags {\n\t\tsplitTag := strings.Split(tag, \"=\")\n\t\tif len(splitTag) == 2 && splitTag[0] == \"instance-name\" 
{\n\t\t\tinstanceName = splitTag[1]\n\t\t}\n\t}\n\ta.Logger.Printf(\"[cluster-leader] waiting for lock %q to be acquired by %q\", key, instanceName)\n\tretries := 0\nWAIT:\n\tvalues, err := a.locker.List(ctx, key)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed getting value of %q: %v\", key, err)\n\t\ttime.Sleep(lockWaitTime)\n\t\tgoto WAIT\n\t}\n\tif len(values) == 0 {\n\t\tretries++\n\t\tif (retries+1)*int(lockWaitTime) >= int(a.Config.Clustering.TargetAssignmentTimeout) {\n\t\t\ta.Logger.Printf(\"[cluster-leader] max retries reached for target %q and service %q, reselecting...\", tc.Name, service.ID)\n\t\t\terr = a.unassignTarget(ctx, tc.Name, service.ID)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed to unassign target %q from %q\", tc.Name, service.ID)\n\t\t\t}\n\t\t\tgoto SELECTSERVICE\n\t\t}\n\t\ttime.Sleep(lockWaitTime)\n\t\tgoto WAIT\n\t}\n\tif instance, ok := values[key]; ok {\n\t\tif instance == instanceName {\n\t\t\ta.Logger.Printf(\"[cluster-leader] lock %q acquired by %q\", key, instanceName)\n\t\t\treturn nil\n\t\t}\n\t}\n\tretries++\n\tif (retries+1)*int(lockWaitTime) >= int(a.Config.Clustering.TargetAssignmentTimeout) {\n\t\ta.Logger.Printf(\"[cluster-leader] max retries reached for target %q and service %q, reselecting...\", tc.Name, service.ID)\n\t\terr = a.unassignTarget(ctx, tc.Name, service.ID)\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"failed to unassign target %q from %q\", tc.Name, service.ID)\n\t\t}\n\t\tgoto SELECTSERVICE\n\t}\n\ttime.Sleep(lockWaitTime)\n\tgoto WAIT\n}\n\nfunc (a *App) selectService(tags []string, denied ...string) (*lockers.Service, error) {\n\tnumServices := len(a.apiServices)\n\tswitch numServices {\n\tcase 0:\n\t\treturn nil, errNotFound\n\tcase 1:\n\t\tfor _, s := range a.apiServices {\n\t\t\treturn s, nil\n\t\t}\n\tdefault:\n\t\t// select instance by tags\n\t\tmatchingInstances := make([]string, 0)\n\t\ttagCount := a.getInstancesTagsMatches(tags)\n\t\tif len(tagCount) > 0 {\n\t\t\tmatchingInstances = 
a.getHighestTagsMatches(tagCount)\n\t\t\ta.Logger.Printf(\"current instances with tags=%v: %+v\", tags, matchingInstances)\n\t\t} else {\n\t\t\tfor n := range a.apiServices {\n\t\t\t\tmatchingInstances = append(matchingInstances, strings.TrimSuffix(n, \"-api\"))\n\t\t\t}\n\t\t}\n\t\tif len(matchingInstances) == 1 {\n\t\t\treturn a.apiServices[fmt.Sprintf(\"%s-api\", matchingInstances[0])], nil\n\t\t}\n\t\t// select instance by load\n\t\tload, err := a.getInstancesLoad(matchingInstances...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ta.Logger.Printf(\"current instances load: %+v\", load)\n\t\t// if there are no locks in place, return a random service\n\t\tif len(load) == 0 {\n\t\t\tfor _, n := range matchingInstances {\n\t\t\t\ta.Logger.Printf(\"selected service name: %s\", n)\n\t\t\t\treturn a.apiServices[fmt.Sprintf(\"%s-api\", n)], nil\n\t\t\t}\n\t\t}\n\t\tfor _, d := range denied {\n\t\t\tdelete(load, strings.TrimSuffix(d, \"-api\"))\n\t\t}\n\t\ta.Logger.Printf(\"current instances load after filtering: %+v\", load)\n\t\t// all services were denied\n\t\tif len(load) == 0 {\n\t\t\treturn nil, errNoMoreSuitableServices\n\t\t}\n\t\tss := a.getLowLoadInstance(load)\n\t\ta.Logger.Printf(\"selected service name: %s\", ss)\n\t\tif srv, ok := a.apiServices[fmt.Sprintf(\"%s-api\", ss)]; ok {\n\t\t\treturn srv, nil\n\t\t}\n\t\treturn a.apiServices[ss], nil\n\t}\n\treturn nil, errNotFound\n}\n\nfunc (a *App) getInstancesLoad(instances ...string) (map[string]int, error) {\n\t// read all current locks held by the cluster\n\tlocks, err := a.locker.List(a.ctx, fmt.Sprintf(\"gnmic/%s/targets\", a.Config.Clustering.ClusterName))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif a.Config.Debug {\n\t\ta.Logger.Println(\"current locks:\", locks)\n\t}\n\tload := make(map[string]int)\n\t// using the read locks, calculate the number of targets each instance has locked\n\tfor _, instance := range locks {\n\t\tif _, ok := load[instance]; !ok {\n\t\t\tload[instance] = 
0\n\t\t}\n\t\tload[instance]++\n\t}\n\t// for instances that are registered but do not have any lock,\n\t// add a \"0\" load\n\tfor _, s := range a.apiServices {\n\t\tinstance := strings.TrimSuffix(s.ID, \"-api\")\n\t\tif _, ok := load[instance]; !ok {\n\t\t\tload[instance] = 0\n\t\t}\n\t}\n\tif len(instances) > 0 {\n\t\tfilteredLoad := make(map[string]int)\n\t\tfor _, instance := range instances {\n\t\t\tif l, ok := load[instance]; ok {\n\t\t\t\tfilteredLoad[instance] = l\n\t\t\t} else {\n\t\t\t\tfilteredLoad[instance] = 0\n\t\t\t}\n\t\t}\n\t\treturn filteredLoad, nil\n\t}\n\treturn load, nil\n}\n\n// loop through the current cluster load\n// find the instance with the lowest load\nfunc (a *App) getLowLoadInstance(load map[string]int) string {\n\tvar ss string\n\tvar low = -1\n\tfor s, l := range load {\n\t\tif low < 0 || l < low {\n\t\t\tss = s\n\t\t\tlow = l\n\t\t}\n\t}\n\treturn ss\n}\n\n// loop through the current cluster load\n// find the instance(s) with the highest and lowest load\nfunc (a *App) getHighAndLowInstance(load map[string]int) (string, string) {\n\tvar highIns, lowIns string\n\tvar high = -1\n\tvar low = -1\n\tfor s, l := range load {\n\t\tif high < 0 || l > high {\n\t\t\thighIns = s\n\t\t\thigh = l\n\t\t}\n\t\tif low < 0 || l < low {\n\t\t\tlowIns = s\n\t\t\tlow = l\n\t\t}\n\t}\n\treturn highIns, lowIns\n}\n\nfunc (a *App) getTargetToInstanceMapping(ctx context.Context) (map[string]string, error) {\n\tlocks, err := a.locker.List(ctx, fmt.Sprintf(\"gnmic/%s/targets\", a.Config.Clustering.ClusterName))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif a.Config.Debug {\n\t\ta.Logger.Println(\"current locks:\", locks)\n\t}\n\tfor k, v := range locks {\n\t\tdelete(locks, k)\n\t\tlocks[filepath.Base(k)] = v\n\t}\n\treturn locks, nil\n}\n\nfunc (a *App) getInstanceToTargetsMapping(ctx context.Context) (map[string][]string, error) {\n\tlocks, err := a.locker.List(ctx, fmt.Sprintf(\"gnmic/%s/targets\", a.Config.Clustering.ClusterName))\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\tif a.Config.Debug {\n\t\ta.Logger.Println(\"current locks:\", locks)\n\t}\n\trs := make(map[string][]string)\n\tfor k, v := range locks {\n\t\tif _, ok := rs[v]; !ok {\n\t\t\trs[v] = make([]string, 0)\n\t\t}\n\t\trs[v] = append(rs[v], filepath.Base(k))\n\t}\n\tfor _, ls := range rs {\n\t\tsort.Strings(ls)\n\t}\n\treturn rs, nil\n}\n\nfunc (a *App) getInstancesTagsMatches(tags []string) map[string]int {\n\tmaxMatch := make(map[string]int)\n\tnumTags := len(tags)\n\tif numTags == 0 {\n\t\treturn maxMatch\n\t}\n\tfor name, s := range a.apiServices {\n\t\tname = strings.TrimSuffix(name, \"-api\")\n\t\tmaxMatch[name] = 0\n\t\tfor i, tag := range s.Tags {\n\t\t\tif i+1 > numTags {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif tag == tags[i] {\n\t\t\t\tmaxMatch[name]++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn maxMatch\n}\n\nfunc (a *App) getHighestTagsMatches(tagsCount map[string]int) []string {\n\tvar ss = make([]string, 0)\n\tvar high = -1\n\tfor s, c := range tagsCount {\n\t\tif high < 0 || c > high {\n\t\t\tss = []string{strings.TrimSuffix(s, \"-api\")}\n\t\t\thigh = c\n\t\t\tcontinue\n\t\t}\n\t\tif high == c {\n\t\t\tss = append(ss, strings.TrimSuffix(s, \"-api\"))\n\t\t}\n\t}\n\treturn ss\n}\n\nfunc (a *App) deleteTarget(ctx context.Context, name string) error {\n\terr := a.createAPIClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\terrs := make([]error, 0, len(a.apiServices))\n\tfor _, s := range a.apiServices {\n\t\tscheme := a.getServiceScheme(s)\n\t\tctx, cancel := context.WithCancel(ctx)\n\t\tdefer cancel()\n\t\turl := fmt.Sprintf(\"%s://%s/api/v1/config/targets/%s\", scheme, s.Address, name)\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil)\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"failed to create a delete request: %v\", err)\n\t\t\terrs = append(errs, err)\n\t\t\tcontinue\n\t\t}\n\n\t\trsp, err := a.clusteringClient.Do(req)\n\t\tif err != nil 
{\n\t\t\trsp.Body.Close()\n\t\t\ta.Logger.Printf(\"failed deleting target %q: %v\", name, err)\n\t\t\terrs = append(errs, err)\n\t\t\tcontinue\n\t\t}\n\t\trsp.Body.Close()\n\t\ta.Logger.Printf(\"received response code=%d, for DELETE %s\", rsp.StatusCode, url)\n\t}\n\tif len(errs) == 0 {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"there was %d error(s) while deleting target %q\", len(errs), name)\n}\n\nfunc (a *App) assignTarget(ctx context.Context, tc *types.TargetConfig, service *lockers.Service) error {\n\t// encode target config\n\tbuffer := new(bytes.Buffer)\n\terr := json.NewEncoder(buffer).Encode(tc)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = a.createAPIClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tscheme := a.getServiceScheme(service)\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf(\"%s://%s/api/v1/config/targets\", scheme, service.Address), buffer)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tresp, err := a.clusteringClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\ta.Logger.Printf(\"got response code=%d for target %q config add from %q\", resp.StatusCode, tc.Name, service.Address)\n\tif resp.StatusCode > 200 {\n\t\treturn fmt.Errorf(\"status code=%d\", resp.StatusCode)\n\t}\n\t// send target start\n\treq, err = http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf(\"%s://%s/api/v1/targets/%s\", scheme, service.Address, tc.Name), new(bytes.Buffer))\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err = a.clusteringClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\ta.Logger.Printf(\"got response code=%d for target %q assignment from %q\", resp.StatusCode, tc.Name, service.Address)\n\tif resp.StatusCode > 200 {\n\t\treturn fmt.Errorf(\"status code=%d\", resp.StatusCode)\n\t}\n\treturn nil\n}\n\nfunc (a *App) unassignTarget(ctx context.Context, name string, serviceID string) error {\n\terr 
:= a.createAPIClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif s, ok := a.apiServices[serviceID]; ok {\n\t\tscheme := a.getServiceScheme(s)\n\t\turl := fmt.Sprintf(\"%s://%s/api/v1/targets/%s\", scheme, s.Address, name)\n\t\tctx, cancel := context.WithTimeout(ctx, 10*time.Second)\n\t\tdefer cancel()\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trsp, err := a.clusteringClient.Do(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer rsp.Body.Close()\n\t\ta.Logger.Printf(\"received response code=%d, for DELETE %s\", rsp.StatusCode, url)\n\t}\n\treturn nil\n}\n\nfunc (a *App) getServiceScheme(service *lockers.Service) string {\n\tscheme := \"http\"\n\tfor _, t := range service.Tags {\n\t\tif strings.HasPrefix(t, protocolTagName+\"=\") {\n\t\t\tscheme = strings.Split(t, \"=\")[1]\n\t\t\tbreak\n\t\t}\n\t}\n\treturn scheme\n}\n\nfunc (a *App) createAPIClient() error {\n\tif a.clusteringClient != nil {\n\t\treturn nil\n\t}\n\t// no certs\n\tif a.Config.Clustering.TLS == nil {\n\t\ta.clusteringClient = &http.Client{\n\t\t\tTimeout: defaultHTTPClientTimeout,\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\treturn nil\n\t}\n\t// with certs\n\ttlsConfig, err := utils.NewTLSConfig(\n\t\ta.Config.Clustering.TLS.CaFile,\n\t\ta.Config.Clustering.TLS.CertFile,\n\t\ta.Config.Clustering.TLS.KeyFile, \"\",\n\t\ta.Config.Clustering.TLS.SkipVerify,\n\t\tfalse)\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.clusteringClient = &http.Client{\n\t\tTimeout: defaultHTTPClientTimeout,\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t},\n\t}\n\treturn nil\n}\n\nfunc (a *App) clusterRebalanceTargets() error {\n\ta.dispatchLock.Lock()\n\tdefer a.dispatchLock.Unlock()\n\n\trebalanceCount := 0 // counts the number of iterations\n\tmaxIter := -1       // stores the maximum expected number 
of iterations\n\tfor {\n\t\t// get most loaded and least loaded\n\t\tload, err := a.getInstancesLoad()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thighest, lowest := a.getHighAndLowInstance(load)\n\t\tlowLoad := load[lowest]\n\t\thighLoad := load[highest]\n\t\tdelta := highLoad - lowLoad\n\t\tif maxIter < 0 { // set max number of iteration to delta/2\n\t\t\tmaxIter = delta / 2\n\t\t\tif maxIter > maxRebalanceLoop {\n\t\t\t\tmaxIter = maxRebalanceLoop\n\t\t\t}\n\t\t}\n\t\ta.Logger.Printf(\"rebalancing: high instance: %s=%d, low instance %s=%d\", highest, highLoad, lowest, lowLoad)\n\t\t// nothing to do\n\t\tif delta < 2 {\n\t\t\treturn nil\n\t\t}\n\t\tif rebalanceCount >= maxIter {\n\t\t\treturn nil\n\t\t}\n\t\t// there is some work to do\n\t\t// get highest load instance targets\n\t\thighInstanceTargets, err := a.getInstanceTargets(a.ctx, highest)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(highInstanceTargets) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\t// pick one and move it to the lowest load instance\n\t\terr = a.unassignTarget(a.ctx, highInstanceTargets[0], highest+\"-api\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttc, ok := a.Config.Targets[highInstanceTargets[0]]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"could not find target %s config\", highInstanceTargets[0])\n\t\t}\n\t\terr = a.dispatchTarget(a.ctx, tc)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trebalanceCount++\n\t}\n}\n"
  },
  {
    "path": "pkg/app/clustering_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"sort\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n)\n\nvar testSetGetInstancesTagsMatches = map[string]struct {\n\ta      *App\n\tinput  []string\n\tresult map[string]int\n}{\n\t\"test1\": {\n\t\ta: &App{\n\t\t\tapiServices: map[string]*lockers.Service{\n\t\t\t\t\"gnmic1-api\": {\n\t\t\t\t\tTags: []string{\n\t\t\t\t\t\t\"tag1\",\n\t\t\t\t\t\t\"tag2\",\n\t\t\t\t\t\t\"tag3\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"gnmic2-api\": {\n\t\t\t\t\tTags: []string{\n\t\t\t\t\t\t\"tag1\",\n\t\t\t\t\t\t\"tag2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"gnmic3-api\": {},\n\t\t\t},\n\t\t},\n\t\tinput: []string{\n\t\t\t\"tag1\",\n\t\t\t\"tag2\",\n\t\t},\n\t\tresult: map[string]int{\n\t\t\t\"gnmic1\": 2,\n\t\t\t\"gnmic2\": 2,\n\t\t\t\"gnmic3\": 0,\n\t\t},\n\t},\n\t\"test2\": {\n\t\ta: &App{\n\t\t\tapiServices: map[string]*lockers.Service{\n\t\t\t\t\"gnmic1-api\": {\n\t\t\t\t\tTags: []string{\n\t\t\t\t\t\t\"tag1\",\n\t\t\t\t\t\t\"tag2\",\n\t\t\t\t\t\t\"tag3\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"gnmic2-api\": {\n\t\t\t\t\tTags: []string{\n\t\t\t\t\t\t\"tag1\",\n\t\t\t\t\t\t\"tag2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"gnmic3-api\": {},\n\t\t\t},\n\t\t},\n\t\tinput: []string{\n\t\t\t\"tag1\",\n\t\t},\n\t\tresult: map[string]int{\n\t\t\t\"gnmic1\": 1,\n\t\t\t\"gnmic2\": 1,\n\t\t\t\"gnmic3\": 0,\n\t\t},\n\t},\n\t\"test3\": {\n\t\ta: &App{\n\t\t\tapiServices: map[string]*lockers.Service{\n\t\t\t\t\"gnmic1-api\": {\n\t\t\t\t\tTags: 
[]string{\n\t\t\t\t\t\t\"tag1\",\n\t\t\t\t\t\t\"tag2\",\n\t\t\t\t\t\t\"tag3\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"gnmic2-api\": {\n\t\t\t\t\tTags: []string{\n\t\t\t\t\t\t\"tag1\",\n\t\t\t\t\t\t\"tag2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"gnmic3-api\": {},\n\t\t\t},\n\t\t},\n\t\tinput:  []string{},\n\t\tresult: make(map[string]int),\n\t},\n\t\"test4\": {\n\t\ta: &App{\n\t\t\tapiServices: map[string]*lockers.Service{\n\t\t\t\t\"gnmic1-api\": {\n\t\t\t\t\tTags: []string{\n\t\t\t\t\t\t\"tag1\",\n\t\t\t\t\t\t\"tag2\",\n\t\t\t\t\t\t\"tag3\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"gnmic2-api\": {\n\t\t\t\t\tTags: []string{\n\t\t\t\t\t\t\"tag1\",\n\t\t\t\t\t\t\"tag2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"gnmic3-api\": {},\n\t\t\t},\n\t\t},\n\t\tinput: []string{\n\t\t\t\"tag2\",\n\t\t},\n\t\tresult: map[string]int{\n\t\t\t\"gnmic1\": 0,\n\t\t\t\"gnmic2\": 0,\n\t\t\t\"gnmic3\": 0,\n\t\t},\n\t},\n\t\"test5\": {\n\t\ta: &App{\n\t\t\tapiServices: map[string]*lockers.Service{\n\t\t\t\t\"gnmic1-api\": {\n\t\t\t\t\tTags: []string{\n\t\t\t\t\t\t\"tag1\",\n\t\t\t\t\t\t\"tag2\",\n\t\t\t\t\t\t\"tag3\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"gnmic2-api\": {\n\t\t\t\t\tTags: []string{\n\t\t\t\t\t\t\"tag1\",\n\t\t\t\t\t\t\"tag2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"gnmic3-api\": {\n\t\t\t\t\tTags: []string{\n\t\t\t\t\t\t\"tag1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tinput: []string{\n\t\t\t\"tag1\",\n\t\t\t\"tag2\",\n\t\t\t\"tag3\",\n\t\t},\n\t\tresult: map[string]int{\n\t\t\t\"gnmic1\": 3,\n\t\t\t\"gnmic2\": 2,\n\t\t\t\"gnmic3\": 1,\n\t\t},\n\t},\n}\n\nvar testSetGetHighestTagsMatches = map[string]struct {\n\tinput  map[string]int\n\tresult []string\n}{\n\t\"test1\": {\n\t\tinput: map[string]int{\n\t\t\t\"gnmic1\": 2,\n\t\t\t\"gnmic2\": 2,\n\t\t\t\"gnmic3\": 0,\n\t\t},\n\t\tresult: []string{\n\t\t\t\"gnmic1\",\n\t\t\t\"gnmic2\",\n\t\t},\n\t},\n\t\"test2\": {\n\t\tinput: map[string]int{\n\t\t\t\"gnmic1\": 0,\n\t\t\t\"gnmic2\": 0,\n\t\t\t\"gnmic3\": 0,\n\t\t},\n\t\tresult: 
[]string{\n\t\t\t\"gnmic1\",\n\t\t\t\"gnmic2\",\n\t\t\t\"gnmic3\",\n\t\t},\n\t},\n\t\"test3\": {\n\t\tinput: map[string]int{\n\t\t\t\"gnmic1\": 1,\n\t\t\t\"gnmic2\": 1,\n\t\t\t\"gnmic3\": 1,\n\t\t},\n\t\tresult: []string{\n\t\t\t\"gnmic1\",\n\t\t\t\"gnmic2\",\n\t\t\t\"gnmic3\",\n\t\t},\n\t},\n\t\"test4\": {\n\t\tinput: map[string]int{\n\t\t\t\"gnmic1\": 0,\n\t\t\t\"gnmic2\": 0,\n\t\t\t\"gnmic3\": 0,\n\t\t},\n\t\tresult: []string{\n\t\t\t\"gnmic1\",\n\t\t\t\"gnmic2\",\n\t\t\t\"gnmic3\",\n\t\t},\n\t},\n\t\"test5\": {\n\t\tinput: map[string]int{\n\t\t\t\"gnmic1\": 3,\n\t\t\t\"gnmic2\": 2,\n\t\t\t\"gnmic3\": 1,\n\t\t},\n\t\tresult: []string{\n\t\t\t\"gnmic1\",\n\t\t},\n\t},\n}\n\nfunc TestGetInstancesTagsMatches(t *testing.T) {\n\tfor name, item := range testSetGetInstancesTagsMatches {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tres := item.a.getInstancesTagsMatches(item.input)\n\t\t\tt.Logf(\"exp value: %+v\", item.result)\n\t\t\tt.Logf(\"got value: %+v\", res)\n\t\t\tif !cmp.Equal(item.result, res) {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetHighestTagsMatches(t *testing.T) {\n\ta := &App{}\n\tfor name, item := range testSetGetHighestTagsMatches {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tres := a.getHighestTagsMatches(item.input)\n\t\t\tsort.Strings(res)\n\t\t\tt.Logf(\"exp value: %+v\", item.result)\n\t\t\tt.Logf(\"got value: %+v\", res)\n\t\t\tif !cmp.Equal(item.result, res) {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/app/const.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport \"time\"\n\nconst (\n\tdefaultGrpcPort   = \"57400\"\n\tmsgSize           = 512 * 1024 * 1024\n\tdefaultRetryTimer = 10 * time.Second\n\tdefaultPprofAddr  = \"127.0.0.1:6060\"\n\n\tformatJSON      = \"json\"\n\tformatPROTOJSON = \"protojson\"\n\tformatPROTOTEXT = \"prototext\"\n\tformatEvent     = \"event\"\n\tformatPROTO     = \"proto\"\n\tformatFLAT      = \"flat\"\n)\n\nvar encodingNames = []string{\n\t\"json\",\n\t\"bytes\",\n\t\"proto\",\n\t\"ascii\",\n\t\"json_ietf\",\n}\n\nvar formatNames = []string{\n\tformatJSON,\n\tformatPROTOJSON,\n\tformatPROTOTEXT,\n\tformatEvent,\n\tformatPROTO,\n\tformatFLAT,\n}\n\nvar tlsVersions = []string{\"1.3\", \"1.2\", \"1.1\", \"1.0\", \"1\"}\n"
  },
  {
    "path": "pkg/app/diff.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype targetDiffResponse struct {\n\tt  string\n\tr  *gnmi.GetResponse\n\trs []proto.Message\n}\n\n// InitDiffFlags used to init or reset diffCmd flags for gnmic-prompt mode\nfunc (a *App) InitDiffFlags(cmd *cobra.Command) {\n\tcmd.ResetFlags()\n\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.DiffPath, \"path\", \"\", []string{}, \"diff request paths\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.DiffRef, \"ref\", \"\", \"\", \"reference gNMI target to compare the other targets to\")\n\tcmd.MarkFlagRequired(\"ref\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.DiffCompare, \"compare\", \"\", []string{}, \"gNMI targets to compare to the reference\")\n\tcmd.MarkFlagRequired(\"compare\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.DiffPrefix, \"prefix\", \"\", \"\", \"diff request prefix\")\n\tcmd.Flags().StringSliceVarP(&a.Config.LocalFlags.DiffModel, \"model\", \"\", []string{}, \"diff request models\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.DiffType, \"type\", \"t\", \"ALL\", \"data type requested from the target. 
one of: ALL, CONFIG, STATE, OPERATIONAL\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.DiffTarget, \"target\", \"\", \"\", \"get request target\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.DiffSub, \"sub\", \"\", false, \"use subscribe ONCE mode instead of a get request\")\n\tcmd.Flags().Uint32VarP(&a.Config.LocalFlags.DiffQos, \"qos\", \"\", 0, \"QoS marking in case subscribe RPC is used\")\n\n\tcmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", cmd.Name(), flag.Name), flag)\n\t})\n}\n\nfunc (a *App) DiffPreRunE(cmd *cobra.Command, args []string) error {\n\ta.Config.SetLocalFlagsFromFile(cmd)\n\tif len(a.Config.LocalFlags.DiffPath) == 0 {\n\t\ta.Config.LocalFlags.DiffPath = []string{\"/\"}\n\t}\n\ta.Config.LocalFlags.DiffPath = config.SanitizeArrayFlagValue(a.Config.LocalFlags.DiffPath)\n\ta.Config.LocalFlags.DiffModel = config.SanitizeArrayFlagValue(a.Config.LocalFlags.DiffModel)\n\ta.Config.LocalFlags.DiffCompare = config.SanitizeArrayFlagValue(a.Config.LocalFlags.DiffCompare)\n\n\ta.createCollectorDialOpts()\n\treturn a.initTunnelServer(tunnel.ServerConfig{\n\t\tAddTargetHandler:    a.tunServerAddTargetHandler,\n\t\tDeleteTargetHandler: a.tunServerDeleteTargetHandler,\n\t\tRegisterHandler:     a.tunServerRegisterHandler,\n\t\tHandler:             a.tunServerHandler,\n\t})\n}\n\nfunc (a *App) DiffRunE(cmd *cobra.Command, args []string) error {\n\tdefer a.InitDiffFlags(cmd)\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\t// setupCloseHandler(cancel)\n\trefTarget, targetsConfig, err := a.Config.GetDiffTargets()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed getting diff targets config: %v\", err)\n\t}\n\tif refTarget == nil {\n\t\treturn fmt.Errorf(\"failed getting diff reference target config\")\n\t}\n\tif len(targetsConfig) == 0 {\n\t\treturn fmt.Errorf(\"failed getting diff compare targets config\")\n\t}\n\tif !a.PromptMode {\n\t\t// cfg := 
&collector.Config{\n\t\t// \tDebug:               a.Config.Debug,\n\t\t// \tFormat:              a.Config.Format,\n\t\t// \tTargetReceiveBuffer: a.Config.TargetBufferSize,\n\t\t// \tRetryTimer:          a.Config.Retry,\n\t\t// }\n\t\t// allTargets := make(map[string]*types.TargetConfig)\n\t\t// for n, tc := range targetsConfig {\n\t\t// \tallTargets[n] = tc\n\t\t// }\n\t\t// allTargets[refTarget.Name] = refTarget\n\n\t\t// a.collector = collector.New(cfg, allTargets,\n\t\t// \tcollector.WithDialOptions(a.createCollectorDialOpts()),\n\t\t// \tcollector.WithLogger(a.Logger),\n\t\t// )\n\t} else {\n\t\t// prompt mode\n\t\ta.AddTargetConfig(refTarget)\n\t\tfor _, tc := range targetsConfig {\n\t\t\ta.AddTargetConfig(tc)\n\t\t}\n\t}\n\n\tnumTargets := len(targetsConfig) + 1\n\ta.errCh = make(chan error, numTargets*2)\n\ta.wg.Add(numTargets)\n\n\tcompares := make([]*types.TargetConfig, 0, len(targetsConfig))\n\tfor _, t := range targetsConfig {\n\t\tcompares = append(compares, t)\n\t}\n\tsort.Slice(compares, func(i, j int) bool {\n\t\treturn compares[i].Name < compares[j].Name\n\t})\n\n\terr = a.diff(ctx, cmd, refTarget, compares)\n\tif err != nil {\n\t\ta.logError(err)\n\t}\n\treturn a.checkErrors()\n}\n\nfunc (a *App) diff(ctx context.Context, cmd *cobra.Command, ref *types.TargetConfig, compare []*types.TargetConfig) error {\n\tif a.Config.DiffSub {\n\t\treturn a.subscribeBasedDiff(ctx, cmd, ref, compare)\n\t}\n\treturn a.getBasedDiff(ctx, ref, compare)\n}\n\nfunc (a *App) subscribeBasedDiff(ctx context.Context, cmd *cobra.Command, ref *types.TargetConfig, compare []*types.TargetConfig) error {\n\tsubReq, err := a.Config.CreateDiffSubscribeRequest(cmd)\n\tif err != nil {\n\t\tif errors.Is(errors.Unwrap(err), config.ErrConfig) {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\tnumCompares := len(compare)\n\trefResponse := make([]proto.Message, 0)\n\trspChan := make(chan *targetDiffResponse, numCompares)\n\ta.operLock.Lock()\n\trefTarget, 
err := a.initTarget(ref)\n\ta.operLock.Unlock()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tgo func() {\n\t\tdefer a.wg.Done()\n\t\terr = refTarget.CreateGNMIClient(ctx, a.dialOpts...)\n\t\tif err != nil {\n\t\t\ta.logError(err)\n\t\t\treturn\n\t\t}\n\t\ta.Logger.Printf(\"sending gNMI SubscribeRequest: subscribe='%+v', mode='%+v', encoding='%+v', to %s\",\n\t\t\tsubReq.Request, subReq.GetSubscribe().GetMode(), subReq.GetSubscribe().GetEncoding(), ref)\n\t\trspChan, errChan := refTarget.SubscribeOnceChan(ctx, subReq)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase r := <-rspChan:\n\t\t\t\tswitch r.Response.(type) {\n\t\t\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\t\t\trefResponse = append(refResponse, r)\n\t\t\t\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase err := <-errChan:\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\ta.logError(err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor _, tc := range compare {\n\t\ta.operLock.Lock()\n\t\tt, err := a.initTarget(tc)\n\t\ta.operLock.Unlock()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgo func(tName string) {\n\t\t\tdefer a.wg.Done()\n\t\t\terr = t.CreateGNMIClient(ctx, a.dialOpts...)\n\t\t\tif err != nil {\n\t\t\t\ta.logError(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresponses := make([]proto.Message, 0)\n\t\t\ta.Logger.Printf(\"sending gNMI SubscribeRequest: subscribe='%+v', mode='%+v', encoding='%+v', to %s\",\n\t\t\t\tsubReq.Request, subReq.GetSubscribe().GetMode(), subReq.GetSubscribe().GetEncoding(), tName)\n\t\t\tsubRspChan, errChan := t.SubscribeOnceChan(ctx, subReq)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase r := <-subRspChan:\n\t\t\t\t\tswitch r.Response.(type) {\n\t\t\t\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\t\t\t\tresponses = append(responses, r)\n\t\t\t\t\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\t\t\t\t\trspChan <- &targetDiffResponse{\n\t\t\t\t\t\t\tt:  tName,\n\t\t\t\t\t\t\trs: 
responses,\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\tcase err := <-errChan:\n\t\t\t\t\tif err == io.EOF {\n\t\t\t\t\t\trspChan <- &targetDiffResponse{\n\t\t\t\t\t\t\tt:  tName,\n\t\t\t\t\t\t\trs: responses,\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\ta.logError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(tc.Name)\n\t\tcontinue\n\t}\n\ta.wg.Wait()\n\tclose(rspChan)\n\n\trsps := make([]*targetDiffResponse, 0, numCompares)\n\tfor r := range rspChan {\n\t\trsps = append(rsps, r)\n\t}\n\tif len(rsps) == 0 {\n\t\ta.Logger.Printf(\"missing response(s)\")\n\t\treturn fmt.Errorf(\"missing response(s)\")\n\t}\n\n\tfor _, cr := range rsps {\n\t\tfmt.Fprintf(os.Stderr, \"%q vs %q\\n\", ref.Name, cr.t)\n\t\terr = a.responsesDiff(refResponse, cr.rs)\n\t\tif err != nil {\n\t\t\ta.logError(err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *App) getBasedDiff(ctx context.Context, ref *types.TargetConfig, compare []*types.TargetConfig) error {\n\tgetReq, err := a.Config.CreateDiffGetRequest()\n\tif err != nil {\n\t\treturn err\n\t}\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tvar refResponse proto.Message\n\tnumCompares := len(compare)\n\n\tgo func() {\n\t\tdefer a.wg.Done()\n\t\ta.Logger.Printf(\"sending gNMI GetRequest: prefix='%v', path='%v', type='%v', encoding='%v', models='%+v', extension='%+v' to %s\",\n\t\t\tgetReq.Prefix, getReq.Path, getReq.Type, getReq.Encoding, getReq.UseModels, getReq.Extension, ref)\n\t\trefResponse, err = a.ClientGet(ctx, ref, getReq)\n\t\tif err != nil {\n\t\t\ta.logError(fmt.Errorf(\"target %q get request failed: %v\", ref, err))\n\t\t\treturn\n\t\t}\n\t}()\n\trspChan := make(chan *targetDiffResponse, numCompares)\n\tfor _, tc := range compare {\n\t\tgo func(tc *types.TargetConfig) {\n\t\t\tdefer a.wg.Done()\n\t\t\ta.Logger.Printf(\"sending gNMI GetRequest: prefix='%v', path='%v', type='%v', encoding='%v', models='%+v', extension='%+v' to %s\",\n\t\t\t\tgetReq.Prefix, getReq.Path, getReq.Type, 
getReq.Encoding, getReq.UseModels, getReq.Extension, tc.Name)\n\t\t\tresponse, err := a.ClientGet(ctx, tc, getReq)\n\t\t\tif err != nil {\n\t\t\t\ta.logError(fmt.Errorf(\"target %q get request failed: %v\", tc.Name, err))\n\t\t\t\treturn\n\t\t\t}\n\t\t\trspChan <- &targetDiffResponse{\n\t\t\t\tt: tc.Name,\n\t\t\t\tr: response,\n\t\t\t}\n\t\t}(tc)\n\t}\n\ta.wg.Wait()\n\tclose(rspChan)\n\trsps := make([]*targetDiffResponse, 0, numCompares)\n\tfor r := range rspChan {\n\t\trsps = append(rsps, r)\n\t}\n\tif len(rsps) == 0 {\n\t\treturn fmt.Errorf(\"no responses received\")\n\t}\n\n\tsort.Slice(rsps, func(i, j int) bool {\n\t\treturn rsps[i].t < rsps[j].t\n\t})\n\tfor _, cr := range rsps {\n\t\tfmt.Fprintf(os.Stderr, \"%q vs %q\\n\", ref.Name, cr.t)\n\t\terr = a.responsesDiff([]proto.Message{refResponse}, []proto.Message{cr.r})\n\t\tif err != nil {\n\t\t\ta.logError(err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *App) responsesDiff(r1, r2 []proto.Message) error {\n\trs1, err := formatters.ResponsesFlat(r1...)\n\tif err != nil {\n\t\treturn err\n\t}\n\trs2, err := formatters.ResponsesFlat(r2...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar df diffs\n\tfor p, v := range rs1 {\n\t\tif v2, ok := rs2[p]; ok {\n\t\t\tif !reflect.DeepEqual(v, v2) {\n\t\t\t\tdf = append(df, diff{add: false, path: p, value: fmt.Sprintf(\"%v\", v)})\n\t\t\t\tdf = append(df, diff{add: true, path: p, value: fmt.Sprintf(\"%v\", v2)})\n\t\t\t}\n\t\t\tdelete(rs2, p)\n\t\t\tcontinue\n\t\t}\n\t\tdf = append(df, diff{add: false, path: p, value: fmt.Sprintf(\"%v\", v)})\n\t\tcontinue\n\t}\n\tfor p, v := range rs2 {\n\t\tdf = append(df, diff{add: true, path: p, value: fmt.Sprintf(\"%v\", v)})\n\t}\n\tsort.Slice(df, func(i, j int) bool {\n\t\treturn df[i].path < df[j].path\n\t})\n\tfmt.Println(df)\n\treturn nil\n}\n\ntype diff struct {\n\tadd   bool\n\tpath  string\n\tvalue string\n}\n\ntype diffs []diff\n\nfunc (ds diffs) String() string {\n\tml := 0\n\tfor _, d := range ds {\n\t\tlp := len(d.path)\n\t\tif 
lp > ml {\n\t\t\tml = lp\n\t\t}\n\t}\n\ttpl := fmt.Sprintf(\"%%-%ds\", ml)\n\tsb := new(strings.Builder)\n\tnumDiffs := len(ds)\n\tfor i, d := range ds {\n\t\tif d.add {\n\t\t\tsb.WriteString(\"+\\t\")\n\t\t} else {\n\t\t\tsb.WriteString(\"-\\t\")\n\t\t}\n\t\tsb.WriteString(fmt.Sprintf(tpl, d.path))\n\t\tsb.WriteString(\": \")\n\t\tsb.WriteString(d.value)\n\t\tif i < numDiffs-1 {\n\t\t\tsb.WriteString(\"\\n\")\n\t\t}\n\t}\n\treturn sb.String()\n}\n"
  },
  {
    "path": "pkg/app/generate.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/huandu/xstrings\"\n\t\"github.com/openconfig/goyang/pkg/yang\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n\t\"gopkg.in/yaml.v2\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/path\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n)\n\n// options for formatting keys when generating yaml/json payloads\ntype keyOpts struct {\n\tcamelCase bool\n\tsnakeCase bool\n}\n\nfunc (ko *keyOpts) format(s string) string {\n\tif ko.camelCase {\n\t\treturn xstrings.ToCamelCase(s)\n\t}\n\tif ko.snakeCase {\n\t\treturn xstrings.ToSnakeCase(s)\n\t}\n\treturn s\n}\n\nfunc (a *App) GenerateRunE(cmd *cobra.Command, args []string) error {\n\tdefer a.InitGenerateFlags(cmd)\n\tvar output = os.Stdout\n\tif a.Config.GenerateOutput != \"\" {\n\t\tf, err := os.OpenFile(a.Config.GenerateOutput, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\t\toutput = f\n\t}\n\terr := a.generateYangSchema(a.Config.GlobalFlags.File, a.Config.GlobalFlags.Exclude)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm := make(map[string]interface{})\n\tkOpts := &keyOpts{\n\t\tcamelCase: a.Config.LocalFlags.GenerateCamelCase,\n\t\tsnakeCase: a.Config.LocalFlags.GenerateSnakeCase,\n\t}\n\tfor _, e := range a.SchemaTree.Dir {\n\t\te.FixChoice()\n\t\tnm := toMap(e, a.Config.GenerateConfigOnly, kOpts)\n\t\tif nm == nil 
{\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch nm := nm.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tfor k, v := range nm {\n\t\t\t\tm[kOpts.format(k)] = v\n\t\t\t}\n\t\tcase []interface{}, string:\n\t\t\tm[kOpts.format(e.Name)] = nm\n\t\t}\n\t}\n\n\tv, err := getSubMapByPath(a.Config.GeneratePath, m, kOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif output != os.Stdout {\n\t\terr = output.Truncate(0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif a.Config.GenerateJSON {\n\t\tenc := json.NewEncoder(output)\n\t\tenc.SetIndent(\"\", \"  \")\n\t\treturn enc.Encode(v)\n\t}\n\treturn yaml.NewEncoder(output).Encode(v)\n}\n\nfunc (a *App) GeneratePreRunE(cmd *cobra.Command, args []string) error {\n\ta.Config.SetLocalFlagsFromFile(cmd)\n\tif a.Config.LocalFlags.GenerateCamelCase && a.Config.LocalFlags.GenerateSnakeCase {\n\t\treturn errors.New(\"flags --camel-case and --snake-case are mutually exclusive\")\n\t}\n\treturn a.yangFilesPreProcessing()\n}\n\nfunc (a *App) yangFilesPreProcessing() error {\n\ta.Config.GlobalFlags.Dir = config.SanitizeArrayFlagValue(a.Config.GlobalFlags.Dir)\n\ta.Config.GlobalFlags.File = config.SanitizeArrayFlagValue(a.Config.GlobalFlags.File)\n\ta.Config.GlobalFlags.Exclude = config.SanitizeArrayFlagValue(a.Config.GlobalFlags.Exclude)\n\n\tvar err error\n\ta.Config.GlobalFlags.Dir, err = resolveGlobs(a.Config.GlobalFlags.Dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.Config.GlobalFlags.File, err = resolveGlobs(a.Config.GlobalFlags.File)\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.modules = yang.NewModules()\n\tfor _, dirpath := range a.Config.GlobalFlags.Dir {\n\t\texpanded, err := yang.PathsWithModules(dirpath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif a.Config.Debug {\n\t\t\tfor _, fdir := range expanded {\n\t\t\t\ta.Logger.Printf(\"adding %s to YANG paths\", fdir)\n\t\t\t}\n\t\t}\n\t\ta.modules.AddPath(expanded...)\n\t}\n\tyfiles, err := findYangFiles(a.Config.GlobalFlags.File)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\ta.Config.GlobalFlags.File = make([]string, 0, len(yfiles))\n\ta.Config.GlobalFlags.File = append(a.Config.GlobalFlags.File, yfiles...)\n\tif a.Config.Debug {\n\t\tfor _, file := range a.Config.GlobalFlags.File {\n\t\t\ta.Logger.Printf(\"loading %s file\", file)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *App) GenerateSetRequestRunE(cmd *cobra.Command, args []string) error {\n\tdefer a.InitGenerateSetRequestFlags(cmd)\n\tvar output = os.Stdout\n\tif a.Config.GenerateOutput != \"\" {\n\t\tf, err := os.OpenFile(a.Config.GenerateOutput, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\t\toutput = f\n\t}\n\terr := a.generateYangSchema(a.Config.GlobalFlags.File, a.Config.GlobalFlags.Exclude)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm := make(map[string]interface{})\n\tfor _, e := range a.SchemaTree.Dir {\n\t\te.FixChoice()\n\t\tnm := toMap(e, true, new(keyOpts))\n\t\tif nm == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch nm := nm.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tfor k, v := range nm {\n\t\t\t\tm[k] = v\n\t\t\t}\n\t\tdefault:\n\t\t\tm[e.Name] = nm\n\t\t}\n\t}\n\n\tsetReqFile, err := a.createSetRequestFile(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif output != os.Stdout {\n\t\terr = output.Truncate(0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif a.Config.GenerateJSON {\n\t\tenc := json.NewEncoder(output)\n\t\tenc.SetIndent(\"\", \"  \")\n\t\treturn enc.Encode(setReqFile)\n\t}\n\treturn yaml.NewEncoder(output).Encode(setReqFile)\n}\n\nfunc (a *App) InitGenerateFlags(cmd *cobra.Command) {\n\tcmd.ResetFlags()\n\t// persistent flags\n\tcmd.PersistentFlags().StringVarP(&a.Config.LocalFlags.GenerateOutput, \"output\", \"o\", \"\", \"output file, defaults to stdout\")\n\tcmd.PersistentFlags().BoolVarP(&a.Config.LocalFlags.GenerateJSON, \"json\", \"j\", false, \"generate output as JSON format instead of YAML\")\n\t// local 
flags\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.GenerateConfigOnly, \"config-only\", \"\", false, \"generate output from YANG config nodes only\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GeneratePath, \"path\", \"\", \"\", \"generate marshaled YANG body under specified path\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.GenerateCamelCase, \"camel-case\", \"\", false, \"convert keys to camelCase\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.GenerateSnakeCase, \"snake-case\", \"\", false, \"convert keys to snake_case\")\n\n\tcmd.Flags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", cmd.Name(), flag.Name), flag)\n\t})\n}\n\nfunc (a *App) InitGenerateSetRequestFlags(cmd *cobra.Command) {\n\tcmd.ResetFlags()\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.GenerateSetRequestReplacePath, \"replace\", \"\", []string{}, \"replace path\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.GenerateSetRequestUpdatePath, \"update\", \"\", []string{}, \"update path\")\n\n\tcmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", cmd.Name(), flag.Name), flag)\n\t})\n}\n\nfunc (a *App) generateYangSchema(files, excludes []string) error {\n\tif len(files) == 0 {\n\t\treturn nil\n\t}\n\n\tfor _, name := range files {\n\t\tif err := a.modules.Read(name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif errors := a.modules.Process(); len(errors) > 0 {\n\t\tfor _, e := range errors {\n\t\t\tfmt.Fprintf(os.Stderr, \"yang processing error: %v\\n\", e)\n\t\t}\n\t\treturn fmt.Errorf(\"yang processing failed with %d errors\", len(errors))\n\t}\n\t// Keep track of the top level modules we read in.\n\t// Those are the only modules we want to print below.\n\tmods := map[string]*yang.Module{}\n\tvar names []string\n\n\tfor _, m := range a.modules.Modules {\n\t\tif mods[m.Name] == nil {\n\t\t\tmods[m.Name] = m\n\t\t\tnames = append(names, 
m.Name)\n\t\t}\n\t}\n\tsort.Strings(names)\n\tentries := make([]*yang.Entry, len(names))\n\tfor x, n := range names {\n\t\tentries[x] = yang.ToEntry(mods[n])\n\t}\n\n\ta.SchemaTree = buildRootEntry()\n\texcludeRegexes := make([]*regexp.Regexp, 0, len(excludes))\n\tfor _, e := range excludes {\n\t\tr, err := regexp.Compile(e)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texcludeRegexes = append(excludeRegexes, r)\n\t}\n\tfor _, entry := range entries {\n\t\tskip := false\n\t\tfor _, r := range excludeRegexes {\n\t\t\tif r.MatchString(entry.Name) {\n\t\t\t\ta.Logger.Printf(\"skipping %s\", entry.Name)\n\t\t\t\tskip = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !skip {\n\t\t\tupdateAnnotation(entry)\n\t\t\ta.SchemaTree.Dir[entry.Name] = entry\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *App) createSetRequestFile(m map[string]interface{}) (*config.SetRequestFile, error) {\n\tsetReqFile := &config.SetRequestFile{\n\t\tReplaces: make([]*config.UpdateItem, 0, len(a.Config.GenerateSetRequestReplacePath)),\n\t\tUpdates:  make([]*config.UpdateItem, 0, len(a.Config.GenerateSetRequestUpdatePath)),\n\t}\n\tvar enc string\n\tif strings.ToUpper(a.Config.Encoding) != \"JSON\" {\n\t\tenc = strings.ToUpper(a.Config.Encoding)\n\t}\n\tif len(a.Config.GenerateSetRequestReplacePath)+len(a.Config.GenerateSetRequestUpdatePath) == 0 {\n\t\tsortedKeys := make([]string, 0, len(m))\n\t\tfor k := range m {\n\t\t\tsortedKeys = append(sortedKeys, k)\n\t\t}\n\n\t\tsort.Strings(sortedKeys)\n\t\tfor _, n := range sortedKeys {\n\t\t\tsetReqFile.Replaces = append(setReqFile.Replaces,\n\t\t\t\t&config.UpdateItem{\n\t\t\t\t\tPath:     fmt.Sprintf(\"/%s\", n),\n\t\t\t\t\tEncoding: enc,\n\t\t\t\t\tValue:    m[n],\n\t\t\t\t})\n\t\t}\n\t\treturn setReqFile, nil\n\t}\n\tfor _, p := range a.Config.GenerateSetRequestReplacePath {\n\t\tuItem, err := pathToUpdateItem(p, m, new(keyOpts))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tuItem.Encoding = enc\n\t\tsetReqFile.Replaces = 
append(setReqFile.Replaces, uItem)\n\t}\n\tfor _, p := range a.Config.GenerateSetRequestUpdatePath {\n\t\tuItem, err := pathToUpdateItem(p, m, new(keyOpts))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tuItem.Encoding = enc\n\t\tsetReqFile.Updates = append(setReqFile.Updates, uItem)\n\t}\n\treturn setReqFile, nil\n}\n\nfunc buildRootEntry() *yang.Entry {\n\treturn &yang.Entry{\n\t\tName: \"root\",\n\t\tKind: yang.DirectoryEntry,\n\t\tDir:  make(map[string]*yang.Entry),\n\t\tAnnotation: map[string]interface{}{\n\t\t\t\"schemapath\": \"/\",\n\t\t\t\"root\":       true,\n\t\t},\n\t}\n}\n\n// updateAnnotation updates the schema info before encoding.\nfunc updateAnnotation(entry *yang.Entry) {\n\tfor _, child := range entry.Dir {\n\t\tupdateAnnotation(child)\n\t\tchild.Annotation = map[string]interface{}{}\n\t\tt := child.Type\n\t\tif t == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch t.Kind {\n\t\tcase yang.Ybits:\n\t\t\tnameMap := t.Bit.NameMap()\n\t\t\tbits := make([]string, 0, len(nameMap))\n\t\t\tfor bitstr := range nameMap {\n\t\t\t\tbits = append(bits, bitstr)\n\t\t\t}\n\t\t\tchild.Annotation[\"bits\"] = bits\n\t\tcase yang.Yenum:\n\t\t\tnameMap := t.Enum.NameMap()\n\t\t\tenum := make([]string, 0, len(nameMap))\n\t\t\tfor enumstr := range nameMap {\n\t\t\t\tenum = append(enum, enumstr)\n\t\t\t}\n\t\t\tchild.Annotation[\"enum\"] = enum\n\t\tcase yang.Yidentityref:\n\t\t\tidentities := make([]string, 0, len(t.IdentityBase.Values))\n\t\t\tfor i := range t.IdentityBase.Values {\n\t\t\t\tidentities = append(identities, t.IdentityBase.Values[i].PrefixedName())\n\t\t\t}\n\t\t\tchild.Annotation[\"prefix-qualified-identities\"] = identities\n\t\t}\n\t\tif t.Root != nil {\n\t\t\tchild.Annotation[\"root.type\"] = t.Root.Name\n\t\t}\n\t}\n}\n\nfunc toMap(e *yang.Entry, configOnly bool, kopts *keyOpts) interface{} {\n\tif e == nil {\n\t\treturn nil\n\t}\n\tif e.Config == yang.TSFalse && configOnly {\n\t\treturn nil\n\t}\n\tm := make(map[string]interface{})\n\tswitch 
{\n\tcase e.Dir == nil && e.ListAttr != nil: // leaf-list\n\t\tif e.Config == yang.TSFalse && configOnly {\n\t\t\treturn nil\n\t\t}\n\t\treturn e.Default\n\tcase e.Dir == nil: // leaf\n\t\tif e.Config == yang.TSFalse && configOnly {\n\t\t\treturn nil\n\t\t}\n\t\tif len(e.Default) > 0 {\n\t\t\treturn e.Default[0]\n\t\t}\n\t\treturn \"\"\n\tcase e.ListAttr != nil: // list\n\t\tfor n, child := range e.Dir {\n\t\t\tgChild := toMap(child, configOnly, kopts)\n\t\t\tswitch gChild := gChild.(type) {\n\t\t\tcase map[string]interface{}:\n\t\t\t\tfor k, v := range gChild {\n\t\t\t\t\tm[kopts.format(k)] = v\n\t\t\t\t}\n\t\t\tcase []interface{}, []string, string:\n\t\t\t\tm[kopts.format(n)] = gChild\n\t\t\t}\n\t\t}\n\t\treturn []interface{}{m}\n\tdefault: // container\n\t\tnm := make(map[string]interface{})\n\t\tfor n, child := range e.Dir {\n\t\t\tif child.IsCase() || child.IsChoice() {\n\t\t\t\tfor _, gchild := range child.Dir {\n\t\t\t\t\tnnm := toMap(gchild, configOnly, kopts)\n\t\t\t\t\tswitch nnm := nnm.(type) {\n\t\t\t\t\tcase map[string]interface{}:\n\t\t\t\t\t\tif child.IsChoice() {\n\t\t\t\t\t\t\tfor k, v := range nnm {\n\t\t\t\t\t\t\t\tnm[kopts.format(k)] = v\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\tcase nil:\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tnm[kopts.format(n)] = nnm\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnnm := toMap(child, configOnly, kopts)\n\t\t\tif nnm == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnm[kopts.format(n)] = nnm\n\t\t}\n\t\tif e.Parent != nil && e.Parent.IsList() && !(e.IsCase() || e.IsChoice()) {\n\t\t\tm[kopts.format(e.Name)] = nm\n\t\t\treturn m\n\t\t}\n\t\tfor k, v := range nm {\n\t\t\tm[kopts.format(k)] = v\n\t\t}\n\t\treturn m\n\t}\n}\n\nfunc pathToUpdateItem(p string, m map[string]interface{}, kopts *keyOpts) (*config.UpdateItem, error) {\n\tv, err := getSubMapByPath(p, m, kopts)\n\treturn &config.UpdateItem{\n\t\tPath:  p,\n\t\tValue: v,\n\t}, err\n}\n\nfunc getSubMapByPath(p string, m map[string]interface{}, kopts *keyOpts) 
(interface{}, error) {\n\tif p == \"\" || p == \"/\" {\n\t\treturn m, nil\n\t}\n\t// strip path from keys if any\n\tgp, err := path.ParsePath(p)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse xpath %q: %v\", p, err)\n\t}\n\tpItems := make([]string, 0, len(gp.Elem))\n\tfor _, e := range gp.Elem {\n\t\tif e.Name != \"\" {\n\t\t\tpItems = append(pItems, kopts.format(e.Name))\n\t\t}\n\t}\n\t// get value body recursively from map\n\tvar rVal interface{}\n\trVal = m\n\tfor _, item := range pItems {\n\t\tswitch rValm := rVal.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tif r, ok := rValm[item]; ok {\n\t\t\t\trVal = r\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"unknown path item %q in path %q\", item, p)\n\t\t\t}\n\t\tcase []interface{}:\n\t\t\tif len(rValm) != 1 {\n\t\t\t\treturn nil, fmt.Errorf(\"got list with more than 1 item ?\")\n\t\t\t}\n\t\t\tswitch rValmn := rValm[0].(type) {\n\t\t\tcase map[string]interface{}:\n\t\t\t\tif r, ok := rValmn[item]; ok {\n\t\t\t\t\trVal = r\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, fmt.Errorf(\"unknown path item %q in path %q\", item, p)\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unexpected sub map format @%q: %T\", item, rVal)\n\t\t}\n\t}\n\treturn rVal, nil\n}\n\n//////\n\nfunc resolveGlobs(globs []string) ([]string, error) {\n\tresults := make([]string, 0, len(globs))\n\tfor _, pattern := range globs {\n\t\tfor _, p := range strings.Split(pattern, \",\") {\n\t\t\tif strings.ContainsAny(p, `*?[`) {\n\t\t\t\t// is a glob pattern\n\t\t\t\tmatches, err := filepath.Glob(p)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tresults = append(results, matches...)\n\t\t\t} else {\n\t\t\t\t// is not a glob pattern ( file or dir )\n\t\t\t\tresults = append(results, p)\n\t\t\t}\n\t\t}\n\t}\n\treturn config.ExpandOSPaths(results)\n}\n\nfunc walkDir(path, ext string) ([]string, error) {\n\tfs := make([]string, 0)\n\terr := filepath.Walk(path,\n\t\tfunc(path string, _ 
os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfi, err := os.Stat(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tswitch mode := fi.Mode(); {\n\t\t\tcase mode.IsRegular():\n\t\t\t\tif filepath.Ext(path) == ext {\n\t\t\t\t\tfs = append(fs, path)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn fs, nil\n}\n\nfunc findYangFiles(files []string) ([]string, error) {\n\tyfiles := make([]string, 0, len(files))\n\tfor _, file := range files {\n\t\tfi, err := os.Stat(file)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tswitch mode := fi.Mode(); {\n\t\tcase mode.IsDir():\n\t\t\tfls, err := walkDir(file, \".yang\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tyfiles = append(yfiles, fls...)\n\t\tcase mode.IsRegular():\n\t\t\tif filepath.Ext(file) == \".yang\" {\n\t\t\t\tyfiles = append(yfiles, file)\n\t\t\t}\n\t\t}\n\t}\n\treturn yfiles, nil\n}\n"
  },
  {
    "path": "pkg/app/generatePath.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n)\n\nfunc (a *App) GeneratePathPreRunE(cmd *cobra.Command, args []string) error {\n\ta.Config.SetLocalFlagsFromFile(cmd)\n\tif a.Config.GeneratePathSearch && a.Config.GeneratePathWithDescr {\n\t\treturn errors.New(\"flags --search and --descr cannot be used together\")\n\t}\n\tif a.Config.LocalFlags.GeneratePathPathType != \"xpath\" && a.Config.LocalFlags.GeneratePathPathType != \"gnmi\" {\n\t\treturn errors.New(\"path-type must be one of 'xpath' or 'gnmi'\")\n\t}\n\treturn nil\n}\n\nfunc (a *App) GeneratePathRunE(cmd *cobra.Command, args []string) error {\n\treturn a.PathCmdRun(\n\t\ta.Config.GlobalFlags.Dir,\n\t\ta.Config.GlobalFlags.File,\n\t\ta.Config.GlobalFlags.Exclude,\n\t\tpathGenOpts{\n\t\t\tsearch:        a.Config.LocalFlags.GeneratePathSearch,\n\t\t\twithDescr:     a.Config.LocalFlags.GeneratePathWithDescr,\n\t\t\twithTypes:     a.Config.LocalFlags.GeneratePathWithTypes,\n\t\t\twithPrefix:    a.Config.LocalFlags.GeneratePathWithPrefix,\n\t\t\tpathType:      a.Config.LocalFlags.GeneratePathPathType,\n\t\t\tstateOnly:     a.Config.LocalFlags.GeneratePathState,\n\t\t\tconfigOnly:    a.Config.LocalFlags.GeneratePathConfig,\n\t\t\tjson:          a.Config.LocalFlags.GenerateJSON,\n\t\t\twithNonLeaves: a.Config.LocalFlags.GeneratePathWithNonLeaves,\n\t\t},\n\t)\n}\n\nfunc (a *App) InitGeneratePathFlags(cmd *cobra.Command) 
{\n\tcmd.ResetFlags()\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GeneratePathPathType, \"path-type\", \"\", \"xpath\", \"path type xpath or gnmi\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathWithDescr, \"descr\", \"\", false, \"print leaf description\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathWithPrefix, \"with-prefix\", \"\", false, \"include module/submodule prefix in path elements\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathWithTypes, \"types\", \"\", false, \"print leaf type\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathSearch, \"search\", \"\", false, \"search through path list\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathState, \"state-only\", \"\", false, \"generate paths only for YANG leafs representing state data\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathConfig, \"config-only\", \"\", false, \"generate paths only for YANG leafs representing config data\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathWithNonLeaves, \"with-non-leaves\", \"\", false, \"also generate paths for non-leaf nodes\")\n\tcmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", cmd.Name(), flag.Name), flag)\n\t})\n}\n"
  },
  {
    "path": "pkg/app/get.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nfunc (a *App) GetPreRunE(cmd *cobra.Command, args []string) error {\n\ta.Config.SetLocalFlagsFromFile(cmd)\n\ta.Config.LocalFlags.GetPath = config.SanitizeArrayFlagValue(a.Config.LocalFlags.GetPath)\n\ta.Config.LocalFlags.GetModel = config.SanitizeArrayFlagValue(a.Config.LocalFlags.GetModel)\n\ta.Config.LocalFlags.GetProcessor = config.SanitizeArrayFlagValue(a.Config.LocalFlags.GetProcessor)\n\n\terr := a.initPluginManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ta.createCollectorDialOpts()\n\treturn a.initTunnelServer(tunnel.ServerConfig{\n\t\tAddTargetHandler:    a.tunServerAddTargetHandler,\n\t\tDeleteTargetHandler: a.tunServerDeleteTargetHandler,\n\t\tRegisterHandler:     a.tunServerRegisterHandler,\n\t\tHandler:             a.tunServerHandler,\n\t})\n}\n\nfunc (a *App) GetRun(cmd *cobra.Command, args []string) error {\n\tdefer a.InitGetFlags(cmd)\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\t// setupCloseHandler(cancel)\n\ttargetsConfig, err := a.GetTargets()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed getting targets config: %v\", err)\n\t}\n\t_, err = 
a.Config.GetActions()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading actions config: %v\", err)\n\t}\n\tevps, err := a.intializeEventProcessors()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to init event processors: %v\", err)\n\t}\n\tif a.PromptMode {\n\t\t// prompt mode\n\t\tfor _, tc := range targetsConfig {\n\t\t\ta.AddTargetConfig(tc)\n\t\t}\n\t}\n\t// event format\n\tif len(a.Config.GetProcessor) > 0 {\n\t\ta.Config.Format = formatEvent\n\t}\n\tif a.Config.Format == formatEvent {\n\t\treturn a.handleGetRequestEvent(ctx, evps)\n\t}\n\t// other formats\n\tnumTargets := len(a.Config.Targets)\n\ta.errCh = make(chan error, numTargets*3)\n\ta.wg.Add(numTargets)\n\tfor _, tc := range a.Config.Targets {\n\t\tgo a.GetRequest(ctx, tc)\n\t}\n\ta.wg.Wait()\n\terr = a.checkErrors()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (a *App) GetRequest(ctx context.Context, tc *types.TargetConfig) {\n\tdefer a.wg.Done()\n\treq, err := a.Config.CreateGetRequest(tc)\n\tif err != nil {\n\t\ta.logError(fmt.Errorf(\"target %q building Get request failed: %v\", tc.Name, err))\n\t\treturn\n\t}\n\tresponse, err := a.getRequest(ctx, tc, req)\n\tif err != nil {\n\t\ta.logError(fmt.Errorf(\"target %q Get request failed: %v\", tc.Name, err))\n\t\treturn\n\t}\n\tif response == nil {\n\t\treturn\n\t}\n\terr = a.PrintMsg(tc.Name, \"Get Response:\", response)\n\tif err != nil {\n\t\ta.logError(fmt.Errorf(\"target %q: %v\", tc.Name, err))\n\t}\n}\n\nfunc (a *App) getRequest(ctx context.Context, tc *types.TargetConfig, req *gnmi.GetRequest) (*gnmi.GetResponse, error) {\n\txreq := req\n\tif len(a.Config.LocalFlags.GetModel) > 0 {\n\t\tspModels, unspModels, err := a.filterModels(ctx, tc, a.Config.LocalFlags.GetModel)\n\t\tif err != nil {\n\t\t\ta.logError(fmt.Errorf(\"failed getting supported models from %q: %v\", tc.Name, err))\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(unspModels) > 0 {\n\t\t\ta.logError(fmt.Errorf(\"found unsupported models for target %q: 
%+v\", tc.Name, unspModels))\n\t\t}\n\t\tfor _, m := range spModels {\n\t\t\txreq.UseModels = append(xreq.UseModels, m)\n\t\t}\n\t}\n\tif a.Config.PrintRequest || a.Config.GetDryRun {\n\t\terr := a.PrintMsg(tc.Name, \"Get Request:\", req)\n\t\tif err != nil {\n\t\t\ta.logError(fmt.Errorf(\"target %q Get Request printing failed: %v\", tc.Name, err))\n\t\t}\n\t}\n\tif a.Config.GetDryRun {\n\t\treturn nil, nil\n\t}\n\ta.Logger.Printf(\"sending gNMI GetRequest: prefix='%v', path='%v', type='%v', encoding='%v', models='%+v', extension='%+v' to %s\",\n\t\txreq.Prefix, xreq.Path, xreq.Type, xreq.Encoding, xreq.UseModels, xreq.Extension, tc.Name)\n\n\tresponse, err := a.ClientGet(ctx, tc, xreq)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}\n\nfunc (a *App) getModels(ctx context.Context, tc *types.TargetConfig) ([]*gnmi.ModelData, error) {\n\tcapRsp, err := a.ClientCapabilities(ctx, tc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn capRsp.GetSupportedModels(), nil\n}\n\nfunc (a *App) filterModels(ctx context.Context, tc *types.TargetConfig, modelsNames []string) (map[string]*gnmi.ModelData, []string, error) {\n\tsupModels, err := a.getModels(ctx, tc)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tunsupportedModels := make([]string, 0)\n\tsupportedModels := make(map[string]*gnmi.ModelData)\n\tvar found bool\n\tfor _, m := range modelsNames {\n\t\tfound = false\n\n\t\tmodelName := m\n\t\tvar organization *string\n\t\tvar version *string\n\n\t\tif strings.Contains(modelName, \"/\") {\n\t\t\tparts := strings.SplitN(modelName, \"/\", 2)\n\t\t\torganization = &parts[0]\n\t\t\tmodelName = parts[1]\n\t\t}\n\n\t\tif strings.Contains(modelName, \":\") {\n\t\t\tparts := strings.SplitN(modelName, \":\", 2)\n\t\t\tmodelName = parts[0]\n\t\t\tversion = &parts[1]\n\t\t}\n\n\t\tfor _, tModel := range supModels {\n\t\t\tif modelName == tModel.Name && (organization == nil || *organization == tModel.Organization) && (version == nil || *version == 
tModel.Version) {\n\t\t\t\tsupportedModels[m] = tModel\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tunsupportedModels = append(unsupportedModels, m)\n\t\t}\n\t}\n\treturn supportedModels, unsupportedModels, nil\n}\n\n// InitGetFlags used to init or reset getCmd flags for gnmic-prompt mode\nfunc (a *App) InitGetFlags(cmd *cobra.Command) {\n\tcmd.ResetFlags()\n\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.GetPath, \"path\", \"\", []string{}, \"get request paths\")\n\tcmd.MarkFlagRequired(\"path\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GetPrefix, \"prefix\", \"\", \"\", \"get request prefix\")\n\tcmd.Flags().StringSliceVarP(&a.Config.LocalFlags.GetModel, \"model\", \"\", []string{}, \"get request models\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GetType, \"type\", \"t\", \"ALL\", \"data type requested from the target. one of: ALL, CONFIG, STATE, OPERATIONAL\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GetTarget, \"target\", \"\", \"\", \"get request target\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.GetValuesOnly, \"values-only\", \"\", false, \"print GetResponse values only\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.GetProcessor, \"processor\", \"\", []string{}, \"list of processor names to run\")\n\tcmd.Flags().Uint32VarP(&a.Config.LocalFlags.GetDepth, \"depth\", \"\", 0, \"depth extension value\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.GetDryRun, \"dry-run\", \"\", false, \"prints the get request without initiating a gRPC connection\")\n\n\tcmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", cmd.Name(), flag.Name), flag)\n\t})\n}\n\nfunc (a *App) intializeEventProcessors() ([]formatters.EventProcessor, error) {\n\t_, err := a.Config.GetEventProcessors()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed reading event processors config: %v\", err)\n\t}\n\tvar evps = make([]formatters.EventProcessor, 0)\n\tfor _, epName := 
range a.Config.GetProcessor {\n\t\tif epCfg, ok := a.Config.Processors[epName]; ok {\n\t\t\tepType := \"\"\n\t\t\tfor k := range epCfg {\n\t\t\t\tepType = k\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif in, ok := formatters.EventProcessors[epType]; ok {\n\t\t\t\tep := in()\n\t\t\t\terr := ep.Init(epCfg[epType],\n\t\t\t\t\tformatters.WithLogger(a.Logger),\n\t\t\t\t\tformatters.WithTargets(a.Config.Targets),\n\t\t\t\t\tformatters.WithActions(a.Config.Actions),\n\t\t\t\t)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed initializing event processor '%s' of type='%s': %v\", epName, epType, err)\n\t\t\t\t}\n\t\t\t\tevps = append(evps, ep)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"%q event processor has an unknown type=%q\", epName, epType)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%q event processor not found\", epName)\n\t}\n\treturn evps, nil\n}\n\nfunc (a *App) handleGetRequestEvent(ctx context.Context, evps []formatters.EventProcessor) error {\n\tnumTargets := len(a.Config.Targets)\n\ta.errCh = make(chan error, numTargets*3)\n\ta.wg.Add(numTargets)\n\trsps := make(chan *getResponseEvents, numTargets)\n\tfor _, tc := range a.Config.Targets {\n\t\tgo func(tc *types.TargetConfig) {\n\t\t\tdefer a.wg.Done()\n\t\t\treq, err := a.Config.CreateGetRequest(tc)\n\t\t\tif err != nil {\n\t\t\t\ta.errCh <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresp, err := a.getRequest(ctx, tc, req)\n\t\t\tif err != nil {\n\t\t\t\ta.errCh <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tevs, err := formatters.GetResponseToEventMsgs(resp, map[string]string{\"source\": tc.Name}, evps...)\n\t\t\tif err != nil {\n\t\t\t\ta.errCh <- err\n\t\t\t}\n\t\t\trsps <- &getResponseEvents{name: tc.Name, rsp: evs}\n\t\t}(tc)\n\t}\n\ta.wg.Wait()\n\tclose(rsps)\n\n\tresponses := make(map[string][]*formatters.EventMsg)\n\tfor r := range rsps {\n\t\tresponses[r.name] = r.rsp\n\t}\n\terr := a.checkErrors()\n\tif err != nil {\n\t\treturn err\n\t}\n\t//\n\tsb := strings.Builder{}\n\tfor name, r := range 
responses {\n\t\tsb.Reset()\n\t\tprintPrefix := \"\"\n\t\tif len(a.Config.TargetsList()) > 1 && !a.Config.NoPrefix {\n\t\t\tprintPrefix = fmt.Sprintf(\"[%s] \", name)\n\t\t}\n\t\tb, err := json.MarshalIndent(r, \"\", \"  \")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsb.Write(b)\n\t\tfmt.Fprintf(a.out, \"%s\\n\", indent(printPrefix, sb.String()))\n\t}\n\n\treturn nil\n}\n\ntype getResponseEvents struct {\n\t// target name\n\tname string\n\trsp  []*formatters.EventMsg\n}\n"
  },
  {
    "path": "pkg/app/getset.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/itchyny/gojq\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nfunc (a *App) GetSetPreRunE(cmd *cobra.Command, args []string) error {\n\ta.Config.SetLocalFlagsFromFile(cmd)\n\ta.Config.LocalFlags.GetSetModel = config.SanitizeArrayFlagValue(a.Config.LocalFlags.GetSetModel)\n\n\ta.createCollectorDialOpts()\n\treturn a.initTunnelServer(tunnel.ServerConfig{\n\t\tAddTargetHandler:    a.tunServerAddTargetHandler,\n\t\tDeleteTargetHandler: a.tunServerDeleteTargetHandler,\n\t\tRegisterHandler:     a.tunServerRegisterHandler,\n\t\tHandler:             a.tunServerHandler,\n\t})\n}\nfunc (a *App) GetSetRunE(cmd *cobra.Command, args []string) error {\n\tdefer a.InitGetSetFlags(cmd)\n\n\tif a.Config.Format == formatEvent {\n\t\treturn fmt.Errorf(\"format event not supported for GetSet RPC\")\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\t// setupCloseHandler(cancel)\n\ttargetsConfig, err := a.GetTargets()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed getting targets config: %v\", err)\n\t}\n\n\tif !a.PromptMode {\n\t\tfor _, tc := range targetsConfig {\n\t\t\ta.AddTargetConfig(tc)\n\t\t}\n\t}\n\treq, err := 
a.Config.CreateGASGetRequest()\n\tif err != nil {\n\t\treturn err\n\t}\n\tnumTargets := len(a.Config.Targets)\n\ta.errCh = make(chan error, numTargets*3)\n\ta.wg.Add(numTargets)\n\tfor _, tc := range a.Config.Targets {\n\t\tgo a.GetSetRequest(ctx, tc, req)\n\t}\n\ta.wg.Wait()\n\treturn a.checkErrors()\n}\n\nfunc (a *App) GetSetRequest(ctx context.Context, tc *types.TargetConfig, req *gnmi.GetRequest) {\n\tdefer a.wg.Done()\n\txreq := req\n\tif len(a.Config.LocalFlags.GetSetModel) > 0 {\n\t\tspModels, unspModels, err := a.filterModels(ctx, tc, a.Config.LocalFlags.GetSetModel)\n\t\tif err != nil {\n\t\t\ta.logError(fmt.Errorf(\"failed getting supported models from %q: %v\", tc.Name, err))\n\t\t\treturn\n\t\t}\n\t\tif len(unspModels) > 0 {\n\t\t\ta.logError(fmt.Errorf(\"found unsupported models for target %q: %+v\", tc.Name, unspModels))\n\t\t}\n\t\tfor _, m := range spModels {\n\t\t\txreq.UseModels = append(xreq.UseModels, m)\n\t\t}\n\t}\n\tif a.Config.PrintRequest {\n\t\terr := a.PrintMsg(tc.Name, \"Get Request:\", req)\n\t\tif err != nil {\n\t\t\ta.logError(fmt.Errorf(\"target %q Get Request printing failed: %v\", tc.Name, err))\n\t\t}\n\t}\n\ta.Logger.Printf(\"sending gNMI GetRequest: prefix='%v', path='%v', type='%v', encoding='%v', models='%+v', extension='%+v' to %s\",\n\t\txreq.Prefix, xreq.Path, xreq.Type, xreq.Encoding, xreq.UseModels, xreq.Extension, tc.Name)\n\tresponse, err := a.ClientGet(ctx, tc, xreq)\n\tif err != nil {\n\t\ta.logError(fmt.Errorf(\"target %q get request failed: %v\", tc.Name, err))\n\t\treturn\n\t}\n\terr = a.PrintMsg(tc.Name, \"Get Response:\", response)\n\tif err != nil {\n\t\ta.logError(fmt.Errorf(\"target %q: %v\", tc.Name, err))\n\t}\n\t//\n\tq, err := gojq.Parse(a.Config.LocalFlags.GetSetCondition)\n\tif err != nil {\n\t\ta.logError(err)\n\t\treturn\n\t}\n\tcode, err := gojq.Compile(q)\n\tif err != nil {\n\t\ta.logError(err)\n\t\treturn\n\t}\n\tmo := formatters.MarshalOptions{Format: \"json\"}\n\tb, err := mo.Marshal(response, 
map[string]string{\"address\": tc.Name})\n\tif err != nil {\n\t\ta.logError(fmt.Errorf(\"error marshaling message: %v\", err))\n\t\treturn\n\t}\n\tvar input interface{}\n\terr = json.Unmarshal(b, &input)\n\tif err != nil {\n\t\ta.logError(fmt.Errorf(\"error unmarshaling message: %v\", err))\n\t\treturn\n\t}\n\titer := code.Run(input)\n\tvar ok bool\n\tres, ok := iter.Next()\n\tif !ok {\n\t\ta.logError(fmt.Errorf(\"unexpected jq result type: %v\", res))\n\t\t// iterator not done, so the final result won't be a boolean\n\t\treturn\n\t}\n\tif err, ok = res.(error); ok {\n\t\tif err != nil {\n\t\t\ta.logError(fmt.Errorf(\"condition evaluation failed: %v\", err))\n\t\t\treturn\n\t\t}\n\t}\n\tswitch res := res.(type) {\n\tcase bool:\n\t\ta.Logger.Printf(\"GetSet condition evaluated to %v\", res)\n\t\tif res {\n\t\t\tsetReq, err := a.Config.CreateGASSetRequest(input)\n\t\t\tif err != nil {\n\t\t\t\ta.logError(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(setReq.Delete) == 0 && len(setReq.Replace) == 0 && len(setReq.Update) == 0 {\n\t\t\t\ta.Logger.Printf(\"empty set request\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\ta.setRequest(ctx, tc, setReq)\n\t\t}\n\t\treturn\n\tdefault:\n\t\ta.logError(errors.New(\"unexpected condition return type\"))\n\t\treturn\n\t}\n}\n\n// InitGetSetFlags used to init or reset getsetCmd flags for gnmic-prompt mode\nfunc (a *App) InitGetSetFlags(cmd *cobra.Command) {\n\tcmd.ResetFlags()\n\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetGet, \"get\", \"\", \"\", \"get request paths\")\n\tcmd.MarkFlagRequired(\"get\")\n\tcmd.Flags().StringSliceVarP(&a.Config.LocalFlags.GetSetModel, \"model\", \"\", []string{}, \"get request models\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetPrefix, \"prefix\", \"\", \"\", \"get request prefix\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetType, \"type\", \"t\", \"ALL\", \"data type requested from the target. 
one of: ALL, CONFIG, STATE, OPERATIONAL\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetTarget, \"target\", \"\", \"\", \"get request target\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetCondition, \"condition\", \"\", \"any([true])\", \"condition to be met in order to execute the set request\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetUpdate, \"update\", \"\", \"\", \"set update path template, a Go template or a jq expression\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetReplace, \"replace\", \"\", \"\", \"set replace path template, a Go template or a jq expression\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetDelete, \"delete\", \"\", \"\", \"set delete path template, a Go template or a jq expression\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetValue, \"value\", \"\", \"\", \"set value template, a Go template or a jq expression\")\n\tcmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", cmd.Name(), flag.Name), flag)\n\t})\n}\n"
  },
  {
    "path": "pkg/app/gnmi_client.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmi/proto/gnmi_ext\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\nfunc (a *App) ClientCapabilities(ctx context.Context, tc *types.TargetConfig, ext ...*gnmi_ext.Extension) (*gnmi.CapabilityResponse, error) {\n\t// acquire writer lock\n\ta.operLock.Lock()\n\tt, err := a.initTarget(tc)\n\ta.operLock.Unlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// acquire reader lock\n\ta.operLock.RLock()\n\terr = a.CreateGNMIClient(ctx, t)\n\ta.operLock.RUnlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctx, cancel := context.WithTimeout(ctx, t.Config.Timeout)\n\tdefer cancel()\n\tcapResponse, err := t.Capabilities(ctx, ext...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%q CapabilitiesRequest failed: %v\", t.Config.Address, err)\n\t}\n\treturn capResponse, nil\n\n}\n\nfunc (a *App) ClientGet(ctx context.Context, tc *types.TargetConfig, req *gnmi.GetRequest) (*gnmi.GetResponse, error) {\n\ta.operLock.Lock()\n\tt, err := a.initTarget(tc)\n\ta.operLock.Unlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// acquire reader lock\n\ta.operLock.RLock()\n\terr = a.CreateGNMIClient(ctx, t)\n\ta.operLock.RUnlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctx, cancel := context.WithTimeout(ctx, t.Config.Timeout)\n\tdefer cancel()\n\tgetResponse, err := t.Get(ctx, req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%q GetRequest failed: %v\", t.Config.Address, 
err)\n\t}\n\treturn getResponse, nil\n}\n\nfunc (a *App) ClientSet(ctx context.Context, tc *types.TargetConfig, req *gnmi.SetRequest) (*gnmi.SetResponse, error) {\n\ta.operLock.Lock()\n\tt, err := a.initTarget(tc)\n\ta.operLock.Unlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// acquire reader lock\n\ta.operLock.RLock()\n\terr = a.CreateGNMIClient(ctx, t)\n\ta.operLock.RUnlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctx, cancel := context.WithTimeout(ctx, t.Config.Timeout)\n\tdefer cancel()\n\tsetResponse, err := t.Set(ctx, req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"target %q SetRequest failed: %v\", t.Config.Name, err)\n\t}\n\treturn setResponse, nil\n}\n"
  },
  {
    "path": "pkg/app/gnmi_client_subscribe.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"google.golang.org/grpc\"\n\n\t\"github.com/openconfig/gnmic/pkg/api\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\t\"github.com/openconfig/gnmic/pkg/utils\"\n)\n\ntype subscriptionRequest struct {\n\t// subscription name\n\tname string\n\t// gNMI subscription request\n\treq *gnmi.SubscribeRequest\n}\n\nfunc (a *App) TargetSubscribeStream(ctx context.Context, tc *types.TargetConfig) {\n\tlockKey := a.targetLockKey(tc.Name)\nSTART:\n\tnctx, cancel := context.WithCancel(ctx)\n\ta.operLock.Lock()\n\tif cfn, ok := a.targetsLockFn[tc.Name]; ok {\n\t\tcfn()\n\t}\n\ta.targetsLockFn[tc.Name] = cancel\n\tt, err := a.initTarget(tc)\n\ta.operLock.Unlock()\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to initialize target %q: %v\", tc.Name, err)\n\t\treturn\n\t}\n\tselect {\n\t// check if the context was canceled before retrying\n\tcase <-nctx.Done():\n\t\treturn\n\tdefault:\n\t\tif a.locker != nil {\n\t\t\ta.Logger.Printf(\"acquiring lock for target %q\", tc.Name)\n\t\t\tok, err := a.locker.Lock(nctx, lockKey, []byte(a.Config.Clustering.InstanceName))\n\t\t\tif err == lockers.ErrCanceled {\n\t\t\t\ta.Logger.Printf(\"lock attempt for target %q canceled\", 
tc.Name)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed to lock target %q: %v\", tc.Name, err)\n\t\t\t\ttime.Sleep(a.Config.LocalFlags.SubscribeLockRetry)\n\t\t\t\tgoto START\n\t\t\t}\n\t\t\tif !ok {\n\t\t\t\ttime.Sleep(a.Config.LocalFlags.SubscribeLockRetry)\n\t\t\t\tgoto START\n\t\t\t}\n\t\t\ta.Logger.Printf(\"acquired lock for target %q\", tc.Name)\n\t\t}\n\t\ta.Logger.Printf(\"queuing target %q\", tc.Name)\n\t\ta.targetsChan <- t\n\t\ta.Logger.Printf(\"subscribing to target: %q\", tc.Name)\n\t\tgo func() {\n\t\t\terr := a.clientSubscribe(nctx, tc)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed to subscribe: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}()\n\t\tif a.locker != nil {\n\t\t\tdoneChan, errChan := a.locker.KeepLock(nctx, lockKey)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-nctx.Done():\n\t\t\t\t\ta.Logger.Printf(\"target %q stopped: %v\", tc.Name, nctx.Err())\n\t\t\t\t\t// drain errChan\n\t\t\t\t\terr := <-errChan\n\t\t\t\t\ta.Logger.Printf(\"target %q keepLock returned: %v\", tc.Name, err)\n\t\t\t\t\treturn\n\t\t\t\tcase <-doneChan:\n\t\t\t\t\ta.Logger.Printf(\"target lock %q removed\", tc.Name)\n\t\t\t\t\treturn\n\t\t\t\tcase err := <-errChan:\n\t\t\t\t\ta.Logger.Printf(\"failed to maintain target %q lock: %v\", tc.Name, err)\n\t\t\t\t\ta.stopTarget(ctx, tc.Name)\n\t\t\t\t\tif errors.Is(err, context.Canceled) {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\ttime.Sleep(a.Config.LocalFlags.SubscribeLockRetry)\n\t\t\t\t\tgoto START\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (a *App) TargetSubscribeOnce(ctx context.Context, tc *types.TargetConfig) error {\n\tnctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\ta.operLock.Lock()\n\t_, err := a.initTarget(tc)\n\ta.operLock.Unlock()\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to initialize target %q: %v\", tc.Name, err)\n\t\treturn err\n\t}\n\ta.Logger.Printf(\"subscribing to target: %q\", tc.Name)\n\terr = a.clientSubscribeOnce(nctx, tc)\n\tif err != 
nil {\n\t\ta.Logger.Printf(\"failed to subscribe: %v\", err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (a *App) TargetSubscribePoll(ctx context.Context, tc *types.TargetConfig) {\n\tnctx, cancel := context.WithCancel(ctx)\n\ta.operLock.Lock()\n\tif cfn, ok := a.targetsLockFn[tc.Name]; ok {\n\t\tcfn()\n\t}\n\ta.targetsLockFn[tc.Name] = cancel\n\t_, err := a.initTarget(tc)\n\ta.operLock.Unlock()\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to initialize target %q: %v\", tc.Name, err)\n\t\treturn\n\t}\n\ta.Logger.Printf(\"subscribing to target: %q\", tc.Name)\n\terr = a.clientSubscribe(nctx, tc)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to subscribe: %v\", err)\n\t\treturn\n\t}\n}\n\nfunc (a *App) clientSubscribe(ctx context.Context, tc *types.TargetConfig) error {\n\ta.operLock.RLock()\n\tt, ok := a.Targets[tc.Name]\n\ta.operLock.RUnlock()\n\n\tif !ok {\n\t\treturn fmt.Errorf(\"unknown target name: %q\", tc.Name)\n\t}\n\n\tsubscriptionsConfigs := t.Subscriptions\n\tif len(subscriptionsConfigs) == 0 {\n\t\tsubscriptionsConfigs = a.Config.Subscriptions\n\t}\n\tif len(subscriptionsConfigs) == 0 {\n\t\treturn fmt.Errorf(\"target %q has no subscriptions defined\", tc.Name)\n\t}\n\tsubRequests := make([]subscriptionRequest, 0, len(subscriptionsConfigs))\n\tfor scName, sc := range subscriptionsConfigs {\n\t\treq, err := utils.CreateSubscribeRequest(sc, tc, a.Config.Encoding)\n\t\tif err != nil {\n\t\t\tif errors.Is(errors.Unwrap(err), config.ErrConfig) || errors.Is(errors.Unwrap(err), api.ErrInvalidValue) {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t\tsubRequests = append(subRequests, subscriptionRequest{name: scName, req: req})\n\t}\n\tif t.Cfn != nil {\n\t\tt.Cfn()\n\t}\n\tgnmiCtx, cancel := context.WithCancel(ctx)\n\tt.Cfn = cancel\nCRCLIENT:\n\tselect {\n\tcase <-gnmiCtx.Done():\n\t\treturn gnmiCtx.Err()\n\tdefault:\n\t\ttargetDialOpts := make([]grpc.DialOption, len(a.dialOpts))\n\t\tcopy(targetDialOpts, 
a.dialOpts)\n\t\tif a.Config.UseTunnelServer {\n\t\t\ta.ttm.Lock()\n\t\t\ta.tunTargetCfn[tunnel.Target{ID: tc.Name, Type: tc.TunnelTargetType}] = cancel\n\t\t\ta.ttm.Unlock()\n\t\t\ttargetDialOpts = append(targetDialOpts,\n\t\t\t\tgrpc.WithContextDialer(a.tunDialerFn(gnmiCtx, tc)),\n\t\t\t)\n\t\t\t// overwrite target address\n\t\t\tt.Config.Address = t.Config.Name\n\t\t}\n\t\terr := t.CreateGNMIClient(ctx, targetDialOpts...)\n\t\tif err != nil {\n\t\t\tif errors.Is(err, context.DeadlineExceeded) {\n\t\t\t\ta.Logger.Printf(\"failed to initialize target %q timeout (%s) reached\", tc.Name, t.Config.Timeout)\n\t\t\t} else {\n\t\t\t\ta.Logger.Printf(\"failed to initialize target %q: %v\", tc.Name, err)\n\t\t\t}\n\t\t\ta.Logger.Printf(\"retrying target %q in %s\", tc.Name, t.Config.RetryTimer)\n\t\t\ttime.Sleep(t.Config.RetryTimer)\n\t\t\tgoto CRCLIENT\n\t\t}\n\t}\n\ta.Logger.Printf(\"target %q gNMI client created\", t.Config.Name)\n\n\tfor _, sreq := range subRequests {\n\t\ta.Logger.Printf(\"sending gNMI SubscribeRequest: subscribe='%+v', mode='%+v', encoding='%+v', to %s\",\n\t\t\tsreq.req, sreq.req.GetSubscribe().GetMode(), sreq.req.GetSubscribe().GetEncoding(), t.Config.Name)\n\t\tgo t.Subscribe(gnmiCtx, sreq.req, sreq.name)\n\t}\n\treturn nil\n}\n\nfunc (a *App) clientSubscribeOnce(ctx context.Context, tc *types.TargetConfig) error {\n\ta.operLock.RLock()\n\tt, ok := a.Targets[tc.Name]\n\ta.operLock.RUnlock()\n\tif !ok {\n\t\treturn fmt.Errorf(\"unknown target name: %q\", tc.Name)\n\t}\n\n\tsubscriptionsConfigs := t.Subscriptions\n\tif len(subscriptionsConfigs) == 0 {\n\t\tsubscriptionsConfigs = a.Config.Subscriptions\n\t}\n\tif len(subscriptionsConfigs) == 0 {\n\t\treturn fmt.Errorf(\"target %q has no subscriptions defined\", tc.Name)\n\t}\n\tsubRequests := make([]subscriptionRequest, 0)\n\tfor _, sc := range subscriptionsConfigs {\n\t\treq, err := utils.CreateSubscribeRequest(sc, tc, a.Config.Encoding)\n\t\tif err != nil {\n\t\t\tif errors.Is(errors.Unwrap(err), 
config.ErrConfig) {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t\tsubRequests = append(subRequests, subscriptionRequest{name: sc.Name, req: req})\n\t}\n\tgnmiCtx, cancel := context.WithCancel(ctx)\n\tt.Cfn = cancel\nCRCLIENT:\n\ttargetDialOpts := a.dialOpts\n\tif a.Config.UseTunnelServer {\n\t\ta.ttm.Lock()\n\t\ta.tunTargetCfn[tunnel.Target{ID: tc.Name, Type: tc.TunnelTargetType}] = cancel\n\t\ta.ttm.Unlock()\n\t\ttargetDialOpts = append(targetDialOpts,\n\t\t\tgrpc.WithContextDialer(a.tunDialerFn(gnmiCtx, tc)),\n\t\t)\n\t\t// overwrite target address\n\t\tt.Config.Address = t.Config.Name\n\t}\n\tif err := t.CreateGNMIClient(ctx, targetDialOpts...); err != nil {\n\t\tif errors.Is(err, context.DeadlineExceeded) {\n\t\t\ta.Logger.Printf(\"failed to initialize target %q timeout (%s) reached\", tc.Name, t.Config.Timeout)\n\t\t} else {\n\t\t\ta.Logger.Printf(\"failed to initialize target %q: %v\", tc.Name, err)\n\t\t}\n\t\ta.Logger.Printf(\"retrying target %q in %s\", tc.Name, t.Config.RetryTimer)\n\t\ttime.Sleep(t.Config.RetryTimer)\n\t\tgoto CRCLIENT\n\n\t}\n\ta.Logger.Printf(\"target %q gNMI client created\", t.Config.Name)\nOUTER:\n\tfor _, sreq := range subRequests {\n\t\ta.Logger.Printf(\"sending gNMI SubscribeRequest: subscribe='%+v', mode='%+v', encoding='%+v', to %s\",\n\t\t\tsreq.req, sreq.req.GetSubscribe().GetMode(), sreq.req.GetSubscribe().GetEncoding(), t.Config.Name)\n\t\trspCh, errCh := t.SubscribeOnceChan(gnmiCtx, sreq.req)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase err := <-errCh:\n\t\t\t\tif errors.Is(err, io.EOF) {\n\t\t\t\t\ta.Logger.Printf(\"target %q, subscription %q closed stream(EOF)\", t.Config.Name, sreq.name)\n\t\t\t\t\tclose(rspCh)\n\t\t\t\t\t// next subscription or end\n\t\t\t\t\tcontinue OUTER\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\tcase rsp := <-rspCh:\n\t\t\t\tm := outputs.Meta{\"source\": t.Config.Name, \"format\": a.Config.Format, \"subscription-name\": sreq.name}\n\t\t\t\ta.export(ctx, rsp, m, 
t.Config.Outputs...)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *App) clientSubscribePoll(ctx context.Context, targetName, subscriptionName string) error {\n\ta.operLock.RLock()\n\tt, ok := a.Targets[targetName]\n\ta.operLock.RUnlock()\n\tif !ok {\n\t\treturn fmt.Errorf(\"unknown target name %q\", targetName)\n\t}\n\treturn t.SubscribePoll(ctx, subscriptionName)\n}\n"
  },
  {
    "path": "pkg/app/gnmi_server.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/hashicorp/consul/api\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/peer\"\n\t\"google.golang.org/grpc/status\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/path\"\n\t\"github.com/openconfig/gnmic/pkg/api/server\"\n\t\"github.com/openconfig/gnmic/pkg/api/target\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/cache\"\n)\n\ntype streamClient struct {\n\ttarget string\n\treq    *gnmi.SubscribeRequest\n\n\tstream  gnmi.GNMI_SubscribeServer\n\terrChan chan<- error\n}\n\nfunc (a *App) startGnmiServer() error {\n\tif a.Config.GnmiServer == nil {\n\t\ta.c = nil\n\t\treturn nil\n\t}\n\n\tvar err error\n\ta.c, err = cache.New(a.Config.GnmiServer.Cache, cache.WithLogger(a.Logger))\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to initialize gNMI cache: %v\", err)\n\t\treturn err\n\t}\n\n\ts, err := server.New(server.Config{\n\t\tAddress:              a.Config.GnmiServer.Address,\n\t\tMaxUnaryRPC:          a.Config.GnmiServer.MaxUnaryRPC,\n\t\tMaxStreamingRPC:      a.Config.GnmiServer.MaxSubscriptions,\n\t\tMaxRecvMsgSize:       a.Config.GnmiServer.MaxRecvMsgSize,\n\t\tMaxSendMsgSize:       a.Config.GnmiServer.MaxSendMsgSize,\n\t\tMaxConcurrentStreams: a.Config.GnmiServer.MaxConcurrentStreams,\n\t\tTCPKeepalive:    
     a.Config.GnmiServer.TCPKeepalive,\n\t\tKeepalive:            a.Config.GnmiServer.GRPCKeepalive.Convert(),\n\t\tRateLimit:            a.Config.GnmiServer.RateLimit,\n\t\tTimeout:              a.Config.GnmiServer.Timeout,\n\t\tHealthEnabled:        true,\n\t\tTLS:                  a.Config.GnmiServer.TLS,\n\t}, server.WithLogger(a.Logger),\n\t\tserver.WithGetHandler(a.serverGetHandler),\n\t\tserver.WithSetHandler(a.serverSetHandler),\n\t\tserver.WithSubscribeHandler(a.serverSubscribeHandler),\n\t\tserver.WithRegistry(a.reg),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx, cancel := context.WithCancel(a.ctx)\n\n\tgo a.registerGNMIServer(ctx)\n\tgo func() {\n\t\tdefer cancel()\n\t\terr := s.Start(ctx)\n\t\tif err != nil {\n\t\t\ta.Logger.Print(err)\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc (a *App) registerGNMIServer(ctx context.Context, defaultTags ...string) {\n\tif a.Config.GnmiServer.ServiceRegistration == nil {\n\t\treturn\n\t}\n\tvar err error\n\tclientConfig := &api.Config{\n\t\tAddress:    a.Config.GnmiServer.ServiceRegistration.Address,\n\t\tScheme:     \"http\",\n\t\tDatacenter: a.Config.GnmiServer.ServiceRegistration.Datacenter,\n\t\tToken:      a.Config.GnmiServer.ServiceRegistration.Token,\n\t}\n\tif a.Config.GnmiServer.ServiceRegistration.Username != \"\" && a.Config.GnmiServer.ServiceRegistration.Password != \"\" {\n\t\tclientConfig.HttpAuth = &api.HttpBasicAuth{\n\t\t\tUsername: a.Config.GnmiServer.ServiceRegistration.Username,\n\t\t\tPassword: a.Config.GnmiServer.ServiceRegistration.Password,\n\t\t}\n\t}\nINITCONSUL:\n\tconsulClient, err := api.NewClient(clientConfig)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to connect to consul: %v\", err)\n\t\ttime.Sleep(1 * time.Second)\n\t\tgoto INITCONSUL\n\t}\n\tself, err := consulClient.Agent().Self()\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to connect to consul: %v\", err)\n\t\ttime.Sleep(1 * time.Second)\n\t\tgoto INITCONSUL\n\t}\n\tif cfg, ok := self[\"Config\"]; ok {\n\t\tb, _ := 
json.Marshal(cfg)\n\t\ta.Logger.Printf(\"consul agent config: %s\", string(b))\n\t}\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\th, p, err := net.SplitHostPort(a.Config.GnmiServer.Address)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to split host and port from gNMI server address %q: %v\", a.Config.GnmiServer.Address, err)\n\t\treturn\n\t}\n\tpi, _ := strconv.Atoi(p)\n\tservice := &api.AgentServiceRegistration{\n\t\tID:      a.Config.InstanceName,\n\t\tName:    a.Config.GnmiServer.ServiceRegistration.Name,\n\t\tAddress: h,\n\t\tPort:    pi,\n\t\tTags:    append(defaultTags, a.Config.GnmiServer.ServiceRegistration.Tags...),\n\t\tChecks: api.AgentServiceChecks{\n\t\t\t{\n\t\t\t\tTTL:                            a.Config.GnmiServer.ServiceRegistration.CheckInterval.String(),\n\t\t\t\tDeregisterCriticalServiceAfter: a.Config.GnmiServer.ServiceRegistration.DeregisterAfter,\n\t\t\t},\n\t\t},\n\t}\n\tif a.Config.Clustering != nil {\n\t\tif a.Config.Clustering.InstanceName != \"\" {\n\t\t\tservice.ID = a.Config.Clustering.InstanceName\n\t\t}\n\t\tservice.Name = a.Config.Clustering.ClusterName + \"-gnmi-server\"\n\t\tif service.Tags == nil {\n\t\t\tservice.Tags = make([]string, 0)\n\t\t}\n\t\tservice.Tags = append(service.Tags, fmt.Sprintf(\"cluster-name=%s\", a.Config.Clustering.ClusterName))\n\t}\n\tif service.ID == \"\" {\n\t\tservice.ID = service.Name\n\t}\n\tservice.Tags = append(service.Tags, fmt.Sprintf(\"instance-name=%s\", service.ID))\n\tttlCheckID := \"service:\" + service.ID\n\tb, _ := json.Marshal(service)\n\ta.Logger.Printf(\"registering service: %s\", string(b))\n\terr = consulClient.Agent().ServiceRegister(service)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to register service in consul: %v\", err)\n\t\treturn\n\t}\n\n\terr = consulClient.Agent().UpdateTTL(ttlCheckID, \"\", api.HealthPassing)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to update TTL check to Passing: %v\", err)\n\t}\n\tticker := 
time.NewTicker(a.Config.GnmiServer.ServiceRegistration.CheckInterval / 2)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\terr = consulClient.Agent().UpdateTTL(ttlCheckID, \"\", api.HealthPassing)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed to update TTL check to Passing: %v\", err)\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\terr = consulClient.Agent().UpdateTTL(ttlCheckID, ctx.Err().Error(), api.HealthCritical)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed to update TTL check to Critical: %v\", err)\n\t\t\t}\n\t\t\tticker.Stop()\n\t\t\tgoto INITCONSUL\n\t\t}\n\t}\n}\n\nfunc (a *App) handleONCESubscriptionRequest(sc *streamClient) {\n\tvar err error\n\ta.Logger.Printf(\"processing subscription to target %q\", sc.target)\n\tpaths := make([]*gnmi.Path, 0)\n\n\tswitch req := sc.req.GetRequest().(type) {\n\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\tpr := req.Subscribe.GetPrefix()\n\t\tfor _, sub := range req.Subscribe.GetSubscription() {\n\t\t\tpaths = append(paths,\n\t\t\t\t&gnmi.Path{\n\t\t\t\t\tOrigin: pr.GetOrigin(),\n\t\t\t\t\tTarget: pr.GetTarget(),\n\t\t\t\t\tElem:   append(pr.GetElem(), sub.GetPath().GetElem()...),\n\t\t\t\t})\n\t\t}\n\t}\n\t//\n\tro := &cache.ReadOpts{\n\t\tTarget:      sc.target,\n\t\tPaths:       paths,\n\t\tMode:        \"once\",\n\t\tUpdatesOnly: sc.req.GetSubscribe().GetUpdatesOnly(),\n\t}\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"error processing subscription to target %q: %v\", sc.target, err)\n\t\t\tsc.errChan <- err\n\t\t\treturn\n\t\t}\n\t\ta.Logger.Printf(\"subscription request to target %q processed\", sc.target)\n\t}()\n\n\tfor n := range a.c.Subscribe(sc.stream.Context(), ro) {\n\t\tif n.Err != nil {\n\t\t\terr = n.Err\n\t\t\treturn\n\t\t}\n\t\terr = sc.stream.Send(&gnmi.SubscribeResponse{\n\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\tUpdate: n.Notification,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (a *App) 
handleStreamSubscriptionRequest(sc *streamClient) {\n\tpeer, _ := peer.FromContext(sc.stream.Context())\n\n\terrChan := make(chan error)\n\tdefer close(errChan)\n\n\t// this context is required to signal this goroutine and `handleSampledQuery` goroutine that error has happened in cache\n\tctx, cancel := context.WithCancel(sc.stream.Context())\n\ta.Logger.Printf(\"processing STREAM subscription from %q to target %q\", peer.Addr, sc.target)\n\n\tgo func() {\n\t\tdefer close(sc.errChan)\n\n\t\tfor err := range errChan {\n\t\t\tif err == nil {\n\t\t\t\ta.Logger.Printf(\"subscription request from %q to target %q processed\", peer.Addr, sc.target)\n\t\t\t} else if errors.Is(err, context.Canceled) {\n\t\t\t\ta.Logger.Printf(\"subscription to target %q canceled\", sc.target)\n\t\t\t\tsc.errChan <- err\n\t\t\t\tcancel()\n\t\t\t} else {\n\t\t\t\ta.Logger.Printf(\"error processing STREAM subscription to target %q: %v\", sc.target, err)\n\t\t\t\tsc.errChan <- err\n\t\t\t\tcancel()\n\t\t\t}\n\t\t}\n\t}()\n\n\tif sc.req.GetSubscribe().GetUpdatesOnly() {\n\t\terr := sc.stream.Send(&gnmi.SubscribeResponse{\n\t\t\tResponse: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true},\n\t\t})\n\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\t}\n\tvar pr *gnmi.Path\n\tswitch req := sc.req.GetRequest().(type) {\n\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\tpr = req.Subscribe.GetPrefix()\n\t}\n\n\tsubs := sc.req.GetSubscribe().GetSubscription()\n\twg := new(sync.WaitGroup)\n\twg.Add(len(subs))\n\n\tfor i, sub := range subs {\n\t\ta.Logger.Printf(\"handling subscriptionList item[%d]: target %q, %q\", i, sc.target, sub.String())\n\n\t\tgo func(sub *gnmi.Subscription) {\n\t\t\tdefer wg.Done()\n\t\t\tvar ro *cache.ReadOpts\n\n\t\t\tswitch sub.GetMode() {\n\t\t\tcase gnmi.SubscriptionMode_ON_CHANGE, gnmi.SubscriptionMode_TARGET_DEFINED:\n\t\t\t\tro = &cache.ReadOpts{\n\t\t\t\t\tTarget: sc.target,\n\t\t\t\t\tPaths: []*gnmi.Path{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tOrigin: 
pr.GetOrigin(),\n\t\t\t\t\t\t\tTarget: pr.GetTarget(),\n\t\t\t\t\t\t\tElem:   append(pr.GetElem(), sub.GetPath().GetElem()...),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tMode:              cache.ReadMode_StreamOnChange,\n\t\t\t\t\tHeartbeatInterval: time.Duration(sub.GetHeartbeatInterval()),\n\t\t\t\t\tSuppressRedundant: sub.GetSuppressRedundant(),\n\t\t\t\t\tUpdatesOnly:       sc.req.GetSubscribe().GetUpdatesOnly(),\n\t\t\t\t}\n\t\t\tcase gnmi.SubscriptionMode_SAMPLE:\n\t\t\t\tperiod := time.Duration(sub.GetSampleInterval())\n\t\t\t\tif period == 0 {\n\t\t\t\t\tperiod = a.Config.GnmiServer.DefaultSampleInterval\n\t\t\t\t} else if period < a.Config.GnmiServer.MinSampleInterval {\n\t\t\t\t\tperiod = a.Config.GnmiServer.MinSampleInterval\n\t\t\t\t}\n\t\t\t\tro = &cache.ReadOpts{\n\t\t\t\t\tTarget: sc.target,\n\t\t\t\t\tPaths: []*gnmi.Path{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tOrigin: pr.GetOrigin(),\n\t\t\t\t\t\t\tTarget: pr.GetTarget(),\n\t\t\t\t\t\t\tElem:   append(pr.GetElem(), sub.GetPath().GetElem()...),\n\t\t\t\t\t\t}},\n\t\t\t\t\tMode:              cache.ReadMode_StreamSample,\n\t\t\t\t\tSampleInterval:    period,\n\t\t\t\t\tHeartbeatInterval: time.Duration(sub.GetHeartbeatInterval()),\n\t\t\t\t\tSuppressRedundant: sub.GetSuppressRedundant(),\n\t\t\t\t\tUpdatesOnly:       sc.req.GetSubscribe().GetUpdatesOnly(),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ta.Logger.Printf(\"cache subscribe: %+v\", ro)\n\n\t\t\tfor n := range a.c.Subscribe(ctx, ro) {\n\t\t\t\t// `errChan <- n.Err` should trigger the gnmi-server side cleanup\n\t\t\t\t// only wait would be for the cache to close the channel\n\t\t\t\tif n.Err != nil {\n\t\t\t\t\terrChan <- n.Err\n\t\t\t\t\ta.Logger.Printf(\"cache subscribe failed: %+v: %v\", ro, n.Err)\n\n\t\t\t\t\t// reader should only stop once the channel is closed by sender or otherwise\n\t\t\t\t\t// it coould block the senders who doesn't know that error has happened\n\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr := 
sc.stream.Send(&gnmi.SubscribeResponse{\n\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\tUpdate: n.Notification,\n\t\t\t\t\t},\n\t\t\t\t})\n\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- n.Err\n\t\t\t\t}\n\t\t\t}\n\t\t}(sub)\n\t}\n\n\t// wait for ctx to be done\n\t<-ctx.Done()\n\terrChan <- ctx.Err()\n\twg.Wait()\n}\n\nfunc (a *App) handlePolledSubscription(sc *streamClient) {\n\tdefer close(sc.errChan)\n\ta.handleONCESubscriptionRequest(sc)\n\tsc.errChan <- sc.stream.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{\n\t\tSyncResponse: true,\n\t}})\n\t// var err error\n\tfor {\n\t\treq, err := sc.stream.Recv()\n\t\tif errors.Is(err, io.EOF) {\n\t\t\tsc.errChan <- err\n\t\t\treturn\n\t\t}\n\t\tswitch req := req.Request.(type) {\n\t\tcase *gnmi.SubscribeRequest_Poll:\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"unexpected request type: expecting a Poll request, rcvd: %v\", req)\n\t\t\ta.Logger.Print(err)\n\t\t\tsc.errChan <- err\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"target %q: failed poll subscription rcv: %v\", sc.target, err)\n\t\t\tsc.errChan <- err\n\t\t\treturn\n\t\t}\n\t\ta.Logger.Printf(\"target %q: repoll\", sc.target)\n\t\ta.handleONCESubscriptionRequest(sc)\n\t\tsc.errChan <- sc.stream.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{\n\t\t\tSyncResponse: true,\n\t\t}})\n\t\ta.Logger.Printf(\"target %q: repoll done\", sc.target)\n\t}\n}\n\n////\n\nfunc (a *App) handlegNMIcInternalGet(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) {\n\tnotifications := make([]*gnmi.Notification, 0, len(req.GetPath()))\n\ta.configLock.RLock()\n\tdefer a.configLock.RUnlock()\n\n\tfor _, p := range req.GetPath() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\tdefault:\n\t\t\telems := path.PathElems(req.GetPrefix(), p)\n\t\t\tns, err := a.handlegNMIGetPath(elems, req.GetEncoding())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 
err\n\t\t\t}\n\t\t\tnotifications = append(notifications, ns...)\n\t\t}\n\t}\n\treturn &gnmi.GetResponse{Notification: notifications}, nil\n}\n\nfunc (a *App) handlegNMIGetPath(elems []*gnmi.PathElem, enc gnmi.Encoding) ([]*gnmi.Notification, error) {\n\tnotifications := make([]*gnmi.Notification, 0, len(elems))\n\tfor _, e := range elems {\n\t\tswitch e.Name {\n\t\t// case \"\":\n\t\tcase \"targets\":\n\t\t\tif e.Key != nil {\n\t\t\t\tif _, ok := e.Key[\"name\"]; ok {\n\t\t\t\t\tfor _, tc := range a.Config.Targets {\n\t\t\t\t\t\tif tc.Name == e.Key[\"name\"] {\n\t\t\t\t\t\t\tnotifications = append(notifications, targetConfigToNotification(tc, enc))\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// no keys\n\t\t\tfor _, tc := range a.Config.Targets {\n\t\t\t\tnotifications = append(notifications, targetConfigToNotification(tc, enc))\n\t\t\t}\n\t\tcase \"subscriptions\":\n\t\t\tif e.Key != nil {\n\t\t\t\tif _, ok := e.Key[\"name\"]; ok {\n\t\t\t\t\tfor _, sub := range a.Config.Subscriptions {\n\t\t\t\t\t\tif sub.Name == e.Key[\"name\"] {\n\t\t\t\t\t\t\tnotifications = append(notifications, subscriptionConfigToNotification(sub, enc))\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// no keys\n\t\t\tfor _, sub := range a.Config.Subscriptions {\n\t\t\t\tnotifications = append(notifications, subscriptionConfigToNotification(sub, enc))\n\t\t\t}\n\t\t// case \"outputs\":\n\t\t// case \"inputs\":\n\t\t// case \"processors\":\n\t\t// case \"clustering\":\n\t\t// case \"gnmi-server\":\n\t\tdefault:\n\t\t\treturn nil, status.Errorf(codes.InvalidArgument, \"unknown path element %q\", e.Name)\n\t\t}\n\t}\n\treturn notifications, nil\n}\n\nfunc targetConfigToNotification(tc *types.TargetConfig, e gnmi.Encoding) *gnmi.Notification {\n\tswitch e {\n\tcase gnmi.Encoding_JSON, gnmi.Encoding_JSON_IETF:\n\t\tb, _ := json.Marshal(tc)\n\t\tn := &gnmi.Notification{\n\t\t\tTimestamp: 
time.Now().UnixNano(),\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tOrigin: \"gnmic\",\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"target\",\n\t\t\t\t\t\t\t\tKey:  map[string]string{\"name\": tc.Name},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{JsonVal: b},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\treturn n\n\tcase gnmi.Encoding_BYTES:\n\t\tn := &gnmi.Notification{\n\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\tPrefix: &gnmi.Path{\n\t\t\t\tOrigin: \"gnmic\",\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"target\",\n\t\t\t\t\t\tKey:  map[string]string{\"name\": tc.Name},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"address\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_BytesVal{BytesVal: []byte(tc.Address)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif tc.Username != nil {\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"username\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_BytesVal{BytesVal: []byte(*tc.Username)},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tif tc.Insecure != nil {\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"insecure\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_BytesVal{BytesVal: []byte(fmt.Sprint(*tc.Insecure))},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tif tc.SkipVerify != nil {\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: 
\"skip-verify\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_BytesVal{BytesVal: []byte(fmt.Sprint(*tc.SkipVerify))},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\tPath: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{Name: \"timeout\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_BytesVal{BytesVal: []byte(tc.Timeout.String())},\n\t\t\t},\n\t\t})\n\t\tif tc.TLSCA != nil && *tc.TLSCA != \"\" {\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"tls-ca\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_BytesVal{BytesVal: []byte((tc.TLSCAString()))},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tif tc.TLSCert != nil && *tc.TLSCert != \"\" {\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"tls-cert\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_BytesVal{BytesVal: []byte(tc.TLSCertString())},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tif tc.TLSKey != nil && *tc.TLSKey != \"\" {\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"tls-key\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_BytesVal{BytesVal: []byte(tc.TLSKeyString())},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tif len(tc.Outputs) > 0 {\n\t\t\ttypedVals := make([]*gnmi.TypedValue, 0, len(tc.Subscriptions))\n\t\t\tfor _, out := range tc.Outputs {\n\t\t\t\ttypedVals = append(typedVals, &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_BytesVal{BytesVal: []byte(out)},\n\t\t\t\t})\n\t\t\t}\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: 
\"outputs\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_LeaflistVal{\n\t\t\t\t\t\tLeaflistVal: &gnmi.ScalarArray{\n\t\t\t\t\t\t\tElement: typedVals,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tif len(tc.Subscriptions) > 0 {\n\t\t\ttypedVals := make([]*gnmi.TypedValue, 0, len(tc.Subscriptions))\n\t\t\tfor _, sub := range tc.Subscriptions {\n\t\t\t\ttypedVals = append(typedVals, &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_BytesVal{BytesVal: []byte(sub)},\n\t\t\t\t})\n\t\t\t}\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"subscriptions\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_LeaflistVal{\n\t\t\t\t\t\tLeaflistVal: &gnmi.ScalarArray{\n\t\t\t\t\t\t\tElement: typedVals,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\treturn n\n\tcase gnmi.Encoding_ASCII:\n\t\tn := &gnmi.Notification{\n\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\tPrefix: &gnmi.Path{\n\t\t\t\tOrigin: \"gnmic\",\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"target\",\n\t\t\t\t\t\tKey:  map[string]string{\"name\": tc.Name},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"address\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.Address},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif tc.Username != nil {\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"username\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: *tc.Username},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tif tc.Insecure != nil {\n\t\t\tn.Update = 
append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"insecure\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: fmt.Sprint(*tc.Insecure)},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tif tc.SkipVerify != nil {\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"skip-verify\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: fmt.Sprint(*tc.SkipVerify)},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\tPath: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{Name: \"timeout\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.Timeout.String()},\n\t\t\t},\n\t\t})\n\t\tif tc.TLSCA != nil && *tc.TLSCA != \"\" {\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"tls-ca\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.TLSCAString()},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tif tc.TLSCert != nil && *tc.TLSCert != \"\" {\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"tls-cert\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.TLSCertString()},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tif tc.TLSKey != nil && *tc.TLSKey != \"\" {\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"tls-key\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.TLSKeyString()},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tif 
len(tc.Outputs) > 0 {\n\t\t\ttypedVals := make([]*gnmi.TypedValue, 0, len(tc.Subscriptions))\n\t\t\tfor _, out := range tc.Outputs {\n\t\t\t\ttypedVals = append(typedVals, &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: out},\n\t\t\t\t})\n\t\t\t}\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"outputs\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_LeaflistVal{\n\t\t\t\t\t\tLeaflistVal: &gnmi.ScalarArray{\n\t\t\t\t\t\t\tElement: typedVals,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tif len(tc.Subscriptions) > 0 {\n\t\t\ttypedVals := make([]*gnmi.TypedValue, 0, len(tc.Subscriptions))\n\t\t\tfor _, sub := range tc.Subscriptions {\n\t\t\t\ttypedVals = append(typedVals, &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: sub},\n\t\t\t\t})\n\t\t\t}\n\t\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"subscriptions\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_LeaflistVal{\n\t\t\t\t\t\tLeaflistVal: &gnmi.ScalarArray{\n\t\t\t\t\t\t\tElement: typedVals,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\treturn n\n\t}\n\treturn nil\n}\n\nfunc subscriptionConfigToNotification(sub *types.SubscriptionConfig, e gnmi.Encoding) *gnmi.Notification {\n\tswitch e {\n\tcase gnmi.Encoding_JSON, gnmi.Encoding_JSON_IETF:\n\t\tb, _ := json.Marshal(sub)\n\t\tn := &gnmi.Notification{\n\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tOrigin: \"gnmic\",\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"subscriptions\",\n\t\t\t\t\t\t\t\tKey:  map[string]string{\"name\": sub.Name},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: 
&gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{JsonVal: b},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\treturn n\n\tcase gnmi.Encoding_BYTES:\n\tcase gnmi.Encoding_ASCII:\n\t}\n\treturn nil\n}\n\nfunc (a *App) serverGetHandler(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) {\n\tnumPaths := len(req.GetPath())\n\tif numPaths == 0 && req.GetPrefix() == nil {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"missing path\")\n\t}\n\n\torigins := make(map[string]struct{})\n\tfor _, p := range req.GetPath() {\n\t\torigins[p.GetOrigin()] = struct{}{}\n\t\tif p.GetOrigin() != \"gnmic\" {\n\t\t\tif _, ok := origins[\"gnmic\"]; ok {\n\t\t\t\treturn nil, status.Errorf(codes.InvalidArgument, \"combining `gnmic` origin with other origin values is not supported\")\n\t\t\t}\n\t\t}\n\t}\n\n\tif _, ok := origins[\"gnmic\"]; ok {\n\t\treturn a.handlegNMIcInternalGet(ctx, req)\n\t}\n\n\ttargetName := req.GetPrefix().GetTarget()\n\tpr, _ := peer.FromContext(ctx)\n\ta.Logger.Printf(\"received Get request from %q to target %q\", pr.Addr, targetName)\n\n\ttargets, err := a.selectTargets(ctx, targetName)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"could not find targets: %v\", err)\n\t}\n\tnumTargets := len(targets)\n\tif numTargets == 0 {\n\t\treturn nil, status.Errorf(codes.NotFound, \"unknown target %q\", targetName)\n\t}\n\tresults := make(chan *gnmi.Notification)\n\terrChan := make(chan error, numTargets)\n\n\tresponse := &gnmi.GetResponse{\n\t\t// assume one notification per path per target\n\t\tNotification: make([]*gnmi.Notification, 0, numTargets*numPaths),\n\t}\n\tdone := make(chan struct{})\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase notif, ok := <-results:\n\t\t\t\tif !ok {\n\t\t\t\t\tclose(done)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tresponse.Notification = append(response.Notification, notif)\n\t\t\tcase 
<-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\twg := new(sync.WaitGroup)\n\twg.Add(numTargets)\n\tfor name, t := range targets {\n\t\tgo func(name string, t *target.Target) {\n\t\t\tdefer wg.Done()\n\n\t\t\tcreq := proto.Clone(req).(*gnmi.GetRequest)\n\t\t\tif creq.GetPrefix() == nil {\n\t\t\t\tcreq.Prefix = new(gnmi.Path)\n\t\t\t}\n\t\t\tif creq.GetPrefix().GetTarget() == \"\" || creq.GetPrefix().GetTarget() == \"*\" {\n\t\t\t\tcreq.Prefix.Target = name\n\t\t\t}\n\t\t\tres, err := t.Get(ctx, creq)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"target %q err: %v\", name, err)\n\t\t\t\terrChan <- fmt.Errorf(\"target %q err: %v\", name, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, n := range res.GetNotification() {\n\t\t\t\tif n.GetPrefix() == nil {\n\t\t\t\t\tn.Prefix = new(gnmi.Path)\n\t\t\t\t}\n\t\t\t\tif n.GetPrefix().GetTarget() == \"\" {\n\t\t\t\t\tn.Prefix.Target = name\n\t\t\t\t}\n\t\t\t\tresults <- n\n\t\t\t}\n\t\t}(name, t)\n\t}\n\twg.Wait()\n\tclose(results)\n\tclose(errChan)\n\tfor err := range errChan {\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"%v\", err)\n\t\t}\n\t}\n\t<-done\n\tif a.Config.Debug {\n\t\ta.Logger.Printf(\"sending GetResponse to %q: %+v\", pr.Addr, response)\n\t}\n\treturn response, nil\n}\n\nfunc (a *App) serverSetHandler(ctx context.Context, req *gnmi.SetRequest) (*gnmi.SetResponse, error) {\n\tnumUpdates := len(req.GetUpdate())\n\tnumReplaces := len(req.GetReplace())\n\tnumDeletes := len(req.GetDelete())\n\tnumUnionReplace := len(req.GetUnionReplace())\n\tif numUpdates+numReplaces+numDeletes+numUnionReplace == 0 {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"missing update/replace/delete path(s)\")\n\t}\n\n\ttargetName := req.GetPrefix().GetTarget()\n\tpr, _ := peer.FromContext(ctx)\n\ta.Logger.Printf(\"received Set request from %q to target %q\", pr.Addr, targetName)\n\n\ttargets, err := a.selectTargets(ctx, targetName)\n\tif err != nil {\n\t\treturn nil, 
status.Errorf(codes.Internal, \"could not find targets: %v\", err)\n\t}\n\tnumTargets := len(targets)\n\tif numTargets == 0 {\n\t\treturn nil, status.Errorf(codes.NotFound, \"unknown target(s) %q\", targetName)\n\t}\n\tresults := make(chan *gnmi.UpdateResult)\n\terrChan := make(chan error, numTargets)\n\n\tresponse := &gnmi.SetResponse{\n\t\t// assume one update per target, per update/replace/delete\n\t\tResponse: make([]*gnmi.UpdateResult, 0, numTargets*(numUpdates+numReplaces+numDeletes)),\n\t}\n\tdone := make(chan struct{})\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase upd, ok := <-results:\n\t\t\t\tif !ok {\n\t\t\t\t\tresponse.Timestamp = time.Now().UnixNano()\n\t\t\t\t\tclose(done)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tresponse.Response = append(response.Response, upd)\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\twg := new(sync.WaitGroup)\n\twg.Add(numTargets)\n\tfor name, t := range targets {\n\t\tgo func(name string, t *target.Target) {\n\t\t\tdefer wg.Done()\n\n\t\t\tcreq := proto.Clone(req).(*gnmi.SetRequest)\n\t\t\tif creq.GetPrefix() == nil {\n\t\t\t\tcreq.Prefix = new(gnmi.Path)\n\t\t\t}\n\t\t\tif creq.GetPrefix().GetTarget() == \"\" || creq.GetPrefix().GetTarget() == \"*\" {\n\t\t\t\tcreq.Prefix.Target = name\n\t\t\t}\n\t\t\tres, err := t.Set(ctx, creq)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"target %q err: %v\", name, err)\n\t\t\t\terrChan <- fmt.Errorf(\"target %q err: %v\", name, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, upd := range res.GetResponse() {\n\t\t\t\tupd.Path.Target = name\n\t\t\t\tresults <- upd\n\t\t\t}\n\t\t}(name, t)\n\t}\n\twg.Wait()\n\tclose(results)\n\tclose(errChan)\n\tfor err := range errChan {\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"%v\", err)\n\t\t}\n\t}\n\t<-done\n\ta.Logger.Printf(\"sending SetResponse to %q: %+v\", pr.Addr, response)\n\treturn response, nil\n}\n\nfunc (a *App) 
serverSubscribeHandler(req *gnmi.SubscribeRequest, stream gnmi.GNMI_SubscribeServer) error {\n\tpr, _ := peer.FromContext(stream.Context())\n\tsc := &streamClient{\n\t\tstream: stream,\n\t\treq:    req,\n\t}\n\tsc.target = sc.req.GetSubscribe().GetPrefix().GetTarget()\n\tif sc.target == \"\" {\n\t\tsc.target = \"*\"\n\t\tsub := sc.req.GetSubscribe()\n\t\tif sub.GetPrefix() == nil {\n\t\t\tsub.Prefix = &gnmi.Path{Target: \"*\"}\n\t\t} else {\n\t\t\tsub.Prefix.Target = \"*\"\n\t\t}\n\t}\n\n\ta.Logger.Printf(\"received a subscribe request mode=%v from %q for target %q\", sc.req.GetSubscribe().GetMode(), pr.Addr, sc.target)\n\tdefer a.Logger.Printf(\"subscription from peer %q terminated\", pr.Addr)\n\n\t// closing of this channel is handled by respective goroutines that are going to send error on this channel\n\terrChan := make(chan error, len(sc.req.GetSubscribe().GetSubscription()))\n\tsc.errChan = errChan // send-only\n\n\tswitch sc.req.GetSubscribe().GetMode() {\n\tcase gnmi.SubscriptionList_ONCE:\n\t\tgo func() {\n\t\t\ta.handleONCESubscriptionRequest(sc)\n\t\t\terrChan <- sc.stream.Send(&gnmi.SubscribeResponse{\n\t\t\t\tResponse: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true},\n\t\t\t})\n\t\t\tclose(errChan)\n\t\t}()\n\n\tcase gnmi.SubscriptionList_POLL:\n\t\tgo a.handlePolledSubscription(sc)\n\tcase gnmi.SubscriptionList_STREAM:\n\t\tgo a.handleStreamSubscriptionRequest(sc)\n\tdefault:\n\t\treturn status.Errorf(codes.InvalidArgument, \"unrecognized subscription mode: %v\", sc.req.GetSubscribe().GetMode())\n\t}\n\n\t// flushing the errChan\n\tdefer func() {\n\t\ta.Logger.Printf(\"flushing subscription errChan\")\n\t\tfor range errChan {\n\t\t}\n\t}()\n\n\t// returning first non-nil error and flushing rest in defer\n\tfor err := range errChan {\n\t\tif err != nil {\n\t\t\treturn status.Errorf(codes.Internal, \"%v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/app/inputs.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/inputs\"\n)\n\nfunc (a *App) InitInput(ctx context.Context, name string, tcs map[string]*types.TargetConfig) {\n\ta.configLock.Lock()\n\tdefer a.configLock.Unlock()\n\tif _, ok := a.Inputs[name]; ok {\n\t\treturn\n\t}\n\tif cfg, ok := a.Config.Inputs[name]; ok {\n\t\tif inputType, ok := cfg[\"type\"]; ok {\n\t\t\ta.Logger.Printf(\"starting input type %s\", inputType)\n\t\t\tif initializer, ok := inputs.Inputs[inputType.(string)]; ok {\n\t\t\t\tin := initializer()\n\t\t\t\tgo func() {\n\t\t\t\t\terr := in.Start(ctx, name, cfg,\n\t\t\t\t\t\tinputs.WithLogger(a.Logger),\n\t\t\t\t\t\tinputs.WithName(a.Config.InstanceName),\n\t\t\t\t\t\tinputs.WithOutputs(a.Outputs),\n\t\t\t\t\t\tinputs.WithConfigStore(a.Store),\n\t\t\t\t\t)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ta.Logger.Printf(\"failed to init input type %q: %v\", inputType, err)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\ta.operLock.Lock()\n\t\t\t\ta.Inputs[name] = in\n\t\t\t\ta.operLock.Unlock()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (a *App) InitInputs(ctx context.Context) {\n\tfor name := range a.Config.Inputs {\n\t\ta.InitInput(ctx, name, a.Config.Targets)\n\t}\n}\n"
  },
  {
    "path": "pkg/app/loaders.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/loaders\"\n)\n\nfunc (a *App) startLoader(ctx context.Context) {\n\tif len(a.Config.Loader) == 0 {\n\t\treturn\n\t}\n\tif a.inCluster() {\n\t\tticker := time.NewTicker(time.Second)\n\t\t// wait for instance to become the leader\n\t\tfor range ticker.C {\n\t\t\tif a.isLeader {\n\t\t\t\tticker.Stop()\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tldTypeS := a.Config.Loader[\"type\"].(string)\nSTART:\n\ta.Logger.Printf(\"initializing loader type %q\", ldTypeS)\n\tvar fnTargetsDefaults func(tc *types.TargetConfig) error\n\tif expandEnv, ok := a.Config.Loader[\"expand-env\"].(bool); ok && expandEnv {\n\t\tfnTargetsDefaults = a.Config.SetTargetConfigDefaultsExpandEnv\n\t} else {\n\t\tfnTargetsDefaults = a.Config.SetTargetConfigDefaults\n\t}\n\tld := loaders.Loaders[ldTypeS]()\n\terr := ld.Init(ctx, a.Config.Loader, a.Logger,\n\t\tloaders.WithRegistry(a.reg),\n\t\tloaders.WithActions(a.Config.Actions),\n\t\tloaders.WithTargetsDefaults(fnTargetsDefaults),\n\t)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to init loader type %q: %v\", ldTypeS, err)\n\t\treturn\n\t}\n\ta.Logger.Printf(\"starting loader type %q\", ldTypeS)\n\tfor targetOp := range ld.Start(ctx) {\n\t\t// do deletes first, since target change equates to delete+add\n\t\tfor _, del := range targetOp.Del {\n\t\t\t// not clustered, delete local target\n\t\t\tif !a.inCluster() {\n\t\t\t\terr = a.DeleteTarget(ctx, 
del)\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.Logger.Printf(\"failed deleting target %q: %v\", del, err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// clustered, delete target in all instances of the cluster\n\t\t\terr = a.deleteTarget(ctx, del)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed to delete target %q: %v\", del, err)\n\t\t\t}\n\t\t}\n\t\tvar limiter *time.Ticker\n\t\tif a.Config.LocalFlags.SubscribeBackoff > 0 {\n\t\t\tlimiter = time.NewTicker(a.Config.LocalFlags.SubscribeBackoff)\n\t\t}\n\t\tfor _, add := range targetOp.Add {\n\t\t\terr = fnTargetsDefaults(add)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed parsing new target configuration %s: %v\", add, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// not clustered, add target and subscribe\n\t\t\tif !a.inCluster() {\n\t\t\t\ta.Config.Targets[add.Name] = add\n\t\t\t\ta.AddTargetConfig(add)\n\t\t\t\ta.wg.Add(1)\n\t\t\t\tgo a.TargetSubscribeStream(ctx, add)\n\t\t\t\tif limiter != nil {\n\t\t\t\t\t<-limiter.C\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// clustered, dispatch\n\t\t\ta.configLock.Lock()\n\t\t\ta.Config.Targets[add.Name] = add\n\t\t\terr = a.dispatchTarget(ctx, add)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed dispatching target %q: %v\", add.Name, err)\n\t\t\t}\n\t\t\ta.configLock.Unlock()\n\t\t}\n\t\tif limiter != nil {\n\t\t\tlimiter.Stop()\n\t\t}\n\t}\n\ta.Logger.Printf(\"target loader stopped\")\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tdefault:\n\t\tgoto START\n\t}\n}\n\nfunc (a *App) startLoaderProxy(ctx context.Context) {\n\tif len(a.Config.Loader) == 0 {\n\t\treturn\n\t}\n\tldTypeS := a.Config.Loader[\"type\"].(string)\nSTART:\n\ta.Logger.Printf(\"initializing loader type %q\", ldTypeS)\n\n\tvar fnTargetsDefaults func(tc *types.TargetConfig) error\n\tif expandEnv, ok := a.Config.Loader[\"expand-env\"].(bool); ok && expandEnv {\n\t\tfnTargetsDefaults = a.Config.SetTargetConfigDefaultsExpandEnv\n\t} else {\n\t\tfnTargetsDefaults = 
a.Config.SetTargetConfigDefaults\n\t}\n\n\tld := loaders.Loaders[ldTypeS]()\n\terr := ld.Init(ctx, a.Config.Loader, a.Logger,\n\t\tloaders.WithRegistry(a.reg),\n\t\tloaders.WithActions(a.Config.Actions),\n\t\tloaders.WithTargetsDefaults(fnTargetsDefaults),\n\t)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to init loader type %q: %v\", ldTypeS, err)\n\t\treturn\n\t}\n\ta.Logger.Printf(\"starting loader type %q\", ldTypeS)\n\tfor targetOp := range ld.Start(ctx) {\n\t\t// do deletes first since target change is delete+add\n\t\tfor _, del := range targetOp.Del {\n\t\t\t// clustered, delete target in all instances of the cluster\n\t\t\ta.operLock.Lock()\n\t\t\tt, ok := a.Targets[del]\n\t\t\tif ok {\n\t\t\t\terr = t.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.Logger.Printf(\"failed to stop target %s: %v\", del, err)\n\t\t\t\t}\n\t\t\t\tdelete(a.Targets, del)\n\t\t\t}\n\t\t\ta.operLock.Unlock()\n\t\t}\n\t\tfor _, add := range targetOp.Add {\n\t\t\terr = fnTargetsDefaults(add)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed parsing new target configuration %s: %v\", add, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ta.configLock.Lock()\n\t\t\ta.Config.Targets[add.Name] = add\n\t\t\ta.configLock.Unlock()\n\t\t}\n\t}\n\ta.Logger.Printf(\"target loader stopped\")\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tdefault:\n\t\tgoto START\n\t}\n}\n"
  },
  {
    "path": "pkg/app/locker.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport \"fmt\"\n\nfunc (a *App) targetLockKey(s string) string {\n\tif a.Config.Clustering == nil {\n\t\treturn s\n\t}\n\tif s == \"\" {\n\t\treturn s\n\t}\n\treturn fmt.Sprintf(\"gnmic/%s/targets/%s\", a.Config.Clustering.ClusterName, s)\n}\n"
  },
  {
    "path": "pkg/app/logging.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n)\n\nfunc (a *App) logError(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\ta.Logger.Print(err)\n\tif !a.Config.Log {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t}\n\tif a.errCh == nil {\n\t\treturn\n\t}\n\ta.errCh <- err\n}\n\nfunc (a *App) checkErrors() error {\n\tif a.errCh == nil {\n\t\treturn nil\n\t}\n\tclose(a.errCh)\n\terrs := make([]error, 0)\n\tfor err := range a.errCh {\n\t\terrs = append(errs, err)\n\t}\n\tif len(errs) == 0 {\n\t\treturn nil\n\t}\n\tif a.Config.Log {\n\t\tfor _, err := range errs {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t}\n\treturn errors.New(\"one or more requests failed\")\n}\n"
  },
  {
    "path": "pkg/app/metrics.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nconst (\n\tclusterMetricsUpdatePeriod = 10 * time.Second\n)\n\n// subscribe\nvar subscribeResponseReceivedCounter = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"subscribe\",\n\tName:      \"number_of_received_subscribe_response_messages_total\",\n\tHelp:      \"Total number of received subscribe response messages\",\n}, []string{\"source\", \"subscription\"})\n\nvar subscribeResponseFailedCounter = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"subscribe\",\n\tName:      \"number_of_failed_subscribe_request_messages_total\",\n\tHelp:      \"Total number of failed subscribe requests\",\n}, []string{\"source\", \"subscription\"})\n\n// target\nvar targetUPMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"target\",\n\tName:      \"up\",\n\tHelp:      \"Has value 1 if the gNMI connection to the target is established; otherwise, 0.\",\n}, []string{\"name\"})\n\nvar targetConnStateMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"target\",\n\tName:      \"connection_state\",\n\tHelp:      \"The current gRPC connection state to the target. 
The value can be one of the following: 0(UNKNOWN), 1 (IDLE), 2 (CONNECTING), 3 (READY), 4 (TRANSIENT_FAILURE), or 5 (SHUTDOWN).\",\n}, []string{\"name\"})\n\n// cluster\nvar clusterNumberOfLockedTargets = prometheus.NewGauge(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"cluster\",\n\tName:      \"number_of_locked_targets\",\n\tHelp:      \"number of locked targets\",\n})\nvar clusterIsLeader = prometheus.NewGauge(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"cluster\",\n\tName:      \"is_leader\",\n\tHelp:      \"Has value 1 if this gnmic instance is the cluster leader, 0 otherwise\",\n})\n\nfunc (a *App) registerTargetMetrics() {\n\terr := a.reg.Register(targetUPMetric)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to register target metric: %v\", err)\n\t}\n\terr = a.reg.Register(targetConnStateMetric)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to register target connection state metric: %v\", err)\n\t}\n\ta.configLock.RLock()\n\tfor _, t := range a.Config.Targets {\n\t\ttargetUPMetric.WithLabelValues(t.Name).Set(0)\n\t\ttargetConnStateMetric.WithLabelValues(t.Name).Set(0)\n\t}\n\ta.configLock.RUnlock()\n\tgo func() {\n\t\tticker := time.NewTicker(clusterMetricsUpdatePeriod)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-a.ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\townTargets := make(map[string]string)\n\t\t\t\tif a.isLeader {\n\t\t\t\t\tlockedNodesPrefix := fmt.Sprintf(\"gnmic/%s/targets\", a.Config.ClusterName)\n\t\t\t\t\tctx, cancel := context.WithTimeout(a.ctx, clusterMetricsUpdatePeriod/2)\n\t\t\t\t\tlockedNodes, err := a.locker.List(ctx, lockedNodesPrefix)\n\t\t\t\t\tcancel()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ta.Logger.Printf(\"failed to get locked nodes key: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tfor k, v := range lockedNodes {\n\t\t\t\t\t\townTargets[strings.TrimPrefix(k, lockedNodesPrefix+\"/\")] = 
v\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttargetUPMetric.Reset()\n\t\t\t\ttargetConnStateMetric.Reset()\n\t\t\t\ta.configLock.RLock()\n\t\t\t\tfor _, tc := range a.Config.Targets {\n\t\t\t\t\ta.operLock.RLock()\n\t\t\t\t\tt, ok := a.Targets[tc.Name]\n\t\t\t\t\ta.operLock.RUnlock()\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tswitch t.ConnState() {\n\t\t\t\t\t\tcase \"IDLE\":\n\t\t\t\t\t\t\ttargetUPMetric.WithLabelValues(tc.Name).Set(1)\n\t\t\t\t\t\t\ttargetConnStateMetric.WithLabelValues(tc.Name).Set(1)\n\t\t\t\t\t\tcase \"CONNECTING\":\n\t\t\t\t\t\t\ttargetUPMetric.WithLabelValues(tc.Name).Set(0)\n\t\t\t\t\t\t\ttargetConnStateMetric.WithLabelValues(tc.Name).Set(2)\n\t\t\t\t\t\tcase \"READY\":\n\t\t\t\t\t\t\ttargetUPMetric.WithLabelValues(tc.Name).Set(1)\n\t\t\t\t\t\t\ttargetConnStateMetric.WithLabelValues(tc.Name).Set(3)\n\t\t\t\t\t\tcase \"TRANSIENT_FAILURE\":\n\t\t\t\t\t\t\ttargetUPMetric.WithLabelValues(tc.Name).Set(0)\n\t\t\t\t\t\t\ttargetConnStateMetric.WithLabelValues(tc.Name).Set(4)\n\t\t\t\t\t\tcase \"SHUTDOWN\":\n\t\t\t\t\t\t\ttargetUPMetric.WithLabelValues(tc.Name).Set(0)\n\t\t\t\t\t\t\ttargetConnStateMetric.WithLabelValues(tc.Name).Set(5)\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\ttargetUPMetric.WithLabelValues(tc.Name).Set(0)\n\t\t\t\t\t\t\ttargetConnStateMetric.WithLabelValues(tc.Name).Set(0)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif a.isLeader {\n\t\t\t\t\t\t\tif ownTargets[tc.Name] == a.Config.Clustering.InstanceName {\n\t\t\t\t\t\t\t\ttargetUPMetric.WithLabelValues(tc.Name).Set(0)\n\t\t\t\t\t\t\t\ttargetConnStateMetric.WithLabelValues(tc.Name).Set(0)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\ttargetUPMetric.WithLabelValues(tc.Name).Set(0)\n\t\t\t\t\t\t\ttargetConnStateMetric.WithLabelValues(tc.Name).Set(0)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ta.configLock.RUnlock()\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (a *App) startClusterMetrics() {\n\tif a.Config.APIServer == nil || !a.Config.APIServer.EnableMetrics || a.Config.Clustering == nil 
{\n\t\treturn\n\t}\n\tvar err error\n\terr = a.reg.Register(clusterNumberOfLockedTargets)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to register metric: %v\", err)\n\t}\n\terr = a.reg.Register(clusterIsLeader)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to register metric: %v\", err)\n\t}\n\tticker := time.NewTicker(clusterMetricsUpdatePeriod)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-a.ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tctx, cancel := context.WithTimeout(a.ctx, clusterMetricsUpdatePeriod/2)\n\t\t\tleaderKey := fmt.Sprintf(\"gnmic/%s/leader\", a.Config.ClusterName)\n\t\t\tleader, err := a.locker.List(ctx, leaderKey)\n\t\t\tcancel()\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed to get leader key: %v\", err)\n\t\t\t}\n\t\t\tif leader[leaderKey] == a.Config.Clustering.InstanceName {\n\t\t\t\tclusterIsLeader.Set(1)\n\t\t\t} else {\n\t\t\t\tclusterIsLeader.Set(0)\n\t\t\t}\n\n\t\t\tlockedNodesPrefix := fmt.Sprintf(\"gnmic/%s/targets\", a.Config.ClusterName)\n\t\t\tctx, cancel = context.WithTimeout(a.ctx, clusterMetricsUpdatePeriod/2)\n\t\t\tlockedNodes, err := a.locker.List(ctx, lockedNodesPrefix)\n\t\t\tcancel()\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed to get locked nodes key: %v\", err)\n\t\t\t}\n\t\t\tnumLockedNodes := 0\n\t\t\tfor _, v := range lockedNodes {\n\t\t\t\tif v == a.Config.Clustering.InstanceName {\n\t\t\t\t\tnumLockedNodes++\n\t\t\t\t}\n\t\t\t}\n\t\t\tclusterNumberOfLockedTargets.Set(float64(numLockedNodes))\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/app/outputs.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n)\n\nfunc (a *App) InitOutput(ctx context.Context, name string, tcs map[string]*types.TargetConfig) {\n\ta.configLock.Lock()\n\tdefer a.configLock.Unlock()\n\tif _, ok := a.Outputs[name]; ok {\n\t\treturn\n\t}\n\twg := new(sync.WaitGroup)\n\tif cfg, ok := a.Config.Outputs[name]; ok {\n\t\tif outType, ok := cfg[\"type\"]; ok {\n\t\t\ta.Logger.Printf(\"starting output type %s\", outType)\n\t\t\tif initializer, ok := outputs.Outputs[outType.(string)]; ok {\n\t\t\t\tout := initializer()\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func() {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\terr := out.Init(ctx, name, cfg,\n\t\t\t\t\t\toutputs.WithLogger(a.Logger),\n\t\t\t\t\t\toutputs.WithRegistry(a.reg),\n\t\t\t\t\t\toutputs.WithName(a.Config.InstanceName),\n\t\t\t\t\t\toutputs.WithClusterName(a.Config.ClusterName),\n\t\t\t\t\t\toutputs.WithConfigStore(a.Store),\n\t\t\t\t\t)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ta.Logger.Printf(\"failed to init output type %q: %v\", outType, err)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\ta.operLock.Lock()\n\t\t\t\ta.Outputs[name] = out\n\t\t\t\ta.operLock.Unlock()\n\t\t\t}\n\t\t}\n\t}\n\twg.Wait()\n}\n\nfunc (a *App) InitOutputs(ctx context.Context) {\n\tfor name := range a.Config.Outputs {\n\t\ta.InitOutput(ctx, name, a.Config.Targets)\n\t}\n}\n\n// AddOutputConfig adds an output called name, with config cfg if it does not already exist\nfunc (a 
*App) AddOutputConfig(name string, cfg map[string]interface{}) error {\n\t// if a.Outputs == nil {\n\t// \ta.Outputs = make(map[string]outputs.Output)\n\t// }\n\tif a.Config.Outputs == nil {\n\t\ta.Config.Outputs = make(map[string]map[string]interface{})\n\t}\n\tif _, ok := a.Outputs[name]; ok {\n\t\treturn fmt.Errorf(\"output %q already exists\", name)\n\t}\n\ta.configLock.Lock()\n\tdefer a.configLock.Unlock()\n\ta.Config.Outputs[name] = cfg\n\treturn nil\n}\n\nfunc (a *App) DeleteOutput(name string) error {\n\tif a.Outputs == nil {\n\t\treturn nil\n\t}\n\ta.operLock.Lock()\n\tdefer a.operLock.Unlock()\n\tif _, ok := a.Outputs[name]; !ok {\n\t\treturn fmt.Errorf(\"output %q does not exist\", name)\n\t}\n\to := a.Outputs[name]\n\terr := o.Close()\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to close output %q: %v\", name, err)\n\t}\n\tdelete(a.Outputs, name)\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/app/path.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/manifoldco/promptui\"\n\t\"github.com/openconfig/goyang/pkg/yang\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/path\"\n)\n\ntype pathGenOpts struct {\n\tsearch        bool\n\twithDescr     bool\n\twithTypes     bool\n\twithPrefix    bool\n\tpathType      string\n\tstateOnly     bool\n\tconfigOnly    bool\n\tjson          bool\n\twithNonLeaves bool\n}\n\ntype generatedPath struct {\n\tPath           string   `json:\"path,omitempty\"`\n\tPathWithPrefix string   `json:\"path-with-prefix,omitempty\"`\n\tType           string   `json:\"type,omitempty\"`\n\tEnumValues     []string `json:\"enum-values,omitempty\"`\n\tDescription    string   `json:\"description,omitempty\"`\n\tDefault        string   `json:\"default,omitempty\"`\n\tIsState        bool     `json:\"is-state,omitempty\"`\n\tNamespace      string   `json:\"namespace,omitempty\"`\n\tFeatureList    []string `json:\"if-features,omitempty\"`\n}\n\nfunc (a *App) PathCmdRun(d, f, e []string, pgo pathGenOpts) error {\n\terr := a.generateYangSchema(f, e)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgpaths := make([]*generatedPath, 0, 256)\n\tcollected := make([]*yang.Entry, 0, 256)\n\tfor _, entry := range a.SchemaTree.Dir {\n\t\tcollected = append(collected, collectSchemaNodes(entry, !pgo.withNonLeaves)...)\n\t}\n\tfor _, entry := range collected {\n\t\t// don't produce such 
paths in case of non-leaves\n\t\tif entry.IsCase() || entry.IsChoice() {\n\t\t\tcontinue\n\t\t}\n\t\tif !pgo.stateOnly && !pgo.configOnly || pgo.stateOnly && pgo.configOnly {\n\t\t\tgpaths = append(gpaths, a.generatePath(entry, pgo.pathType))\n\t\t\tcontinue\n\t\t}\n\t\tstate := isState(entry)\n\t\tif state && pgo.stateOnly {\n\t\t\tgpaths = append(gpaths, a.generatePath(entry, pgo.pathType))\n\t\t\tcontinue\n\t\t}\n\t\tif !state && pgo.configOnly {\n\t\t\tgpaths = append(gpaths, a.generatePath(entry, pgo.pathType))\n\t\t\tcontinue\n\t\t}\n\t}\n\tsort.Slice(gpaths, func(i, j int) bool {\n\t\treturn gpaths[i].Path < gpaths[j].Path\n\t})\n\tfor _, gp := range gpaths {\n\t\tgp.PathWithPrefix = collapsePrefixes(gp.PathWithPrefix)\n\t}\n\tif pgo.json {\n\t\tb, err := json.MarshalIndent(gpaths, \"\", \"  \")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintln(os.Stdout, string(b))\n\t\treturn nil\n\t}\n\n\tif len(gpaths) == 0 {\n\t\treturn errors.New(\"no results found\")\n\t}\n\n\t// regular print\n\tif !pgo.search {\n\t\tsb := new(strings.Builder)\n\t\tfor _, gp := range gpaths {\n\t\t\tsb.Reset()\n\t\t\tif pgo.withPrefix {\n\t\t\t\tsb.WriteString(gp.PathWithPrefix)\n\t\t\t} else {\n\t\t\t\tsb.WriteString(gp.Path)\n\t\t\t}\n\t\t\tif pgo.withTypes {\n\t\t\t\tsb.WriteString(\"\\t(type=\")\n\t\t\t\tsb.WriteString(gp.Type)\n\t\t\t\tsb.WriteString(\")\")\n\t\t\t}\n\t\t\tif pgo.withDescr {\n\t\t\t\tsb.WriteString(\"\\n\")\n\t\t\t\tsb.WriteString(indent(\"\\t\", gp.Description))\n\t\t\t}\n\t\t\tfmt.Fprintln(os.Stdout, sb.String())\n\t\t}\n\t\treturn nil\n\t}\n\t// search\n\tpaths := make([]string, 0, len(gpaths))\n\tfor _, gp := range gpaths {\n\t\tpaths = append(paths, gp.Path)\n\t}\n\tp := promptui.Select{\n\t\tLabel:        \"select path\",\n\t\tItems:        paths,\n\t\tSize:         10,\n\t\tStdout:       os.Stdout,\n\t\tHideSelected: true,\n\t\tSearcher: func(input string, index int) bool {\n\t\t\tkws := strings.Split(input, \" \")\n\t\t\tresult := 
true\n\t\t\tfor _, kw := range kws {\n\t\t\t\tif strings.HasPrefix(kw, \"!\") {\n\t\t\t\t\tkw = strings.TrimLeft(kw, \"!\")\n\t\t\t\t\tif kw == \"\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tresult = result && !strings.Contains(paths[index], kw)\n\t\t\t\t} else {\n\t\t\t\t\tresult = result && strings.Contains(paths[index], kw)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result\n\t\t},\n\t\tKeys: &promptui.SelectKeys{\n\t\t\tPrev:     promptui.Key{Code: promptui.KeyPrev, Display: promptui.KeyPrevDisplay},\n\t\t\tNext:     promptui.Key{Code: promptui.KeyNext, Display: promptui.KeyNextDisplay},\n\t\t\tPageUp:   promptui.Key{Code: promptui.KeyBackward, Display: promptui.KeyBackwardDisplay},\n\t\t\tPageDown: promptui.Key{Code: promptui.KeyForward, Display: promptui.KeyForwardDisplay},\n\t\t\tSearch:   promptui.Key{Code: ':', Display: \":\"},\n\t\t},\n\t}\n\tindex, selected, err := p.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(selected)\n\tfmt.Println(a.generateTypeInfo(collected[index]))\n\n\treturn nil\n}\n\nfunc (a *App) PathPreRunE(cmd *cobra.Command, args []string) error {\n\ta.Config.SetLocalFlagsFromFile(cmd)\n\tif a.Config.PathSearch && a.Config.PathWithDescr {\n\t\treturn errors.New(\"flags --search and --descr cannot be used together\")\n\t}\n\tif a.Config.LocalFlags.PathPathType != \"xpath\" && a.Config.LocalFlags.PathPathType != \"gnmi\" {\n\t\treturn errors.New(\"path-type must be one of 'xpath' or 'gnmi'\")\n\t}\n\treturn a.yangFilesPreProcessing()\n}\n\nfunc (a *App) PathRunE(cmd *cobra.Command, args []string) error {\n\treturn a.PathCmdRun(\n\t\ta.Config.GlobalFlags.Dir,\n\t\ta.Config.GlobalFlags.File,\n\t\ta.Config.GlobalFlags.Exclude,\n\t\tpathGenOpts{\n\t\t\tsearch:     a.Config.LocalFlags.PathSearch,\n\t\t\twithDescr:  a.Config.LocalFlags.PathWithDescr,\n\t\t\twithTypes:  a.Config.LocalFlags.PathWithTypes,\n\t\t\twithPrefix: a.Config.LocalFlags.PathWithPrefix,\n\t\t\tpathType:   a.Config.LocalFlags.PathPathType,\n\t\t\tstateOnly:  
a.Config.LocalFlags.PathState,\n\t\t\tconfigOnly: a.Config.LocalFlags.PathConfig,\n\t\t},\n\t)\n}\n\nfunc (a *App) InitPathFlags(cmd *cobra.Command) {\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.PathPathType, \"path-type\", \"\", \"xpath\", \"path type xpath or gnmi\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.PathWithDescr, \"descr\", \"\", false, \"print leaf description\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.PathWithPrefix, \"with-prefix\", \"\", false, \"include module/submodule prefix in path elements\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.PathWithTypes, \"types\", \"\", false, \"print leaf type\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.PathSearch, \"search\", \"\", false, \"search through path list\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.PathState, \"state-only\", \"\", false, \"generate paths only for YANG leafs representing state data\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.PathConfig, \"config-only\", \"\", false, \"generate paths only for YANG leafs representing config data\")\n\tcmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", cmd.Name(), flag.Name), flag)\n\t})\n}\n\nfunc collectSchemaNodes(e *yang.Entry, leafOnly bool) []*yang.Entry {\n\tif e == nil {\n\t\treturn []*yang.Entry{}\n\t}\n\tcollected := make([]*yang.Entry, 0, 128)\n\tfor _, child := range e.Dir {\n\t\tcollected = append(collected,\n\t\t\tcollectSchemaNodes(child, leafOnly)...)\n\t}\n\n\tif e.Parent != nil {\n\t\tswitch {\n\t\tcase e.Dir == nil && e.ListAttr != nil: // leaf-list\n\t\t\tfallthrough\n\t\tcase e.Dir == nil: // leaf\n\t\t\tf := &yang.Entry{\n\t\t\t\tParent:      e.Parent,\n\t\t\t\tNode:        e.Node,\n\t\t\t\tName:        e.Name,\n\t\t\t\tDescription: e.Description,\n\t\t\t\tDefault:     e.Default,\n\t\t\t\tUnits:       e.Units,\n\t\t\t\tKind:        e.Kind,\n\t\t\t\tConfig:      e.Config,\n\t\t\t\tPrefix:      e.Prefix,\n\t\t\t\tMandatory:   e.Mandatory,\n\t\t\t\tDir:     
    e.Dir,\n\t\t\t\tKey:         e.Key,\n\t\t\t\tType:        e.Type,\n\t\t\t\tExts:        e.Exts,\n\t\t\t\tListAttr:    e.ListAttr,\n\t\t\t\tExtra:       make(map[string][]any),\n\t\t\t}\n\t\t\tfor k, v := range e.Extra {\n\t\t\t\tf.Extra[k] = v\n\t\t\t}\n\t\t\tcollected = append(collected, f)\n\t\tcase e.ListAttr != nil: // list\n\t\t\tfallthrough\n\t\tdefault: // container\n\t\t\tif !leafOnly {\n\t\t\t\tcollected = append(collected, e)\n\t\t\t}\n\t\t\tif len(e.Extra[\"if-feature\"]) > 0 {\n\t\t\t\tfor _, myleaf := range collected {\n\t\t\t\t\tif myleaf.Extra[\"if-feature\"] == nil {\n\t\t\t\t\t\tmyleaf.Extra[\"if-feature\"] = e.Extra[\"if-feature\"]\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\tLOOP:\n\t\t\t\t\tfor _, f := range e.Extra[\"if-feature\"] {\n\t\t\t\t\t\tfor _, mlf := range myleaf.Extra[\"if-feature\"] {\n\t\t\t\t\t\t\tif ff, ok := f.(*yang.Value); ok && ff != nil {\n\t\t\t\t\t\t\t\tif mlff, ok := mlf.(*yang.Value); ok && mlff != nil {\n\t\t\t\t\t\t\t\t\tif ff.Source == nil || mlff.Source == nil {\n\t\t\t\t\t\t\t\t\t\tcontinue LOOP\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tif ff.Source.Argument == mlff.Source.Argument {\n\t\t\t\t\t\t\t\t\t\tcontinue LOOP\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tmyleaf.Extra[\"if-feature\"] = append(myleaf.Extra[\"if-feature\"], f)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn collected\n}\n\nfunc (a *App) generatePath(entry *yang.Entry, pType string) *generatedPath {\n\tgp := new(generatedPath)\n\tfor e := entry; e != nil && e.Parent != nil; e = e.Parent {\n\t\tif e.IsCase() || e.IsChoice() {\n\t\t\tcontinue\n\t\t}\n\t\telementName := e.Name\n\t\tprefixedElementName := e.Name\n\t\tif e.Prefix != nil {\n\t\t\tif e.Prefix.Parent != nil {\n\t\t\t\tprefixedElementName = fmt.Sprintf(\"%s:%s\", e.Prefix.Parent.NName(), prefixedElementName)\n\t\t\t} else {\n\t\t\t\tprefixedElementName = fmt.Sprintf(\"%s:%s\", e.Prefix.NName(), 
prefixedElementName)\n\t\t\t}\n\t\t}\n\t\tif e.Key != \"\" {\n\t\t\tfor _, k := range strings.Fields(e.Key) {\n\t\t\t\telementName = fmt.Sprintf(\"%s[%s=*]\", elementName, k)\n\t\t\t\tprefixedElementName = fmt.Sprintf(\"%s[%s=*]\", prefixedElementName, k)\n\t\t\t}\n\t\t}\n\t\tgp.Path = fmt.Sprintf(\"/%s%s\", elementName, gp.Path)\n\t\tif e.Prefix != nil {\n\t\t\tgp.PathWithPrefix = fmt.Sprintf(\"/%s%s\", prefixedElementName, gp.PathWithPrefix)\n\t\t}\n\t}\n\tif ifFeature, ok := entry.Extra[\"if-feature\"]; ok && ifFeature != nil {\n\tAPPEND:\n\t\tfor _, feature := range ifFeature {\n\t\t\tf, ok := feature.(*yang.Value)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, ef := range gp.FeatureList {\n\t\t\t\tif ef == f.Source.Argument {\n\t\t\t\t\tcontinue APPEND\n\t\t\t\t}\n\t\t\t}\n\t\t\tgp.FeatureList = append(gp.FeatureList, strings.Split(f.Source.Argument, \" and \")...)\n\t\t}\n\t}\n\n\tgp.Description = entry.Description\n\tif entry.Type != nil {\n\t\tgp.Type = entry.Type.Name\n\t\tif gp.Type == \"enumeration\" {\n\t\t\tgp.EnumValues = entry.Type.Enum.Names()\n\t\t}\n\t} else if entry.IsList() {\n\t\tgp.Type = \"[list]\"\n\t} else {\n\t\tgp.Type = \"[container]\"\n\t}\n\n\tif entry.IsLeafList() {\n\t\tgp.Default = strings.Join(entry.DefaultValues(), \", \")\n\t} else {\n\t\tgp.Default, _ = entry.SingleDefaultValue()\n\t}\n\n\tgp.IsState = isState(entry)\n\tgp.Namespace = entry.Namespace().NName()\n\tif pType == \"gnmi\" {\n\t\tgnmiPath, err := path.ParsePath(gp.Path)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"path: %s could not be changed to gnmi format: %v\\n\", gp.Path, err)\n\t\t}\n\t\tgp.Path = gnmiPath.String()\n\t}\n\treturn gp\n}\n\nfunc (a *App) generateTypeInfo(e *yang.Entry) string {\n\tif e == nil || e.Type == nil {\n\t\treturn \"unknown type\"\n\t}\n\tt := e.Type\n\trstr := fmt.Sprintf(\"- type: %s\", t.Kind)\n\tswitch t.Kind {\n\tcase yang.Ybits:\n\t\tdata := getAnnotation(e, \"bits\")\n\t\tif data != nil {\n\t\t\trstr += 
fmt.Sprintf(\" %v\", data)\n\t\t}\n\tcase yang.Yenum:\n\t\tdata := getAnnotation(e, \"enum\")\n\t\tif data != nil {\n\t\t\trstr += fmt.Sprintf(\" %v\", data)\n\t\t}\n\tcase yang.Yleafref:\n\t\trstr += fmt.Sprintf(\" %q\", t.Path)\n\tcase yang.Yidentityref:\n\t\trstr += fmt.Sprintf(\" %q\", t.IdentityBase.Name)\n\t\tif a.Config.LocalFlags.PathWithPrefix {\n\t\t\tdata := getAnnotation(e, \"prefix-qualified-identities\")\n\t\t\tif data != nil {\n\t\t\t\trstr += fmt.Sprintf(\" %v\", data)\n\t\t\t}\n\t\t} else {\n\t\t\tidentities := make([]string, 0, 64)\n\t\t\tfor i := range t.IdentityBase.Values {\n\t\t\t\tidentities = append(identities, t.IdentityBase.Values[i].Name)\n\t\t\t}\n\t\t\trstr += fmt.Sprintf(\" %v\", identities)\n\t\t}\n\n\tcase yang.Yunion:\n\t\tunionlist := make([]string, 0, len(t.Type))\n\t\tfor i := range t.Type {\n\t\t\tunionlist = append(unionlist, t.Type[i].Name)\n\t\t}\n\t\trstr += fmt.Sprintf(\" %v\", unionlist)\n\tdefault:\n\t}\n\trstr += \"\\n\"\n\n\tif t.Root != nil {\n\t\tdata := getAnnotation(e, \"root.type\")\n\t\tif data != nil && t.Kind.String() != data.(string) {\n\t\t\trstr += fmt.Sprintf(\"- root.type: %v\\n\", data)\n\t\t}\n\t}\n\tif t.Units != \"\" {\n\t\trstr += fmt.Sprintf(\"- units: %s\\n\", t.Units)\n\t}\n\tif t.Default != \"\" {\n\t\trstr += fmt.Sprintf(\"- default: %q\\n\", t.Default)\n\t}\n\tif t.FractionDigits != 0 {\n\t\trstr += fmt.Sprintf(\"- fraction-digits: %d\\n\", t.FractionDigits)\n\t}\n\tif len(t.Length) > 0 {\n\t\trstr += fmt.Sprintf(\"- length: %s\\n\", t.Length)\n\t}\n\tif t.Kind == yang.YinstanceIdentifier && !t.OptionalInstance {\n\t\trstr += \"- required\\n\"\n\t}\n\n\tif len(t.Pattern) > 0 {\n\t\trstr += fmt.Sprintf(\"- pattern: %s\\n\", strings.Join(t.Pattern, \"|\"))\n\t}\n\tb := yang.BaseTypedefs[t.Kind.String()].YangType\n\tif len(t.Range) > 0 && !t.Range.Equal(b.Range) {\n\t\trstr += fmt.Sprintf(\"- range: %s\\n\", t.Range)\n\t}\n\treturn rstr\n}\n\nfunc getAnnotation(entry *yang.Entry, name string) any 
{\n\tif entry.Annotation != nil {\n\t\tdata, ok := entry.Annotation[name]\n\t\tif ok {\n\t\t\treturn data\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc isState(e *yang.Entry) bool {\n\tif e.Config == yang.TSFalse {\n\t\treturn true\n\t}\n\tif e.Parent != nil {\n\t\treturn isState(e.Parent)\n\t}\n\treturn false\n}\n\n// collapsePrefixes removes prefixes from path element names and keys\nfunc collapsePrefixes(p string) string {\n\tgp, err := path.ParsePath(p)\n\tif err != nil {\n\t\treturn p\n\t}\n\tparentPrefix := \"\"\n\tfor _, pe := range gp.Elem {\n\t\tcurrentPrefix, name := getPrefixElem(pe.Name)\n\t\tif parentPrefix == \"\" || parentPrefix != currentPrefix {\n\t\t\t// first elem or updating parent prefix\n\t\t\tparentPrefix = currentPrefix\n\t\t} else if currentPrefix == parentPrefix {\n\t\t\tpe.Name = name\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"/%s\", path.GnmiPathToXPath(gp, false))\n}\n\n// takes a path element name or a key name\n// and returns the prefix and name\nfunc getPrefixElem(pe string) (string, string) {\n\tif pe == \"\" {\n\t\treturn \"\", \"\"\n\t}\n\tpes := strings.SplitN(pe, \":\", 2)\n\tif len(pes) > 1 {\n\t\treturn pes[0], pes[1]\n\t}\n\treturn \"\", pes[0]\n}\n"
  },
  {
    "path": "pkg/app/path_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport \"testing\"\n\nvar collapseTestSet = map[string][]string{\n\t\"1\": {\n\t\t\"\",\n\t\t\"/\",\n\t},\n\t\"2\": {\n\t\t\"/prefix1:elem1[key1=*]/prefix1:elem2/prefix2:elem3/prefix2:elem4\",\n\t\t\"/prefix1:elem1[key1=*]/elem2/prefix2:elem3/elem4\",\n\t},\n\t\"3\": {\n\t\t\"/prefix1:elem1[key1=*]/prefix1:elem2/prefix2:elem3/prefix2:elem4\",\n\t\t\"/prefix1:elem1[key1=*]/elem2/prefix2:elem3/elem4\",\n\t},\n\t\"4\": {\n\t\t\"/fake_prefix:\",\n\t\t\"/fake_prefix:\",\n\t},\n\t\"5\": {\n\t\t\"/:fake_prefix\",\n\t\t\"/:fake_prefix\",\n\t},\n\t\"6\": {\n\t\t\"/elem1/prefix1:elem2/prefix1:elem3\",\n\t\t\"/elem1/prefix1:elem2/elem3\",\n\t},\n}\n\nfunc TestCollapsePrefixes(t *testing.T) {\n\tfor name, item := range collapseTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tr := collapsePrefixes(item[0])\n\t\t\tif r != item[1] {\n\t\t\t\tt.Logf(\"failed at item %q\", name)\n\t\t\t\tt.Logf(\"expected: %q\", item[1])\n\t\t\t\tt.Logf(\"\t got: %q\", r)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/app/plugins.go",
    "content": "package app\n\nimport \"github.com/openconfig/gnmic/pkg/formatters/plugin_manager\"\n\nfunc (a *App) initPluginManager() error {\n\tpc, err := a.Config.GetPluginsConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif pc == nil {\n\t\treturn nil\n\t}\n\ta.pm = plugin_manager.New(pc, a.Logger.Writer())\n\treturn a.pm.Load()\n}\n\nfunc (a *App) CleanupPlugins() {\n\tif a.pm == nil {\n\t\treturn\n\t}\n\ta.pm.Cleanup()\n}\n"
  },
  {
    "path": "pkg/app/pprof.go",
    "content": "package app\n\nimport (\n\t\"net/http\"\n\t_ \"net/http/pprof\" //nolint:gosec // Import for pprof, only enabled via CLI flag\n\t\"time\"\n)\n\ntype pprofServer struct {\n\terr chan error\n}\n\nfunc newPprofServer() *pprofServer {\n\treturn &pprofServer{\n\t\terr: make(chan error, 1),\n\t}\n}\n\nfunc (p *pprofServer) Start(address string) {\n\tgo func() {\n\t\tserver := &http.Server{\n\t\t\tAddr:              address,\n\t\t\tReadHeaderTimeout: 10 * time.Second,\n\t\t}\n\n\t\tif err := server.ListenAndServe(); err != nil {\n\t\t\tp.err <- err\n\t\t}\n\t\tclose(p.err)\n\t}()\n}\n\nfunc (p *pprofServer) ErrChan() <-chan error {\n\treturn p.err\n}\n"
  },
  {
    "path": "pkg/app/processor.go",
    "content": "package app\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/AlekSi/pointer\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/file\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\tpromcom \"github.com/openconfig/gnmic/pkg/outputs/prometheus_output\"\n\tdto \"github.com/prometheus/client_model/go\"\n\t\"github.com/prometheus/common/expfmt\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc (a *App) ProcessorPreRunE(cmd *cobra.Command, args []string) error {\n\ta.Config.SetLocalFlagsFromFile(cmd)\n\n\terr := a.initPluginManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (a *App) ProcessorRunE(cmd *cobra.Command, args []string) error {\n\tactionsConfig, err := a.Config.GetActions()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading actions config: %v\", err)\n\t}\n\tpConfig, err := a.Config.GetEventProcessors()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading event processors config: %v\", err)\n\t}\n\ttcs, err := a.Config.GetTargets()\n\tif err != nil {\n\t\tif !errors.Is(err, config.ErrNoTargetsFound) {\n\t\t\treturn err\n\t\t}\n\t}\n\t// initialize processors\n\tevps, err := formatters.MakeEventProcessors(\n\t\ta.Logger,\n\t\ta.Config.LocalFlags.ProcessorName,\n\t\tpConfig,\n\t\ttcs,\n\t\tactionsConfig,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// read input file\n\tinputBytes, err := file.ReadFile(cmd.Context(), a.Config.LocalFlags.ProcessorInput)\n\tif err != nil {\n\t\treturn err\n\t}\n\tevInput := make([][]*formatters.EventMsg, 0)\n\tmsgs := bytes.Split(inputBytes, []byte(a.Config.LocalFlags.ProcessorInputDelimiter))\n\tfor i, bg := range msgs {\n\t\tif len(bg) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tmevs := make([]map[string]any, 0)\n\t\terr = json.Unmarshal(bg, &mevs)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed json Unmarshal at msg index %d: %s: %v\", i, bg, err)\n\t\t}\n\n\t\tevs := 
make([]*formatters.EventMsg, 0, len(mevs))\n\t\tfor _, mev := range mevs {\n\t\t\tev, err := formatters.EventFromMap(mev)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tevs = append(evs, ev)\n\t\t}\n\t\tevInput = append(evInput, evs)\n\t}\n\trrevs := make([][]*formatters.EventMsg, 0, len(evInput))\n\tfor _, evs := range evInput {\n\t\trevs := evs\n\t\tfor _, p := range evps {\n\t\t\trevs = p.Apply(revs...)\n\t\t}\n\t\trrevs = append(rrevs, revs)\n\t}\n\n\tif len(a.Config.LocalFlags.ProcessorOutput) != 0 {\n\t\tb, err := a.promFormat(rrevs, a.Config.LocalFlags.ProcessorOutput)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(string(b))\n\t\treturn nil\n\t}\n\n\tnumEvOut := len(rrevs)\n\tfor i, rev := range rrevs {\n\t\tb, err := json.MarshalIndent(rev, \"\", \"  \")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(string(b))\n\t\tif i == numEvOut-1 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *App) InitProcessorFlags(cmd *cobra.Command) {\n\tcmd.ResetFlags()\n\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.ProcessorInput, \"input\", \"\", \"\", \"processors input\")\n\tcmd.MarkFlagRequired(\"input\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.ProcessorInputDelimiter, \"delimiter\", \"\", \"\\n\", \"processors input delimiter\")\n\tcmd.Flags().StringSliceVarP(&a.Config.LocalFlags.ProcessorName, \"name\", \"\", nil, \"list of processors to apply to the input\")\n\tcmd.MarkFlagRequired(\"name\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.ProcessorOutput, \"output\", \"\", \"\", \"output name\")\n}\n\nfunc (a *App) promFormat(rrevs [][]*formatters.EventMsg, outName string) ([]byte, error) {\n\t// read output config\n\toutputPath := \"outputs/\" + outName\n\toutputConfig := a.Config.FileConfig.GetStringMap(outputPath)\n\tif outputConfig == nil {\n\t\treturn nil, fmt.Errorf(\"unknown output name: %s\", outName)\n\t}\n\toutType := a.Config.FileConfig.GetString(outputPath + \"/type\")\n\tif outType != \"prometheus\" 
&& outType != \"remote_write\" {\n\t\treturn nil, fmt.Errorf(\"output %q must be of type 'prometheus' or 'remote_write'\", outName)\n\t}\n\tmb := &promcom.MetricBuilder{\n\t\tPrefix:                 a.Config.FileConfig.GetString(outputPath + \"/metric-prefix\"),\n\t\tAppendSubscriptionName: a.Config.FileConfig.GetBool(outputPath + \"/append-subscription-name\"),\n\t\tStringsAsLabels:        a.Config.FileConfig.GetBool(outputPath + \"/strings-as-labels\"),\n\t\tOverrideTimestamps:     a.Config.FileConfig.GetBool(outputPath + \"/override-timestamps\"),\n\t\tExportTimestamps:       a.Config.FileConfig.GetBool(outputPath + \"/export-timestamps\"),\n\t}\n\n\tb := new(bytes.Buffer)\n\tnow := time.Now()\n\tfor _, revs := range rrevs {\n\t\tfor _, ev := range revs {\n\t\t\tpms := mb.MetricsFromEvent(ev, now)\n\t\t\tfor _, pm := range pms {\n\t\t\t\tm := &dto.Metric{}\n\t\t\t\terr := pm.Write(m)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\t_, err = expfmt.MetricFamilyToText(b, &dto.MetricFamily{\n\t\t\t\t\tName:   pointer.ToString(pm.Name),\n\t\t\t\t\tHelp:   pointer.ToString(\"gNMIc generated metric\"),\n\t\t\t\t\tType:   dto.MetricType_UNTYPED.Enum(),\n\t\t\t\t\tMetric: []*dto.Metric{m},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn b.Bytes(), nil\n}\n"
  },
  {
    "path": "pkg/app/prompt.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"github.com/mitchellh/go-homedir\"\n\t\"github.com/nsf/termbox-go\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n)\n\nfunc (a *App) PromptRunE(cmd *cobra.Command, args []string) error {\n\terr := a.generateYangSchema(a.Config.GlobalFlags.File, a.Config.GlobalFlags.Exclude)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to load paths from yang: %v\", err)\n\t\tif !a.Config.Log {\n\t\t\tfmt.Fprintf(os.Stderr, \"ERR: failed to load paths from yang: %v\\n\", err)\n\t\t}\n\t}\n\ta.PromptMode = true\n\t// load history\n\ta.PromptHistory = make([]string, 0, 256)\n\thome, err := homedir.Dir()\n\tif err != nil {\n\t\tif a.Config.Debug {\n\t\t\ta.Logger.Printf(\"failed to get home directory: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\tcontent, err := os.ReadFile(filepath.Join(home, \".gnmic.history\"))\n\tif err != nil {\n\t\tif a.Config.Debug {\n\t\t\ta.Logger.Printf(\"failed to read history file: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\thistory := strings.Split(string(content), \"\\n\")\n\tfor i := range history {\n\t\tif history[i] != \"\" {\n\t\t\ta.PromptHistory = append(a.PromptHistory, history[i])\n\t\t}\n\t}\n\treturn nil\n}\n\n// PreRun resolve the glob patterns and checks if --max-suggestions is bigger that the terminal height and lowers it if needed.\nfunc (a *App) PromptPreRunE(cmd *cobra.Command, args []string) error {\n\ta.Config.SetLocalFlagsFromFile(cmd)\n\terr := a.yangFilesPreProcessing()\n\tif 
err != nil {\n\t\treturn err\n\t}\n\terr = termbox.Init()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not initialize a terminal box: %v\", err)\n\t}\n\t_, h := termbox.Size()\n\ttermbox.Close()\n\t// set max suggestions to terminal height-1 if the supplied value is greater\n\tif uint(a.Config.LocalFlags.PromptMaxSuggestions) > uint(h) {\n\t\tif h > 1 {\n\t\t\ta.Config.LocalFlags.PromptMaxSuggestions = uint16(h - 2)\n\t\t} else {\n\t\t\ta.Config.LocalFlags.PromptMaxSuggestions = 0\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *App) InitPromptFlags(cmd *cobra.Command) {\n\tcmd.Flags().Uint16Var(&a.Config.LocalFlags.PromptMaxSuggestions, \"max-suggestions\", 10, \"terminal suggestion max list size\")\n\tcmd.Flags().StringVar(&a.Config.LocalFlags.PromptPrefixColor, \"prefix-color\", \"dark_blue\", \"terminal prefix color\")\n\tcmd.Flags().StringVar(&a.Config.LocalFlags.PromptSuggestionsBGColor, \"suggestions-bg-color\", \"dark_blue\", \"suggestion box background color\")\n\tcmd.Flags().StringVar(&a.Config.LocalFlags.PromptDescriptionBGColor, \"description-bg-color\", \"dark_gray\", \"description box background color\")\n\tcmd.Flags().BoolVar(&a.Config.LocalFlags.PromptSuggestAllFlags, \"suggest-all-flags\", false, \"suggest local as well as inherited flags of subcommands\")\n\tcmd.Flags().BoolVar(&a.Config.LocalFlags.PromptDescriptionWithPrefix, \"description-with-prefix\", false, \"show YANG module prefix in XPATH suggestion description\")\n\tcmd.Flags().BoolVar(&a.Config.LocalFlags.PromptDescriptionWithTypes, \"description-with-types\", false, \"show YANG types in XPATH suggestion description\")\n\tcmd.Flags().BoolVar(&a.Config.LocalFlags.PromptSuggestWithOrigin, \"suggest-with-origin\", false, \"suggest XPATHs with origin prepended \")\n\tcmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", cmd.Name(), flag.Name), flag)\n\t})\n}\n"
  },
  {
    "path": "pkg/app/proxy.go",
    "content": "// © 2024 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/api/server\"\n\t\"github.com/openconfig/gnmic/pkg/api/target\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"github.com/spf13/cobra\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/peer\"\n\t\"google.golang.org/grpc/status\"\n\t\"google.golang.org/protobuf/proto\"\n)\n\ntype targetSubscribeResponse struct {\n\tname string\n\trsp  *gnmi.SubscribeResponse\n}\n\nfunc (a *App) ProxyPreRunE(cmd *cobra.Command, args []string) error {\n\ta.Config.SetLocalFlagsFromFile(cmd)\n\ta.createCollectorDialOpts()\n\treturn nil\n}\n\nfunc (a *App) ProxyRunE(cmd *cobra.Command, args []string) error {\n\terr := a.Config.GetGNMIServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = a.Config.GetAPIServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = a.Config.GetLoader()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = a.initTunnelServer(tunnel.ServerConfig{\n\t\tAddTargetHandler:    a.tunServerAddTargetSubscribeHandler,\n\t\tDeleteTargetHandler: a.tunServerDeleteTargetHandler,\n\t\tRegisterHandler:     a.tunServerRegisterHandler,\n\t\tHandler:             a.tunServerHandler,\n\t})\n\tif err != nil {\n\t\treturn 
err\n\t}\n\t_, err = a.Config.GetTargets()\n\tif errors.Is(err, config.ErrNoTargetsFound) {\n\t\tif len(a.Config.FileConfig.GetStringMap(\"loader\")) == 0 &&\n\t\t\t!a.Config.UseTunnelServer {\n\t\t\treturn fmt.Errorf(\"failed reading targets config: %v\", err)\n\t\t}\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"failed reading targets config: %v\", err)\n\t}\n\n\ta.startAPIServer()\n\tgo a.startLoaderProxy(cmd.Context())\n\tgo a.registerGNMIServer(cmd.Context(), \"isProxy=true\")\n\treturn a.startGNMIProxyServer(cmd.Context())\n}\n\nfunc (a *App) startGNMIProxyServer(ctx context.Context) error {\n\ts, err := server.New(server.Config{\n\t\tAddress:              a.Config.GnmiServer.Address,\n\t\tMaxUnaryRPC:          a.Config.GnmiServer.MaxUnaryRPC,\n\t\tMaxStreamingRPC:      a.Config.GnmiServer.MaxSubscriptions,\n\t\tMaxRecvMsgSize:       a.Config.GnmiServer.MaxRecvMsgSize,\n\t\tMaxSendMsgSize:       a.Config.GnmiServer.MaxSendMsgSize,\n\t\tMaxConcurrentStreams: a.Config.GnmiServer.MaxConcurrentStreams,\n\t\tTCPKeepalive:         a.Config.GnmiServer.TCPKeepalive,\n\t\tKeepalive:            a.Config.GnmiServer.GRPCKeepalive.Convert(),\n\t\tHealthEnabled:        true,\n\t\tRateLimit:            a.Config.GnmiServer.RateLimit,\n\t\tTimeout:              a.Config.GnmiServer.Timeout,\n\t\tTLS:                  a.Config.GnmiServer.TLS,\n\t}, server.WithLogger(a.Logger),\n\t\tserver.WithRegistry(a.reg),\n\t\tserver.WithGetHandler(a.proxyGetHandler),\n\t\tserver.WithSetHandler(a.proxySetHandler),\n\t\tserver.WithSubscribeHandler(a.proxySubscribeHandler))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn s.Start(ctx)\n}\n\nfunc (a *App) proxyGetHandler(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) {\n\ttargetName := req.GetPrefix().GetTarget()\n\tpr, _ := peer.FromContext(ctx)\n\ta.Logger.Printf(\"received Get request from %q to target %q\", pr.Addr, targetName)\n\n\ttargets, err := a.selectTargets(ctx, targetName)\n\tif err != nil {\n\t\treturn 
nil, status.Errorf(codes.Internal, \"could not find targets: %v\", err)\n\t}\n\tnumTargets := len(targets)\n\tif numTargets == 0 {\n\t\treturn nil, status.Errorf(codes.NotFound, \"unknown target %q\", targetName)\n\t}\n\n\tresults := make(chan *gnmi.Notification)\n\terrChan := make(chan error, numTargets)\n\n\tresponse := &gnmi.GetResponse{\n\t\t// assume one notification target\n\t\tNotification: make([]*gnmi.Notification, 0, numTargets),\n\t}\n\tdone := make(chan struct{})\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase notif, ok := <-results:\n\t\t\t\tif !ok {\n\t\t\t\t\tclose(done)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tresponse.Notification = append(response.Notification, notif)\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\twg := new(sync.WaitGroup)\n\twg.Add(numTargets)\n\n\tfor name, t := range targets {\n\t\tgo func(name string, t *target.Target) {\n\t\t\tdefer wg.Done()\n\n\t\t\tcreq := proto.Clone(req).(*gnmi.GetRequest)\n\t\t\tif creq.GetPrefix() == nil {\n\t\t\t\tcreq.Prefix = new(gnmi.Path)\n\t\t\t}\n\t\t\tif creq.GetPrefix().GetTarget() == \"\" || creq.GetPrefix().GetTarget() == \"*\" {\n\t\t\t\tcreq.Prefix.Target = name\n\t\t\t}\n\t\t\tres, err := t.Get(ctx, creq)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"target %q err: %v\", name, err)\n\t\t\t\terrChan <- fmt.Errorf(\"target %q err: %v\", name, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, n := range res.GetNotification() {\n\t\t\t\tif n.GetPrefix() == nil {\n\t\t\t\t\tn.Prefix = new(gnmi.Path)\n\t\t\t\t}\n\t\t\t\tif n.GetPrefix().GetTarget() == \"\" {\n\t\t\t\t\tn.Prefix.Target = name\n\t\t\t\t}\n\t\t\t\tresults <- n\n\t\t\t}\n\t\t}(name, t)\n\t}\n\twg.Wait()\n\tclose(results)\n\tclose(errChan)\n\tfor err := range errChan {\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"%v\", err)\n\t\t}\n\t}\n\t<-done\n\tif a.Config.Debug {\n\t\ta.Logger.Printf(\"sending GetResponse to %q: 
%+v\", pr.Addr, response)\n\t}\n\treturn response, nil\n}\n\nfunc (a *App) proxySetHandler(ctx context.Context, req *gnmi.SetRequest) (*gnmi.SetResponse, error) {\n\tnumUpdates := len(req.GetUpdate())\n\tnumReplaces := len(req.GetReplace())\n\tnumDeletes := len(req.GetDelete())\n\tnumUnionReplace := len(req.GetUnionReplace())\n\tif numUpdates+numReplaces+numDeletes+numUnionReplace == 0 {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"missing update/replace/delete path(s)\")\n\t}\n\n\ttargetName := req.GetPrefix().GetTarget()\n\tpr, _ := peer.FromContext(ctx)\n\ta.Logger.Printf(\"received Set request from %q to target %q\", pr.Addr, targetName)\n\n\ttargets, err := a.selectTargets(ctx, targetName)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"could not find targets: %v\", err)\n\t}\n\tnumTargets := len(targets)\n\tif numTargets == 0 {\n\t\treturn nil, status.Errorf(codes.NotFound, \"unknown target(s) %q\", targetName)\n\t}\n\tresults := make(chan *gnmi.UpdateResult)\n\terrChan := make(chan error, numTargets)\n\n\tresponse := &gnmi.SetResponse{\n\t\t// assume one update per target, per update/replace/delete\n\t\tResponse: make([]*gnmi.UpdateResult, 0, numTargets*(numUpdates+numReplaces+numDeletes+numUnionReplace)),\n\t}\n\tdone := make(chan struct{})\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase upd, ok := <-results:\n\t\t\t\tif !ok {\n\t\t\t\t\tresponse.Timestamp = time.Now().UnixNano()\n\t\t\t\t\tclose(done)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tresponse.Response = append(response.Response, upd)\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\twg := new(sync.WaitGroup)\n\twg.Add(numTargets)\n\tfor name, t := range targets {\n\t\tgo func(name string, t *target.Target) {\n\t\t\tdefer wg.Done()\n\n\t\t\tcreq := proto.Clone(req).(*gnmi.SetRequest)\n\t\t\tif creq.GetPrefix() == nil {\n\t\t\t\tcreq.Prefix = new(gnmi.Path)\n\t\t\t}\n\t\t\tif 
creq.GetPrefix().GetTarget() == \"\" || creq.GetPrefix().GetTarget() == \"*\" {\n\t\t\t\tcreq.Prefix.Target = name\n\t\t\t}\n\t\t\tres, err := t.Set(ctx, creq)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"target %q err: %v\", name, err)\n\t\t\t\terrChan <- fmt.Errorf(\"target %q err: %v\", name, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, upd := range res.GetResponse() {\n\t\t\t\tupd.Path.Target = name\n\t\t\t\tresults <- upd\n\t\t\t}\n\t\t}(name, t)\n\t}\n\twg.Wait()\n\tclose(results)\n\tclose(errChan)\n\tfor err := range errChan {\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"%v\", err)\n\t\t}\n\t}\n\t<-done\n\ta.Logger.Printf(\"sending SetResponse to %q: %+v\", pr.Addr, response)\n\treturn response, nil\n}\n\nfunc (a *App) proxySubscribeHandler(req *gnmi.SubscribeRequest, stream gnmi.GNMI_SubscribeServer) error {\n\tswitch req.GetRequest().(type) {\n\tcase *gnmi.SubscribeRequest_Poll:\n\t\treturn status.Errorf(codes.InvalidArgument, \"invalid request type: %T\", req.GetRequest())\n\tcase *gnmi.SubscribeRequest_Subscribe:\n\t}\n\n\tswitch req.GetSubscribe().GetMode() {\n\tcase gnmi.SubscriptionList_ONCE:\n\tcase gnmi.SubscriptionList_STREAM:\n\tcase gnmi.SubscriptionList_POLL:\n\t\treturn status.Errorf(codes.Unimplemented, \"subscribe mode POLL not implemented by the proxy\")\n\tdefault:\n\t\treturn status.Errorf(codes.InvalidArgument, \"unknown subscribe request mode: %v\", req.GetSubscribe().GetMode())\n\t}\n\n\tctx := stream.Context()\n\ttargetName := getTargetFromSubscribeRequest(req)\n\n\ttargets, err := a.selectTargets(ctx, targetName)\n\tif err != nil {\n\t\treturn status.Errorf(codes.Internal, \"could not find target(s): %v\", err)\n\t}\n\tnumTargets := len(targets)\n\tif numTargets == 0 {\n\t\treturn status.Errorf(codes.NotFound, \"unknown target(s) %q\", targetName)\n\t}\n\n\tswitch req.GetSubscribe().GetMode() {\n\tcase gnmi.SubscriptionList_ONCE:\n\t\treturn a.proxySubscribeONCEHandler(req, stream, targets)\n\tcase 
gnmi.SubscriptionList_STREAM:\n\t\treturn a.proxySubscribeSTREAMHandler(req, stream, targets)\n\t}\n\treturn nil\n}\n\nfunc (a *App) proxySubscribeONCEHandler(req *gnmi.SubscribeRequest, stream gnmi.GNMI_SubscribeServer, targets map[string]*target.Target) error {\n\tctx := stream.Context()\n\tnumTargets := len(targets)\n\n\tresults := make(chan *targetSubscribeResponse)\n\terrChan := make(chan error, numTargets)\n\tdone := make(chan struct{})\n\tstop := make(chan struct{})\n\n\tgo func() {\n\t\tdefer close(done)\n\t\tsyncs := make(map[string]struct{})\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase r, ok := <-results:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tswitch r.rsp.Response.(type) {\n\t\t\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\t\t\tif r.rsp.GetUpdate().GetPrefix() == nil {\n\t\t\t\t\t\tr.rsp.GetUpdate().Prefix = new(gnmi.Path)\n\t\t\t\t\t}\n\t\t\t\t\tif r.rsp.GetUpdate().GetPrefix().GetTarget() == \"\" {\n\t\t\t\t\t\tr.rsp.GetUpdate().GetPrefix().Target = r.name\n\t\t\t\t\t}\n\t\t\t\t\terr := stream.Send(r.rsp)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tclose(stop)\n\t\t\t\t\t\ta.Logger.Printf(\"proxy stream send failed: %v\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\t\t\t\tsyncs[r.name] = struct{}{}\n\t\t\t\t\tif len(syncs) >= numTargets {\n\t\t\t\t\t\t// send a single sync and stop\n\t\t\t\t\t\terr := stream.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}})\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\ta.Logger.Printf(\"proxy stream send Sync response failed: %v\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\twg := new(sync.WaitGroup)\n\twg.Add(numTargets)\n\n\tfor name, t := range targets {\n\t\tgo func(name string, t *target.Target) {\n\t\t\tdefer wg.Done()\n\n\t\t\tctx, cancel := context.WithCancel(ctx)\n\t\t\tdefer cancel()\n\t\t\tcreq := 
proto.Clone(req).(*gnmi.SubscribeRequest)\n\t\t\tif creq.GetSubscribe().GetPrefix() == nil {\n\t\t\t\tcreq.GetSubscribe().Prefix = new(gnmi.Path)\n\t\t\t}\n\t\t\tif creq.GetSubscribe().GetPrefix().GetTarget() == \"\" || creq.GetSubscribe().GetPrefix().GetTarget() == \"*\" {\n\t\t\t\tcreq.GetSubscribe().Prefix.Target = name\n\t\t\t}\n\n\t\t\tresCh, errCh := t.SubscribeOnceChan(ctx, creq)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase <-stop:\n\t\t\t\t\tcancel()\n\t\t\t\t\treturn\n\t\t\t\tcase r, ok := <-resCh:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tresults <- &targetSubscribeResponse{\n\t\t\t\t\t\tname: name,\n\t\t\t\t\t\trsp:  r,\n\t\t\t\t\t}\n\t\t\t\tcase err := <-errCh:\n\t\t\t\t\tif errors.Is(err, io.EOF) {\n\t\t\t\t\t\ta.Logger.Printf(\"target %q: closed stream(EOF)\", t.Config.Name)\n\t\t\t\t\t} else {\n\t\t\t\t\t\terrChan <- err\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(name, t)\n\t}\n\twg.Wait()\n\tclose(results)\n\tclose(errChan)\n\tfor err := range errChan {\n\t\tif err != nil {\n\t\t\treturn status.Errorf(codes.Internal, \"%v\", err)\n\t\t}\n\t}\n\t<-done\n\treturn nil\n}\n\nfunc (a *App) proxySubscribeSTREAMHandler(req *gnmi.SubscribeRequest, stream gnmi.GNMI_SubscribeServer, targets map[string]*target.Target) error {\n\tctx := stream.Context()\n\tnumTargets := len(targets)\n\n\tresults := make(chan *targetSubscribeResponse)\n\terrChan := make(chan error, numTargets)\n\tdone := make(chan struct{})\n\t// used to stop target subscriptions if\n\t// the northbound subscription stops.\n\tstop := make(chan struct{})\n\n\tgo func() {\n\t\tdefer close(done)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase r, ok := <-results:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tswitch r.rsp.Response.(type) {\n\t\t\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\t\t\tif r.rsp.GetUpdate().GetPrefix() == nil {\n\t\t\t\t\t\tr.rsp.GetUpdate().Prefix 
= new(gnmi.Path)\n\t\t\t\t\t}\n\t\t\t\t\tif r.rsp.GetUpdate().GetPrefix().GetTarget() == \"\" {\n\t\t\t\t\t\tr.rsp.GetUpdate().GetPrefix().Target = r.name\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\terr := stream.Send(r.rsp)\n\t\t\t\tif err != nil {\n\t\t\t\t\tclose(stop)\n\t\t\t\t\ta.Logger.Printf(\"proxy stream send failed: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tpr, _ := peer.FromContext(ctx)\n\n\twg := new(sync.WaitGroup)\n\twg.Add(numTargets)\n\n\tfor name, t := range targets {\n\t\tgo func(name string, t *target.Target) {\n\t\t\tdefer wg.Done()\n\n\t\t\tctx, cancel := context.WithCancel(ctx)\n\t\t\tdefer cancel()\n\t\t\tcreq := proto.Clone(req).(*gnmi.SubscribeRequest)\n\t\t\tif creq.GetSubscribe().GetPrefix() == nil {\n\t\t\t\tcreq.GetSubscribe().Prefix = new(gnmi.Path)\n\t\t\t}\n\t\t\tif creq.GetSubscribe().GetPrefix().GetTarget() == \"\" || creq.GetSubscribe().GetPrefix().GetTarget() == \"*\" {\n\t\t\t\tcreq.GetSubscribe().Prefix.Target = name\n\t\t\t}\n\t\t\tsubName := pr.Addr.String() + \"-\" + name + \"-\" + strconv.Itoa(time.Now().Nanosecond())\n\t\t\trspCh, errCh := t.SubscribeStreamChan(ctx, creq, subName)\n\t\t\tdefer t.StopSubscription(subName)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase <-stop:\n\t\t\t\t\tcancel()\n\t\t\t\t\treturn\n\t\t\t\tcase r, ok := <-rspCh:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tresults <- &targetSubscribeResponse{\n\t\t\t\t\t\tname: name,\n\t\t\t\t\t\trsp:  r,\n\t\t\t\t\t}\n\t\t\t\tcase err, ok := <-errCh:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\terrChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(name, t)\n\t}\n\twg.Wait()\n\tclose(results)\n\tclose(errChan)\n\tfor err := range errChan {\n\t\tif err != nil {\n\t\t\treturn status.Errorf(codes.Internal, \"%v\", err)\n\t\t}\n\t}\n\t<-done\n\treturn nil\n}\n\nfunc getTargetFromSubscribeRequest(req *gnmi.SubscribeRequest) string {\n\tswitch 
req.GetRequest().(type) {\n\tcase *gnmi.SubscribeRequest_Poll:\n\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\treturn req.GetSubscribe().GetPrefix().GetTarget()\n\t}\n\treturn \"\"\n}\n\nfunc (a *App) selectTargets(ctx context.Context, tn string) (map[string]*target.Target, error) {\n\ttargets := make(map[string]*target.Target)\n\n\ta.operLock.Lock()\n\tdefer a.operLock.Unlock()\n\ta.configLock.Lock()\n\tdefer a.configLock.Unlock()\n\n\tif tn == \"\" || tn == \"*\" {\n\t\tfor n, tc := range a.Config.Targets {\n\t\t\ttargetName := utils.GetHost(n)\n\t\t\tif t, ok := a.Targets[targetName]; ok {\n\t\t\t\ttargets[targetName] = t\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt, err := a.createTarget(ctx, tc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ta.Targets[targetName] = t\n\t\t\ttargets[n] = t\n\t\t}\n\t\treturn targets, nil\n\t}\n\ttargetsNames := strings.Split(tn, \",\")\n\n\tfor i := range targetsNames {\n\t\tfor n, t := range a.Targets {\n\t\t\tif utils.GetHost(n) == targetsNames[i] {\n\t\t\t\ttargets[n] = t\n\t\t\t}\n\t\t}\n\t}\n\tif len(targets) == len(targetsNames) {\n\t\treturn targets, nil\n\t}\n\nOUTER:\n\tfor i := range targetsNames {\n\t\tfor n, tc := range a.Config.Targets {\n\t\t\ttargetName := utils.GetHost(n)\n\t\t\tif _, ok := targets[targetName]; !ok && targetName == targetsNames[i] {\n\t\t\t\tt, err := a.createTarget(ctx, tc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\ta.Targets[targetName] = t\n\t\t\t\ttargets[n] = t\n\t\t\t\tcontinue OUTER\n\t\t\t}\n\t\t}\n\t\treturn nil, status.Errorf(codes.NotFound, \"target %q is not known\", targetsNames[i])\n\t}\n\treturn targets, nil\n}\n\nfunc (a *App) createTarget(ctx context.Context, tc *types.TargetConfig) (*target.Target, error) {\n\tt := target.NewTarget(tc)\n\ttargetDialOpts := a.dialOpts\n\tif a.Config.UseTunnelServer {\n\t\ttargetDialOpts = append(targetDialOpts,\n\t\t\tgrpc.WithContextDialer(a.tunDialerFn(ctx, tc)),\n\t\t)\n\t\tt.Config.Address = 
t.Config.Name\n\t}\n\terr := t.CreateGNMIClient(ctx, targetDialOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn t, nil\n}\n"
  },
  {
    "path": "pkg/app/routes.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/gorilla/mux\"\n)\n\nfunc (a *App) routes() {\n\tapiV1 := a.router.PathPrefix(\"/api/v1\").Subrouter()\n\ta.clusterRoutes(apiV1)\n\ta.configRoutes(apiV1)\n\ta.targetRoutes(apiV1)\n\ta.healthRoutes(apiV1)\n\ta.adminRoutes(apiV1)\n}\n\nfunc (a *App) clusterRoutes(r *mux.Router) {\n\tr.HandleFunc(\"/cluster\", a.handleClusteringGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/cluster/rebalance\", a.handleClusterRebalance).Methods(http.MethodPost)\n\tr.HandleFunc(\"/cluster/leader\", a.handleClusteringLeaderGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/cluster/leader\", a.handleClusteringLeaderDelete).Methods(http.MethodDelete)\n\tr.HandleFunc(\"/cluster/members\", a.handleClusteringMembersGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/cluster/members/{id}/drain\", a.handleClusteringDrainInstance).Methods(http.MethodPost)\n}\n\nfunc (a *App) configRoutes(r *mux.Router) {\n\t// config\n\tr.HandleFunc(\"/config\", a.handleConfig).Methods(http.MethodGet)\n\t// config/targets\n\tr.HandleFunc(\"/config/targets\", a.handleConfigTargetsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/targets/{id}\", a.handleConfigTargetsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/targets\", a.handleConfigTargetsPost).Methods(http.MethodPost)\n\tr.HandleFunc(\"/config/targets/{id}\", a.handleConfigTargetsDelete).Methods(http.MethodDelete)\n\tr.HandleFunc(\"/config/targets/{id}/subscriptions\", 
a.handleConfigTargetsSubscriptions).Methods(http.MethodPatch)\n\t// config/subscriptions\n\tr.HandleFunc(\"/config/subscriptions\", a.handleConfigSubscriptions).Methods(http.MethodGet)\n\t// config/outputs\n\tr.HandleFunc(\"/config/outputs\", a.handleConfigOutputs).Methods(http.MethodGet)\n\t// config/inputs\n\tr.HandleFunc(\"/config/inputs\", a.handleConfigInputs).Methods(http.MethodGet)\n\t// config/processors\n\tr.HandleFunc(\"/config/processors\", a.handleConfigProcessors).Methods(http.MethodGet)\n\t// config/clustering\n\tr.HandleFunc(\"/config/clustering\", a.handleConfigClustering).Methods(http.MethodGet)\n\t// config/api-server\n\tr.HandleFunc(\"/config/api-server\", a.handleConfigAPIServer).Methods(http.MethodGet)\n\t// config/gnmi-server\n\tr.HandleFunc(\"/config/gnmi-server\", a.handleConfigGNMIServer).Methods(http.MethodGet)\n}\n\nfunc (a *App) targetRoutes(r *mux.Router) {\n\t// targets\n\tr.HandleFunc(\"/targets\", a.handleTargetsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/targets/{id}\", a.handleTargetsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/targets/{id}\", a.handleTargetsPost).Methods(http.MethodPost)\n\tr.HandleFunc(\"/targets/{id}\", a.handleTargetsDelete).Methods(http.MethodDelete)\n}\n\nfunc (a *App) healthRoutes(r *mux.Router) {\n\tr.HandleFunc(\"/healthz\", a.handleHealthzGet).Methods(http.MethodGet)\n}\n\nfunc (a *App) adminRoutes(r *mux.Router) {\n\tr.HandleFunc(\"/admin/shutdown\", a.handleAdminShutdown).Methods(http.MethodPost)\n}\n"
  },
  {
    "path": "pkg/app/set-to-notifs.go",
    "content": "// Copyright 2023 Google LLC\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage app\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/openconfig/ygot/gnmidiff\"\n\t\"github.com/openconfig/ygot/gnmidiff/gnmiparse\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n)\n\n// InitDiffSetToNotifsFlags used to init or reset newDiffSetRequestCmd\n// flags for gnmic-prompt mode\nfunc (a *App) InitDiffSetToNotifsFlags(cmd *cobra.Command) {\n\tcmd.ResetFlags()\n\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.DiffSetToNotifsSet, \"setrequest\", \"\", \"\", \"reference gNMI SetRequest textproto file for comparing against stored notifications from a device\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.DiffSetToNotifsResponse, \"response\", \"\", \"\", \"gNMI Notifications textproto file (can be GetResponse or SubscribeResponse stream) for comparing against the reference SetRequest\")\n\tcmd.MarkFlagRequired(\"setrequest\")\n\tcmd.MarkFlagRequired(\"response\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.DiffSetToNotifsFull, \"full\", \"f\", false, \"show common values\")\n\n\tcmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", \"diff-set-to-notifs\", flag.Name), flag)\n\t})\n}\n\nfunc (a *App) DiffSetToNotifsRunE(cmd *cobra.Command, args []string) error {\n\tdefer a.InitDiffSetRequestFlags(cmd)\n\n\tformat := gnmidiff.Format{\n\t\tFull: 
a.Config.LocalFlags.DiffSetToNotifsFull,\n\t}\n\n\tsetreq, err := gnmiparse.SetRequestFromFile(a.Config.LocalFlags.DiffSetToNotifsSet)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnotifs, err := gnmiparse.NotifsFromFile(a.Config.LocalFlags.DiffSetToNotifsResponse)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdiff, err := gnmidiff.DiffSetRequestToNotifications(setreq, notifs, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprint(os.Stderr, diff.Format(format))\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/app/set.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n)\n\nfunc (a *App) SetPreRunE(cmd *cobra.Command, args []string) error {\n\ta.Config.SetLocalFlagsFromFile(cmd)\n\terr := a.Config.ValidateSetInput()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ta.createCollectorDialOpts()\n\treturn a.initTunnelServer(tunnel.ServerConfig{\n\t\tAddTargetHandler:    a.tunServerAddTargetHandler,\n\t\tDeleteTargetHandler: a.tunServerDeleteTargetHandler,\n\t\tRegisterHandler:     a.tunServerRegisterHandler,\n\t\tHandler:             a.tunServerHandler,\n\t})\n}\n\nfunc (a *App) SetRunE(cmd *cobra.Command, args []string) error {\n\tdefer a.InitSetFlags(cmd)\n\n\tif a.Config.Format == formatEvent {\n\t\treturn fmt.Errorf(\"format event not supported for Set RPC\")\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\t// setupCloseHandler(cancel)\n\ttargetsConfig, err := a.GetTargets()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed getting targets config: %v\", err)\n\t}\n\tif !a.PromptMode {\n\t\tfor _, tc := range targetsConfig {\n\t\t\ta.AddTargetConfig(tc)\n\t\t}\n\t}\n\terr = a.Config.ReadSetRequestTemplate()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading set request files: %v\", err)\n\t}\n\tnumTargets := 
len(a.Config.Targets)\n\ta.errCh = make(chan error, numTargets*2)\n\ta.wg.Add(numTargets)\n\tfor _, tc := range a.Config.Targets {\n\t\tgo a.SetRequest(ctx, tc)\n\t}\n\ta.wg.Wait()\n\treturn a.checkErrors()\n}\n\nfunc (a *App) SetRequest(ctx context.Context, tc *types.TargetConfig) {\n\tdefer a.wg.Done()\n\treqs, err := a.Config.CreateSetRequest(tc.Name)\n\tif err != nil {\n\t\ta.logError(fmt.Errorf(\"target %q: failed to create set request: %v\", tc.Name, err))\n\t\treturn\n\t}\n\tfor _, req := range reqs {\n\t\ta.setRequest(ctx, tc, req)\n\t}\n}\n\nfunc (a *App) setRequest(ctx context.Context, tc *types.TargetConfig, req *gnmi.SetRequest) {\n\ta.Logger.Printf(\"sending gNMI SetRequest: prefix='%v', delete='%v', replace='%v', update='%v', extension='%v' to %s\",\n\t\treq.Prefix, req.Delete, req.Replace, req.Update, req.Extension, tc.Name)\n\tif a.Config.PrintRequest || a.Config.SetDryRun {\n\t\terr := a.PrintMsg(tc.Name, \"Set Request:\", req)\n\t\tif err != nil {\n\t\t\ta.logError(fmt.Errorf(\"target %q: %v\", tc.Name, err))\n\t\t}\n\t}\n\tif a.Config.SetDryRun {\n\t\treturn\n\t}\n\tresponse, err := a.ClientSet(ctx, tc, req)\n\tif err != nil {\n\t\ta.logError(fmt.Errorf(\"target %q set request failed: %v\", tc.Name, err))\n\t\treturn\n\t}\n\terr = a.PrintMsg(tc.Name, \"Set Response:\", response)\n\tif err != nil {\n\t\ta.logError(fmt.Errorf(\"target %q: %v\", tc.Name, err))\n\t}\n}\n\n// InitSetFlags used to init or reset setCmd flags for gnmic-prompt mode\nfunc (a *App) InitSetFlags(cmd *cobra.Command) {\n\tcmd.ResetFlags()\n\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SetPrefix, \"prefix\", \"\", \"\", \"set request prefix\")\n\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetDelete, \"delete\", \"\", []string{}, \"set request path to be deleted\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetReplace, \"replace\", \"\", []string{}, fmt.Sprintf(\"set request path:::type:::value to be replaced, type must be one of %v\", 
config.ValueTypes))\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUpdate, \"update\", \"\", []string{}, fmt.Sprintf(\"set request path:::type:::value to be updated, type must be one of %v\", config.ValueTypes))\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUnionReplace, \"union-replace\", \"\", []string{}, fmt.Sprintf(\"set request path:::type:::value to be union-replaced, type must be one of %v\", config.ValueTypes))\n\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetReplacePath, \"replace-path\", \"\", []string{}, \"set request path to be replaced\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetReplaceValue, \"replace-value\", \"\", []string{}, \"set replace request value\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetReplaceFile, \"replace-file\", \"\", []string{}, \"set replace request value in a json/yaml file\")\n\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUpdatePath, \"update-path\", \"\", []string{}, \"set request path to be updated\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUpdateFile, \"update-file\", \"\", []string{}, \"set update request value in a json/yaml file\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUpdateValue, \"update-value\", \"\", []string{}, \"set update request value\")\n\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUnionReplacePath, \"union-replace-path\", \"\", []string{}, \"set request path for a union_replace\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUnionReplaceValue, \"union-replace-value\", \"\", []string{}, \"set request union_replace value\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUnionReplaceFile, \"union-replace-file\", \"\", []string{}, \"set request union_replace value in a json/yaml file\")\n\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SetDelimiter, \"delimiter\", \"\", \":::\", \"set update/replace delimiter between path, type, value\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SetTarget, 
\"target\", \"\", \"\", \"set request target\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetRequestFile, \"request-file\", \"\", []string{}, \"set request template file(s)\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SetRequestVars, \"request-vars\", \"\", \"\", \"set request variables file\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.SetDryRun, \"dry-run\", \"\", false, \"prints the set request without initiating a gRPC connection\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetRequestProtoFile, \"set-proto-request-file\", \"\", []string{}, \"set request from prototext file\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.SetNoTrim, \"no-trim\", \"\", false, \"won't trim the input files\")\n\t//\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetReplaceCli, \"replace-cli\", \"\", []string{}, \"a cli command to be sent as a set replace request\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SetReplaceCliFile, \"replace-cli-file\", \"\", \"\", \"path to a file containing a list of commands that will be sent as a set replace request\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUpdateCli, \"update-cli\", \"\", []string{}, \"a cli command to be sent as a set update request\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SetUpdateCliFile, \"update-cli-file\", \"\", \"\", \"path to a file containing a list of commands that will be sent as a set update request\")\n\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SetCommitId, \"commit-id\", \"\", \"\", \"commit ID value\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.SetCommitRequest, \"commit-request\", \"\", false, \"start a commit confirmed transaction\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.SetCommitConfirm, \"commit-confirm\", \"\", false, \"confirm the commit ID\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.SetCommitCancel, \"commit-cancel\", \"\", false, \"cancel the commit\")\n\tcmd.Flags().DurationVarP(&a.Config.LocalFlags.SetCommitRollbackDuration, 
\"rollback-duration\", \"\", 0, \"set the commit rollback duration\")\n\n\tcmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", cmd.Name(), flag.Name), flag)\n\t})\n}\n"
  },
  {
    "path": "pkg/app/setrequest.go",
    "content": "// Copyright 2023 Google LLC\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//      http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage app\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/openconfig/ygot/gnmidiff\"\n\t\"github.com/openconfig/ygot/gnmidiff/gnmiparse\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n)\n\n// InitDiffSetRequestFlags used to init or reset diffSetRequestCmd\n// flags for gnmic-prompt mode\nfunc (a *App) InitDiffSetRequestFlags(cmd *cobra.Command) {\n\tcmd.ResetFlags()\n\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.DiffSetRequestRef, \"ref\", \"\", \"\", \"reference gNMI SetRequest textproto file for comparing against the new SetRequest\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.DiffSetRequestNew, \"new\", \"\", \"\", \"new gNMI SetRequest textproto file for comparing against the reference SetRequest\")\n\tcmd.MarkFlagRequired(\"ref\")\n\tcmd.MarkFlagRequired(\"new\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.DiffSetRequestFull, \"full\", \"f\", false, \"show common values between the two SetRequests\")\n\n\tcmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", \"diff-setrequest\", flag.Name), flag)\n\t})\n}\n\nfunc (a *App) DiffSetRequestRunE(cmd *cobra.Command, args []string) error {\n\tdefer a.InitDiffSetRequestFlags(cmd)\n\n\tformat := gnmidiff.Format{\n\t\tFull: a.Config.LocalFlags.DiffSetRequestFull,\n\t}\n\n\tsrA, err := 
gnmiparse.SetRequestFromFile(a.Config.LocalFlags.DiffSetRequestRef)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrB, err := gnmiparse.SetRequestFromFile(a.Config.LocalFlags.DiffSetRequestNew)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdiff, err := gnmidiff.DiffSetRequest(srA, srB, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprint(os.Stdout, diff.Format(format))\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/app/subscribe.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/manifoldco/promptui\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/target\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n)\n\nconst (\n\tinitLockerRetryTimer = 1 * time.Second\n)\n\nfunc (a *App) SubscribePreRunE(cmd *cobra.Command, args []string) error {\n\ta.Config.SetLocalFlagsFromFile(cmd)\n\n\terr := a.initPluginManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ta.createCollectorDialOpts()\n\treturn nil\n}\n\nfunc (a *App) SubscribeRunE(cmd *cobra.Command, args []string) error {\n\tdefer a.InitSubscribeFlags(cmd)\n\n\t// prompt mode\n\tif a.PromptMode {\n\t\treturn a.SubscribeRunPrompt(cmd, args)\n\t}\n\t//\n\tsubCfg, err := a.Config.GetSubscriptions(cmd)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading subscriptions config: %v\", err)\n\t}\n\n\terr = a.readConfigs()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = a.Config.GetClustering()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = a.Config.GetGNMIServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = 
a.Config.GetAPIServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = a.Config.GetLoader()\n\tif err != nil {\n\t\treturn err\n\t}\n\tnumInputs := len(a.Config.Inputs)\n\tif len(subCfg) == 0 && numInputs == 0 {\n\t\treturn errors.New(\"no subscriptions or inputs configuration found\")\n\t}\n\t// only once mode subscriptions requested\n\tif allSubscriptionsModeOnce(subCfg) {\n\t\treturn a.SubscribeRunONCE(cmd, args)\n\t}\n\t// only poll mode subscriptions requested\n\tif allSubscriptionsModePoll(subCfg) {\n\t\treturn a.SubscribeRunPoll(cmd, args)\n\t}\n\t// stream subscriptions\n\terr = a.initTunnelServer(tunnel.ServerConfig{\n\t\tAddTargetHandler:    a.tunServerAddTargetSubscribeHandler,\n\t\tDeleteTargetHandler: a.tunServerDeleteTargetHandler,\n\t\tRegisterHandler:     a.tunServerRegisterHandler,\n\t\tHandler:             a.tunServerHandler,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = a.Config.GetTargets()\n\tif errors.Is(err, config.ErrNoTargetsFound) {\n\t\tif !a.Config.LocalFlags.SubscribeWatchConfig &&\n\t\t\tlen(a.Config.FileConfig.GetStringMap(\"loader\")) == 0 &&\n\t\t\t!a.Config.UseTunnelServer &&\n\t\t\tnumInputs == 0 {\n\t\t\treturn fmt.Errorf(\"failed reading targets config: %v\", err)\n\t\t}\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"failed reading targets config: %v\", err)\n\t}\n\n\t//\n\tfor {\n\t\terr := a.InitLocker()\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"failed to init locker: %v\", err)\n\t\t\ttime.Sleep(initLockerRetryTimer)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\ta.startAPIServer()\n\ta.startGnmiServer()\n\tgo a.startCluster()\n\ta.startIO()\n\n\tif a.Config.LocalFlags.SubscribeWatchConfig {\n\t\tgo a.watchConfig()\n\t}\n\n\tfor range a.ctx.Done() {\n\t\treturn a.ctx.Err()\n\t}\n\treturn nil\n}\n\nfunc (a *App) subscribeStream(ctx context.Context, tc *types.TargetConfig) {\n\tdefer a.wg.Done()\n\ta.TargetSubscribeStream(ctx, tc)\n}\n\nfunc (a *App) subscribeOnce(ctx context.Context, tc *types.TargetConfig) 
{\n\tdefer a.wg.Done()\n\terr := a.TargetSubscribeOnce(ctx, tc)\n\tif err != nil {\n\t\ta.logError(err)\n\t}\n}\n\nfunc (a *App) subscribePoll(ctx context.Context, tc *types.TargetConfig) {\n\tdefer a.wg.Done()\n\ta.TargetSubscribePoll(ctx, tc)\n}\n\n// InitSubscribeFlags used to init or reset subscribeCmd flags for gnmic-prompt mode\nfunc (a *App) InitSubscribeFlags(cmd *cobra.Command) {\n\tcmd.ResetFlags()\n\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribePrefix, \"prefix\", \"\", \"\", \"subscribe request prefix\")\n\tcmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SubscribePath, \"path\", \"\", []string{}, \"subscribe request paths\")\n\t//cmd.MarkFlagRequired(\"path\")\n\tcmd.Flags().Uint32VarP(&a.Config.LocalFlags.SubscribeQos, \"qos\", \"q\", 0, \"qos marking\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeUpdatesOnly, \"updates-only\", \"\", false, \"only updates to current state should be sent\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeMode, \"mode\", \"\", \"stream\", \"one of: once, stream, poll\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeStreamMode, \"stream-mode\", \"\", \"target-defined\", \"one of: on-change, sample, target-defined\")\n\tcmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeSampleInterval, \"sample-interval\", \"i\", 0,\n\t\t\"sample interval as a decimal number and a suffix unit, such as \\\"10s\\\" or \\\"1m30s\\\"\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeSuppressRedundant, \"suppress-redundant\", \"\", false, \"suppress redundant update if the subscribed value didn't change\")\n\tcmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeHeartbeatInterval, \"heartbeat-interval\", \"\", 0, \"heartbeat interval in case suppress-redundant is enabled\")\n\tcmd.Flags().StringSliceVarP(&a.Config.LocalFlags.SubscribeModel, \"model\", \"\", []string{}, \"subscribe request used model(s)\")\n\tcmd.Flags().BoolVar(&a.Config.LocalFlags.SubscribeQuiet, \"quiet\", false, 
\"suppress stdout printing\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeTarget, \"target\", \"\", \"\", \"subscribe request target\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeSetTarget, \"set-target\", \"\", false, \"set target name in gNMI Path prefix\")\n\tcmd.Flags().StringSliceVarP(&a.Config.LocalFlags.SubscribeName, \"name\", \"n\", []string{}, \"reference subscriptions by name, must be defined in gnmic config file\")\n\tcmd.Flags().StringSliceVarP(&a.Config.LocalFlags.SubscribeOutput, \"output\", \"\", []string{}, \"reference to output groups by name, must be defined in gnmic config file\")\n\tcmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeWatchConfig, \"watch-config\", \"\", false, \"watch configuration changes, add or delete subscribe targets accordingly\")\n\tcmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeBackoff, \"backoff\", \"\", 0, \"backoff time between subscribe requests\")\n\tcmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeLockRetry, \"lock-retry\", \"\", 5*time.Second, \"time to wait between target lock attempts\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeHistorySnapshot, \"history-snapshot\", \"\", \"\", \"sets the snapshot time in a historical subscription, nanoseconds since Unix epoch or RFC3339 format\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeHistoryStart, \"history-start\", \"\", \"\", \"sets the start time in a historical range subscription, nanoseconds since Unix epoch or RFC3339 format\")\n\tcmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeHistoryEnd, \"history-end\", \"\", \"\", \"sets the end time in a historical range subscription, nanoseconds since Unix epoch or RFC3339 format\")\n\tcmd.Flags().Uint32VarP(&a.Config.LocalFlags.SubscribeDepth, \"depth\", \"\", 0, \"depth extension value\")\n\t//\n\tcmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", cmd.Name(), flag.Name), flag)\n\t})\n}\n\nfunc (a 
*App) readConfigs() error {\n\tvar err error\n\t_, err = a.Config.GetOutputs()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading outputs config: %v\", err)\n\t}\n\t_, err = a.Config.GetInputs()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading inputs config: %v\", err)\n\t}\n\t_, err = a.Config.GetActions()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading actions config: %v\", err)\n\t}\n\t_, err = a.Config.GetEventProcessors()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading event processors config: %v\", err)\n\t}\n\t_, err = a.LoadProtoFiles()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed loading proto files: %v\", err)\n\t}\n\treturn nil\n}\n\nconst (\n\tsubscriptionModeONCE = \"ONCE\"\n\tsubscriptionModePOLL = \"POLL\"\n)\n\nfunc (a *App) StartTargetsManager(ctx context.Context) {\n\tdefer func() {\n\t\tfor _, o := range a.Outputs {\n\t\t\to.Close()\n\t\t}\n\t}()\n\n\tfor t := range a.targetsChan {\n\t\tif a.Config.Debug {\n\t\t\ta.Logger.Printf(\"starting target %+v\", t)\n\t\t}\n\t\tif t == nil {\n\t\t\tcontinue\n\t\t}\n\t\ta.operLock.RLock()\n\t\t_, ok := a.activeTargets[t.Config.Name]\n\t\ta.operLock.RUnlock()\n\t\tif ok {\n\t\t\tif a.Config.Debug {\n\t\t\t\ta.Logger.Printf(\"target %q listener already active\", t.Config.Name)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\ta.operLock.Lock()\n\t\ta.activeTargets[t.Config.Name] = struct{}{}\n\t\ta.operLock.Unlock()\n\n\t\ta.Logger.Printf(\"starting target %q listener\", t.Config.Name)\n\t\tgo func(t *target.Target) {\n\t\t\tnumOnceSubscriptions := t.NumberOfOnceSubscriptions()\n\t\t\tremainingOnceSubscriptions := numOnceSubscriptions\n\t\t\tnumSubscriptions := len(t.Subscriptions)\n\t\t\trspChan, errChan := t.ReadSubscriptions()\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase rsp := <-rspChan:\n\t\t\t\t\tsubscribeResponseReceivedCounter.WithLabelValues(t.Config.Name, rsp.SubscriptionConfig.Name).Add(1)\n\t\t\t\t\tif a.Config.Debug {\n\t\t\t\t\t\ta.Logger.Printf(\"target %q: gNMI 
Subscribe Response: %+v\", t.Config.Name, rsp)\n\t\t\t\t\t}\n\t\t\t\t\terr := t.DecodeProtoBytes(rsp.Response)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ta.Logger.Printf(\"target %q: failed to decode proto bytes: %v\", t.Config.Name, err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tm := outputs.Meta{\n\t\t\t\t\t\t\"source\":            t.Config.Name,\n\t\t\t\t\t\t\"format\":            a.Config.Format,\n\t\t\t\t\t\t\"subscription-name\": rsp.SubscriptionName,\n\t\t\t\t\t}\n\t\t\t\t\tif rsp.SubscriptionConfig.Target != \"\" {\n\t\t\t\t\t\tm[\"subscription-target\"] = rsp.SubscriptionConfig.Target\n\t\t\t\t\t}\n\t\t\t\t\tfor k, v := range t.Config.EventTags {\n\t\t\t\t\t\tm[k] = v\n\t\t\t\t\t}\n\n\t\t\t\t\t// Allow overridden outputs per subscription\n\t\t\t\t\t// If both target and subscription have a specified Output, the subscription's Output will be used\n\t\t\t\t\tvar outs []string\n\t\t\t\t\tif len(rsp.SubscriptionConfig.Outputs) > 0 {\n\t\t\t\t\t\touts = rsp.SubscriptionConfig.Outputs\n\t\t\t\t\t} else {\n\t\t\t\t\t\touts = t.Config.Outputs\n\t\t\t\t\t}\n\n\t\t\t\t\ta.export(ctx, rsp.Response, m, outs...)\n\t\t\t\t\tif remainingOnceSubscriptions > 0 {\n\t\t\t\t\t\tif a.subscriptionMode(rsp.SubscriptionName) == subscriptionModeONCE {\n\t\t\t\t\t\t\tswitch rsp.Response.Response.(type) {\n\t\t\t\t\t\t\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\t\t\t\t\t\t\tremainingOnceSubscriptions--\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif remainingOnceSubscriptions == 0 && numSubscriptions == numOnceSubscriptions {\n\t\t\t\t\t\ta.operLock.Lock()\n\t\t\t\t\t\tdelete(a.activeTargets, t.Config.Name)\n\t\t\t\t\t\ta.operLock.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\tcase tErr := <-errChan:\n\t\t\t\t\tif errors.Is(tErr.Err, io.EOF) {\n\t\t\t\t\t\ta.Logger.Printf(\"target %q: subscription %s closed stream(EOF)\", t.Config.Name, tErr.SubscriptionName)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tsubscribeResponseFailedCounter.WithLabelValues(t.Config.Name, 
tErr.SubscriptionName).Inc()\n\t\t\t\t\t\ta.Logger.Printf(\"target %q: subscription %s rcv error: %v\", t.Config.Name, tErr.SubscriptionName, tErr.Err)\n\t\t\t\t\t}\n\t\t\t\t\tif remainingOnceSubscriptions > 0 {\n\t\t\t\t\t\tif a.subscriptionMode(tErr.SubscriptionName) == subscriptionModeONCE {\n\t\t\t\t\t\t\tremainingOnceSubscriptions--\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif remainingOnceSubscriptions == 0 && numSubscriptions == numOnceSubscriptions {\n\t\t\t\t\t\ta.operLock.Lock()\n\t\t\t\t\t\tdelete(a.activeTargets, t.Config.Name)\n\t\t\t\t\t\ta.operLock.Unlock()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\tcase <-t.StopChan:\n\t\t\t\t\ta.operLock.Lock()\n\t\t\t\t\tdelete(a.activeTargets, t.Config.Name)\n\t\t\t\t\ta.operLock.Unlock()\n\t\t\t\t\ta.Logger.Printf(\"target %q: listener stopped\", t.Config.Name)\n\t\t\t\t\treturn\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\ta.operLock.Lock()\n\t\t\t\t\tdelete(a.activeTargets, t.Config.Name)\n\t\t\t\t\ta.operLock.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(t)\n\t}\n\tfor range ctx.Done() {\n\t\treturn\n\t}\n}\n\nfunc (a *App) export(ctx context.Context, rsp *gnmi.SubscribeResponse, m outputs.Meta, outs ...string) {\n\tif rsp == nil {\n\t\treturn\n\t}\n\tgo a.updateCache(ctx, rsp, m)\n\twg := new(sync.WaitGroup)\n\t// target has no explicitly defined outputs\n\tif len(outs) == 0 {\n\t\twg.Add(len(a.Outputs))\n\t\tfor _, o := range a.Outputs {\n\t\t\tgo func(o outputs.Output) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tdefer a.operLock.RUnlock()\n\t\t\t\ta.operLock.RLock()\n\t\t\t\to.Write(ctx, rsp, m)\n\t\t\t}(o)\n\t\t}\n\t\twg.Wait()\n\t\treturn\n\t}\n\t// write to the outputs defined under the target\n\tfor _, name := range outs {\n\t\ta.operLock.RLock()\n\t\tif o, ok := a.Outputs[name]; ok {\n\t\t\twg.Add(1)\n\t\t\tgo func(o outputs.Output) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\to.Write(ctx, rsp, m)\n\t\t\t}(o)\n\t\t}\n\t\ta.operLock.RUnlock()\n\t}\n\twg.Wait()\n}\n\nfunc (a *App) updateCache(ctx context.Context, rsp 
*gnmi.SubscribeResponse, m outputs.Meta) {\n\tif a.c == nil {\n\t\treturn\n\t}\n\tr := proto.Clone(rsp).(*gnmi.SubscribeResponse)\n\tswitch r := r.Response.(type) {\n\tcase *gnmi.SubscribeResponse_Update:\n\t\tif r.Update.GetPrefix() == nil {\n\t\t\tr.Update.Prefix = new(gnmi.Path)\n\t\t}\n\t\tif r.Update.GetPrefix().GetTarget() == \"\" {\n\t\t\tr.Update.Prefix.Target = utils.GetHost(m[\"source\"])\n\t\t}\n\t\ttarget := r.Update.GetPrefix().GetTarget()\n\t\tif target == \"\" {\n\t\t\ta.Logger.Printf(\"response missing target\")\n\t\t\treturn\n\t\t}\n\t\tif a.Config.Debug {\n\t\t\ta.Logger.Printf(\"updating target %q cache\", target)\n\t\t}\n\t\tsub := m[\"subscription-name\"]\n\t\ta.c.Write(ctx, sub, &gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: r.Update}})\n\t}\n}\n\nfunc (a *App) subscriptionMode(name string) string {\n\tif sub, ok := a.Config.Subscriptions[name]; ok {\n\t\treturn strings.ToUpper(sub.Mode)\n\t}\n\treturn \"\"\n}\n\n// polledSubscriptionsTargets returns a map of target name to a list of subscription names that have Mode == POLL\nfunc (a *App) polledSubscriptionsTargets() map[string][]string {\n\tresult := make(map[string][]string)\n\tfor tn, target := range a.Targets {\n\t\tfor _, sub := range target.Subscriptions {\n\t\t\tif strings.ToUpper(sub.Mode) == subscriptionModePOLL {\n\t\t\t\tif result[tn] == nil {\n\t\t\t\t\tresult[tn] = make([]string, 0)\n\t\t\t\t}\n\t\t\t\tresult[tn] = append(result[tn], sub.Name)\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n\nfunc (a *App) handlePolledSubscriptions() error {\n\tpolledTargetsSubscriptions := a.polledSubscriptionsTargets()\n\n\tif len(polledTargetsSubscriptions) == 0 {\n\t\treturn nil\n\t}\n\tmo := &formatters.MarshalOptions{\n\t\tMultiline:        true,\n\t\tIndent:           \"  \",\n\t\tFormat:           a.Config.Format,\n\t\tCalculateLatency: a.Config.GlobalFlags.CalculateLatency,\n\t}\n\t// handle initial responses if updates-only is not set\n\tif 
!a.Config.SubscribeUpdatesOnly {\n\t\tfor targetName := range polledTargetsSubscriptions {\n\t\t\ta.operLock.RLock()\n\t\t\tt, ok := a.Targets[targetName]\n\t\t\ta.operLock.RUnlock()\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"unknown target name %q\", targetName)\n\t\t\t}\n\t\t\trspCh, errCh := t.ReadSubscriptions()\n\t\tSUBS:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase rsp := <-rspCh:\n\t\t\t\t\tb, err := mo.Marshal(rsp.Response, nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"target '%s', subscription '%s': poll response formatting error: %v\", targetName, rsp.SubscriptionName, err)\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Println(string(b))\n\t\t\t\t\tswitch rsp := rsp.Response.Response.(type) {\n\t\t\t\t\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\t\t\t\t\tfmt.Printf(\"received sync response '%t' from '%s'\\n\", rsp.SyncResponse, targetName)\n\t\t\t\t\t\tbreak SUBS // current target done sending initial updates\n\t\t\t\t\t}\n\t\t\t\tcase tErr := <-errCh:\n\t\t\t\t\tif tErr.Err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"target '%s', subscription '%s': poll response error: %v\", targetName, tErr.SubscriptionName, tErr.Err)\n\t\t\t\t\t}\n\t\t\t\tcase <-a.ctx.Done():\n\t\t\t\t\treturn a.ctx.Err()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tpollTargets := make([]string, 0, len(polledTargetsSubscriptions))\n\tfor t := range polledTargetsSubscriptions {\n\t\tpollTargets = append(pollTargets, t)\n\t}\n\tsort.Slice(pollTargets, func(i, j int) bool {\n\t\treturn pollTargets[i] < pollTargets[j]\n\t})\n\ts := promptui.Select{\n\t\tLabel:        \"select target to poll\",\n\t\tItems:        pollTargets,\n\t\tHideSelected: true,\n\t}\n\twaitChan := make(chan struct{}, 1)\n\twaitChan <- struct{}{}\n\nOUTER:\n\tfor {\n\t\tselect {\n\t\tcase <-waitChan:\n\t\t\t_, name, err := s.Run()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"failed selecting target to poll: %v\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tss := promptui.Select{\n\t\t\t\tLabel:        \"select subscription 
to poll\",\n\t\t\t\tItems:        polledTargetsSubscriptions[name],\n\t\t\t\tHideSelected: true,\n\t\t\t}\n\t\t\t_, subName, err := ss.Run()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"failed selecting subscription to poll: %v\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = a.clientSubscribePoll(a.Context(), name, subName)\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\tfmt.Printf(\"target '%s', subscription '%s': poll response error:%v\\n\", name, subName, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ta.operLock.RLock()\n\t\t\tt, ok := a.Targets[name]\n\t\t\ta.operLock.RUnlock()\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"unknown target name %q\", name)\n\t\t\t}\n\t\t\trspCh, errCh := t.ReadSubscriptions()\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-a.Context().Done():\n\t\t\t\t\treturn a.Context().Err()\n\t\t\t\tcase tErr := <-errCh:\n\t\t\t\t\tif tErr.Err != nil {\n\t\t\t\t\t\tfmt.Printf(\"received error from target '%s': %v\\n\", name, err)\n\t\t\t\t\t\twaitChan <- struct{}{}\n\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t}\n\t\t\t\tcase rsp, ok := <-rspCh:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\twaitChan <- struct{}{}\n\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t}\n\t\t\t\t\tif rsp == nil {\n\t\t\t\t\t\tfmt.Printf(\"received empty response from target '%s'\\n\", name)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tswitch rsp := rsp.Response.Response.(type) {\n\t\t\t\t\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\t\t\t\t\tfmt.Printf(\"received sync response '%t' from '%s'\\n\", rsp.SyncResponse, name)\n\t\t\t\t\t\twaitChan <- struct{}{}\n\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t}\n\t\t\t\t\tb, err := mo.Marshal(rsp.Response, nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Printf(\"target '%s', subscription '%s': poll response formatting error:%v\\n\", name, subName, err)\n\t\t\t\t\t\tfmt.Println(rsp.Response)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Println(string(b))\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-a.ctx.Done():\n\t\t\treturn a.Context().Err()\n\t\t}\n\t}\n\n}\n\nfunc 
(a *App) startIO() {\n\tgo a.StartTargetsManager(a.ctx)\n\ta.InitOutputs(a.ctx)\n\ta.InitInputs(a.ctx)\n\n\tif !a.inCluster() {\n\t\tgo a.startLoader(a.ctx)\n\t\tvar limiter *time.Ticker\n\t\tif a.Config.LocalFlags.SubscribeBackoff > 0 {\n\t\t\tlimiter = time.NewTicker(a.Config.LocalFlags.SubscribeBackoff)\n\t\t}\n\n\t\tif !a.Config.UseTunnelServer {\n\t\t\tfor _, tc := range a.Config.Targets {\n\t\t\t\ta.wg.Add(1)\n\t\t\t\tgo a.subscribeStream(a.ctx, tc)\n\t\t\t\tif limiter != nil {\n\t\t\t\t\t<-limiter.C\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif limiter != nil {\n\t\t\tlimiter.Stop()\n\t\t}\n\t\ta.wg.Wait()\n\t}\n}\n\nfunc allSubscriptionsModeOnce(subs map[string]*types.SubscriptionConfig) bool {\n\tif len(subs) == 0 {\n\t\treturn false\n\t}\n\tfor _, sub := range subs {\n\t\tif strings.ToUpper(sub.Mode) != \"ONCE\" {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc allSubscriptionsModePoll(subs map[string]*types.SubscriptionConfig) bool {\n\tif len(subs) == 0 {\n\t\treturn false\n\t}\n\tfor _, sub := range subs {\n\t\tif strings.ToUpper(sub.Mode) != \"POLL\" {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "pkg/app/subscribe_once.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc (a *App) SubscribeRunONCE(_ *cobra.Command, _ []string) error {\n\ta.c = nil // todo:\n\terr := a.initTunnelServer(tunnel.ServerConfig{\n\t\tAddTargetHandler:    a.tunServerAddTargetHandler,\n\t\tDeleteTargetHandler: a.tunServerDeleteTargetHandler,\n\t\tRegisterHandler:     a.tunServerRegisterHandler,\n\t\tHandler:             a.tunServerHandler,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to init tunnel server: %v\", err)\n\t}\n\t_, err = a.GetTargets()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading targets config: %v\", err)\n\t}\n\terr = a.readConfigs()\n\tif err != nil {\n\t\treturn err\n\t}\n\t//\n\ta.InitOutputs(a.ctx)\n\n\tvar limiter *time.Ticker\n\tif a.Config.LocalFlags.SubscribeBackoff > 0 {\n\t\tlimiter = time.NewTicker(a.Config.LocalFlags.SubscribeBackoff)\n\t}\n\tnumTargets := len(a.Config.Targets)\n\ta.errCh = make(chan error, numTargets)\n\ta.wg.Add(numTargets)\n\tfor _, tc := range a.Config.Targets {\n\t\tgo a.subscribeOnce(a.ctx, tc)\n\t\tif limiter != nil {\n\t\t\t<-limiter.C\n\t\t}\n\t}\n\tif limiter != nil {\n\t\tlimiter.Stop()\n\t}\n\ta.wg.Wait()\n\treturn a.checkErrors()\n}\n"
  },
  {
    "path": "pkg/app/subscribe_poll.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc (a *App) SubscribeRunPoll(cmd *cobra.Command, args []string) error {\n\terr := a.initTunnelServer(tunnel.ServerConfig{\n\t\tAddTargetHandler:    a.tunServerAddTargetHandler,\n\t\tDeleteTargetHandler: a.tunServerDeleteTargetHandler,\n\t\tRegisterHandler:     a.tunServerRegisterHandler,\n\t\tHandler:             a.tunServerHandler,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to init tunnel server: %v\", err)\n\t}\n\t_, err = a.GetTargets()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading targets config: %v\", err)\n\t}\n\n\terr = a.readConfigs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo a.StartTargetsManager(a.ctx)\n\n\ta.wg.Add(len(a.Config.Targets))\n\tfor _, tc := range a.Config.Targets {\n\t\tgo a.subscribePoll(a.ctx, tc)\n\t}\n\ta.wg.Wait()\n\treturn a.handlePolledSubscriptions()\n}\n"
  },
  {
    "path": "pkg/app/subscribe_prompt.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc (a *App) SubscribeRunPrompt(cmd *cobra.Command, args []string) error {\n\t// stop running subscriptions\n\tfor _, t := range a.Targets {\n\t\tt.StopSubscriptions()\n\t}\n\t// reset subscriptions config map\n\ta.Config.Subscriptions = make(map[string]*types.SubscriptionConfig)\n\n\t// read targets\n\t_, err := a.Config.GetTargets()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading targets config: %v\", err)\n\t}\n\tsubCfg, err := a.Config.GetSubscriptions(cmd)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed reading subscriptions config: %v\", err)\n\t}\n\t// only once mode subscriptions requested\n\tif allSubscriptionsModeOnce(subCfg) {\n\t\treturn a.SubscribeRunONCE(cmd, args)\n\t}\n\t// only poll mode subscriptions requested\n\tif allSubscriptionsModePoll(subCfg) {\n\t\treturn a.SubscribeRunPoll(cmd, args)\n\t}\n\t// stream+once mode subscriptions\n\terr = a.readConfigs()\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo a.StartTargetsManager(a.ctx)\n\n\ta.InitOutputs(a.ctx)\n\n\tvar limiter *time.Ticker\n\tif a.Config.LocalFlags.SubscribeBackoff > 0 {\n\t\tlimiter = time.NewTicker(a.Config.LocalFlags.SubscribeBackoff)\n\t}\n\n\ta.wg.Add(len(a.Config.Targets))\n\tfor _, tc := range a.Config.Targets {\n\t\tgo a.subscribeStream(a.ctx, tc)\n\t\tif limiter != nil {\n\t\t\t<-limiter.C\n\t\t}\n\t}\n\tif limiter != nil 
{\n\t\tlimiter.Stop()\n\t}\n\ta.wg.Wait()\n\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/app/target.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/fullstorydev/grpcurl\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/target\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\n// initTarget initializes a new target given its name.\n// it assumes that the configLock as well as the operLock\n// are acquired.\nfunc (a *App) initTarget(tc *types.TargetConfig) (*target.Target, error) {\n\tt, ok := a.Targets[tc.Name]\n\tif !ok {\n\t\tt := target.NewTarget(tc)\n\t\tfor _, subName := range tc.Subscriptions {\n\t\t\tif sub, ok := a.Config.Subscriptions[subName]; ok {\n\t\t\t\tt.Subscriptions[subName] = sub\n\t\t\t}\n\t\t}\n\t\tif len(t.Subscriptions) == 0 {\n\t\t\tfor n, sub := range a.Config.Subscriptions {\n\t\t\t\tt.Subscriptions[n] = sub\n\t\t\t}\n\t\t}\n\t\terr := a.parseProtoFiles(t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ta.Targets[t.Config.Name] = t\n\t\treturn t, nil\n\t}\n\treturn t, nil\n}\n\nfunc (a *App) stopTarget(ctx context.Context, name string) error {\n\tif a.Targets == nil {\n\t\treturn nil\n\t}\n\ta.operLock.Lock()\n\tdefer a.operLock.Unlock()\n\tif _, ok := a.Targets[name]; !ok {\n\t\treturn fmt.Errorf(\"target %q does not exist\", name)\n\t}\n\n\ta.Logger.Printf(\"stopping target %q\", name)\n\tt := a.Targets[name]\n\tt.StopSubscriptions()\n\tdelete(a.Targets, name)\n\tif a.locker == nil {\n\t\treturn nil\n\t}\n\treturn a.locker.Unlock(ctx, a.targetLockKey(name))\n}\n\nfunc (a *App) DeleteTarget(ctx context.Context, name string) error {\n\tif a.Targets 
== nil {\n\t\treturn nil\n\t}\n\tif !a.targetConfigExists(name) {\n\t\treturn fmt.Errorf(\"target %q does not exist\", name)\n\t}\n\ta.configLock.Lock()\n\tdelete(a.Config.Targets, name)\n\ta.configLock.Unlock()\n\ta.Logger.Printf(\"target %q deleted from config\", name)\n\t// delete from oper map\n\ta.operLock.Lock()\n\tdefer a.operLock.Unlock()\n\tif cfn, ok := a.targetsLockFn[name]; ok {\n\t\tcfn()\n\t}\n\tif a.c != nil {\n\t\ta.c.DeleteTarget(name)\n\t}\n\tif t, ok := a.Targets[name]; ok {\n\t\tdelete(a.Targets, name)\n\t\tt.Close()\n\t\tif a.locker != nil {\n\t\t\treturn a.locker.Unlock(ctx, a.targetLockKey(name))\n\t\t}\n\t}\n\treturn nil\n}\n\n// UpdateTargetConfig updates the subscriptions for an existing target\nfunc (a *App) UpdateTargetSubscription(ctx context.Context, name string, subs []string) error {\n\ta.configLock.Lock()\n\tfor _, subName := range subs {\n\t\tif _, ok := a.Config.Subscriptions[subName]; !ok {\n\t\t\ta.configLock.Unlock()\n\t\t\treturn fmt.Errorf(\"subscription %q does not exist\", subName)\n\t\t}\n\t}\n\ttargetConfig := a.Config.Targets[name]\n\ttargetConfig.Subscriptions = subs\n\ta.configLock.Unlock()\n\n\tif err := a.stopTarget(ctx, name); err != nil {\n\t\treturn err\n\t}\n\n\tgo a.TargetSubscribeStream(ctx, targetConfig)\n\treturn nil\n}\n\n// AddTargetConfig adds a *TargetConfig to the configuration map\nfunc (a *App) AddTargetConfig(tc *types.TargetConfig) {\n\ta.Logger.Printf(\"adding target %s\", tc)\n\t_, ok := a.Config.Targets[tc.Name]\n\tif ok {\n\t\treturn\n\t}\n\tif tc.BufferSize <= 0 {\n\t\ttc.BufferSize = a.Config.TargetBufferSize\n\t}\n\tif tc.RetryTimer <= 0 {\n\t\ttc.RetryTimer = a.Config.Retry\n\t}\n\n\ta.configLock.Lock()\n\tdefer a.configLock.Unlock()\n\ta.Config.Targets[tc.Name] = tc\n}\n\nfunc (a *App) parseProtoFiles(t *target.Target) error {\n\tif len(t.Config.ProtoFiles) == 0 {\n\t\tt.RootDesc = a.rootDesc\n\t\treturn nil\n\t}\n\ta.Logger.Printf(\"target %q loading proto files...\", 
t.Config.Name)\n\tdescSource, err := grpcurl.DescriptorSourceFromProtoFiles(t.Config.ProtoDirs, t.Config.ProtoFiles...)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to load proto files: %v\", err)\n\t\treturn err\n\t}\n\tt.RootDesc, err = descSource.FindSymbol(\"Nokia.SROS.root\")\n\tif err != nil {\n\t\ta.Logger.Printf(\"target %q could not get symbol 'Nokia.SROS.root': %v\", t.Config.Name, err)\n\t\treturn err\n\t}\n\ta.Logger.Printf(\"target %q loaded proto files\", t.Config.Name)\n\treturn nil\n}\n\nfunc (a *App) targetConfigExists(name string) bool {\n\ta.configLock.RLock()\n\t_, ok := a.Config.Targets[name]\n\ta.configLock.RUnlock()\n\treturn ok\n}\n"
  },
  {
    "path": "pkg/app/tree.go",
    "content": "package app\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n)\n\nfunc (a *App) InitTreeFlags(cmd *cobra.Command) {\n\tcmd.ResetFlags()\n\t//\n\tcmd.Flags().BoolVar(&a.Config.TreeFlat, \"flat\", false, \"print flat commands tree\")\n\tcmd.Flags().BoolVar(&a.Config.TreeDetails, \"details\", false, \"print commands flags\")\n\t//\n\tcmd.Flags().VisitAll(func(flag *pflag.Flag) {\n\t\ta.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", cmd.Name(), flag.Name), flag)\n\t})\n}\n\nfunc (a *App) RunETree(cmd *cobra.Command, args []string) error {\n\tif a.Config.TreeFlat {\n\t\ttreeFlat(a.RootCmd, \"\")\n\t\treturn nil\n\t}\n\ta.tree(a.RootCmd, \"\")\n\treturn nil\n}\n\nfunc (a *App) tree(c *cobra.Command, indent string) error {\n\tfmt.Printf(\"%s\", c.Use)\n\tif !c.HasSubCommands() {\n\t\tif c.HasLocalFlags() && a.Config.TreeDetails {\n\t\t\tsections := make([]string, 0)\n\t\t\tc.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\t\t\tflagSection := \"\"\n\t\t\t\tif flag.Shorthand != \"\" && flag.ShorthandDeprecated == \"\" {\n\t\t\t\t\tflagSection = fmt.Sprintf(\"[-%s | --%s]\", flag.Shorthand, flag.Name)\n\t\t\t\t} else {\n\t\t\t\t\tflagSection = fmt.Sprintf(\"[--%s]\", flag.Name)\n\t\t\t\t}\n\t\t\t\tsections = append(sections, flagSection)\n\t\t\t})\n\t\t\tfmt.Printf(\" %s\", strings.Join(sections, \" \"))\n\t\t}\n\t}\n\tfmt.Printf(\"\\n\")\n\tsubCmds := c.Commands()\n\tnumSubCommands := len(subCmds)\n\tfor i, subC := range subCmds {\n\t\tadd := \" │   \"\n\t\tif i == numSubCommands-1 {\n\t\t\tfmt.Print(indent + \" └─── \")\n\t\t\tadd = \"     \"\n\t\t} else {\n\t\t\tfmt.Print(indent + \" ├─── \")\n\t\t}\n\n\t\terr := a.tree(subC, indent+add)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc treeFlat(c *cobra.Command, prefix string) {\n\tprefix += \" \" + c.Use\n\tfmt.Println(prefix)\n\tfor _, subC := range c.Commands() {\n\t\ttreeFlat(subC, prefix)\n\t}\n}\n"
  },
  {
    "path": "pkg/app/tunnel.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\tgrpc_prometheus \"github.com/grpc-ecosystem/go-grpc-prometheus\"\n\ttpb \"github.com/openconfig/grpctunnel/proto/tunnel\"\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n)\n\nfunc (a *App) initTunnelServer(tsc tunnel.ServerConfig) error {\n\tif !a.Config.UseTunnelServer {\n\t\treturn nil\n\t}\n\terr := a.Config.GetTunnelServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo func() {\n\t\terr = a.startTunnelServer(tsc)\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"failed to start tunnel server: %v\", err)\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc (a *App) startTunnelServer(tsc tunnel.ServerConfig) error {\n\tif a.Config.TunnelServer == nil {\n\t\treturn nil\n\t}\n\tvar err error\n\ta.tunServer, err = tunnel.NewServer(tsc)\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to create a tunnel server: %v\", err)\n\t\treturn err\n\n\t}\n\t// create tunnel server options\n\topts, err := a.gRPCTunnelServerOpts()\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed to build gRPC tunnel server options: %v\", err)\n\t\treturn err\n\t}\n\ta.grpcTunnelSrv = grpc.NewServer(opts...)\n\t// register the tunnel service with the grpc server\n\ttpb.RegisterTunnelServer(a.grpcTunnelSrv, a.tunServer)\n\t//\n\tvar l net.Listener\n\tnetwork := 
\"tcp\"\n\taddr := a.Config.TunnelServer.Address\n\tif strings.HasPrefix(a.Config.TunnelServer.Address, \"unix://\") {\n\t\tnetwork = \"unix\"\n\t\taddr = strings.TrimPrefix(addr, \"unix://\")\n\t}\n\n\tctx, cancel := context.WithCancel(a.ctx)\n\tfor {\n\t\tl, err = net.Listen(network, addr)\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"failed to start gRPC tunnel server listener: %v\", err)\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tgo func() {\n\t\terr = a.grpcTunnelSrv.Serve(l)\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"gRPC tunnel server shutdown: %v\", err)\n\t\t}\n\t\tcancel()\n\t}()\n\tdefer a.grpcTunnelSrv.Stop()\n\tfor range ctx.Done() {\n\t}\n\treturn ctx.Err()\n}\n\nfunc (a *App) gRPCTunnelServerOpts() ([]grpc.ServerOption, error) {\n\topts := make([]grpc.ServerOption, 0)\n\tif a.Config.TunnelServer.EnableMetrics && a.reg != nil {\n\t\tgrpcMetrics := grpc_prometheus.NewServerMetrics()\n\t\topts = append(opts,\n\t\t\tgrpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()),\n\t\t\tgrpc.UnaryInterceptor(grpcMetrics.UnaryServerInterceptor()),\n\t\t)\n\t\ta.reg.MustRegister(grpcMetrics)\n\t}\n\n\tif a.Config.TunnelServer.TLS == nil {\n\t\treturn opts, nil\n\t}\n\n\ttlscfg, err := utils.NewTLSConfig(\n\t\ta.Config.TunnelServer.TLS.CaFile,\n\t\ta.Config.TunnelServer.TLS.CertFile,\n\t\ta.Config.TunnelServer.TLS.KeyFile,\n\t\ta.Config.TunnelServer.TLS.ClientAuth,\n\t\tfalse,\n\t\ttrue,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif tlscfg != nil {\n\t\topts = append(opts, grpc.Creds(credentials.NewTLS(tlscfg)))\n\t}\n\n\treturn opts, nil\n}\n\nfunc (a *App) tunServerAddTargetHandler(tt tunnel.Target) error {\n\ta.Logger.Printf(\"tunnel server discovered target %+v\", tt)\n\ttc := a.getTunnelTargetMatch(tt)\n\tif tc == nil {\n\t\ta.Logger.Printf(\"target %+v ignored\", tt)\n\t\treturn nil\n\t}\n\ta.ttm.Lock()\n\ta.tunTargets[tt] = struct{}{}\n\ta.ttm.Unlock()\n\treturn nil\n}\n\nfunc (a *App) 
tunServerAddTargetSubscribeHandler(tt tunnel.Target) error {\n\ta.Logger.Printf(\"tunnel server discovered target %+v\", tt)\n\ttc := a.getTunnelTargetMatch(tt)\n\tif tc == nil {\n\t\ta.Logger.Printf(\"target %+v ignored\", tt)\n\t\treturn nil\n\t}\n\ta.ttm.Lock()\n\ta.tunTargets[tt] = struct{}{}\n\ta.AddTargetConfig(tc)\n\ta.ttm.Unlock()\n\n\ta.operLock.Lock()\n\tt, err := a.initTarget(tc)\n\ta.operLock.Unlock()\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.targetsChan <- t\n\ta.wg.Add(1)\n\tgo a.subscribeStream(a.ctx, tc)\n\treturn nil\n}\n\nfunc (a *App) tunServerDeleteTargetHandler(tt tunnel.Target) error {\n\ta.Logger.Printf(\"tunnel server target %+v deregister request\", tt)\n\ta.ttm.Lock()\n\tdefer a.ttm.Unlock()\n\tif cfn, ok := a.tunTargetCfn[tt]; ok {\n\t\tcfn()\n\t\tdelete(a.tunTargetCfn, tt)\n\t\tdelete(a.tunTargets, tt)\n\t\tif err := a.DeleteTarget(a.ctx, tt.ID); err != nil {\n\t\t\ta.Logger.Printf(\"failed deleting tunnel target %q: %v\", tt.ID, err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *App) tunServerRegisterHandler(ss tunnel.ServerSession) error {\n\treturn nil\n}\n\nfunc (a *App) tunServerHandler(ss tunnel.ServerSession, rwc io.ReadWriteCloser) error {\n\treturn nil\n}\n\n// tunDialerFn is used to build a grpc Option that sets a custom dialer for tunnel targets.\nfunc (a *App) tunDialerFn(ctx context.Context, tc *types.TargetConfig) func(context.Context, string) (net.Conn, error) {\n\treturn func(_ context.Context, _ string) (net.Conn, error) {\n\t\ttt := tunnel.Target{ID: tc.Name, Type: tc.TunnelTargetType}\n\t\ta.ttm.RLock()\n\t\t_, ok := a.tunTargets[tt]\n\t\ta.ttm.RUnlock()\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"unknown tunnel target %+v\", tt)\n\t\t}\n\t\ta.Logger.Printf(\"dialing tunnel connection for tunnel target %q\", tc.Name)\n\t\tconn, err := tunnel.ServerConn(ctx, a.tunServer, &tt)\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"failed dialing tunnel connection for target %q: %v\", tc.Name, err)\n\t\t}\n\t\treturn conn, 
err\n\t}\n}\n\nfunc (a *App) getTunnelTargetMatch(tt tunnel.Target) *types.TargetConfig {\n\tif len(a.Config.TunnelServer.Targets) == 0 {\n\t\t// no target matches defined, accept only GNMI_GNOI type\n\t\tif tt.Type == \"GNMI_GNOI\" {\n\t\t\t// create a default target config\n\t\t\ttc := &types.TargetConfig{Name: tt.ID, TunnelTargetType: tt.Type}\n\t\t\terr := a.Config.SetTargetConfigDefaults(tc)\n\t\t\tif err != nil {\n\t\t\t\ta.Logger.Printf(\"failed to set target %q config defaults: %v\", tt.ID, err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\ttc.Address = tc.Name\n\t\t\treturn tc\n\t\t}\n\t\treturn nil\n\t}\n\tfor _, tm := range a.Config.TunnelServer.Targets {\n\t\t// check if the discovered target matches one of the configured types\n\t\tok, err := regexp.MatchString(tm.Type, tt.Type)\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"regex %q eval failed with string %q: %v\", tm.Type, tt.Type, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\t// check if the discovered target matches one of the configured IDs\n\t\tok, err = regexp.MatchString(tm.ID, tt.ID)\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"regex %q eval failed with string %q: %v\", tm.ID, tt.ID, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\t// target has a match\n\t\tif a.Config.Debug {\n\t\t\ta.Logger.Printf(\"target %+v matches %+v\", tt, tm)\n\t\t}\n\t\ttc := new(types.TargetConfig)\n\t\t*tc = tm.Config\n\t\ttc.Name = tt.ID\n\t\ttc.TunnelTargetType = tt.Type\n\t\terr = a.Config.SetTargetConfigDefaults(tc)\n\t\tif err != nil {\n\t\t\ta.Logger.Printf(\"failed to set target %q config defaults: %v\", tt.ID, err)\n\t\t\tcontinue\n\t\t}\n\t\ttc.Address = tc.Name\n\t\treturn tc\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/app/utils.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n)\n\nfunc (a *App) printCapResponse(printPrefix string, msg *gnmi.CapabilityResponse) {\n\tsb := strings.Builder{}\n\tsb.WriteString(\"gNMI version: \")\n\tsb.WriteString(msg.GNMIVersion)\n\tsb.WriteString(\"\\n\")\n\tif a.Config.LocalFlags.CapabilitiesVersion {\n\t\treturn\n\t}\n\tsb.WriteString(\"supported models:\\n\")\n\tfor _, sm := range msg.SupportedModels {\n\t\tsb.WriteString(\"  - \")\n\t\tsb.WriteString(sm.GetName())\n\t\tsb.WriteString(\", \")\n\t\tsb.WriteString(sm.GetOrganization())\n\t\tsb.WriteString(\", \")\n\t\tsb.WriteString(sm.GetVersion())\n\t\tsb.WriteString(\"\\n\")\n\t}\n\tsb.WriteString(\"supported encodings:\\n\")\n\tfor _, se := range msg.SupportedEncodings {\n\t\tsb.WriteString(\"  - \")\n\t\tsb.WriteString(se.String())\n\t\tsb.WriteString(\"\\n\")\n\t}\n\tfmt.Fprintf(a.out, \"%s\\n\", indent(printPrefix, sb.String()))\n}\n\nfunc indent(prefix, s string) string {\n\tif prefix == \"\" {\n\t\treturn s\n\t}\n\tprefix = \"\\n\" + strings.TrimRight(prefix, \"\\n\")\n\tlines := strings.Split(s, \"\\n\")\n\treturn strings.TrimLeft(fmt.Sprintf(\"%s%s\", prefix, strings.Join(lines, prefix)), \"\\n\")\n}\n"
  },
  {
    "path": "pkg/app/version.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage app\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/exec\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/version\"\n\t\"github.com/spf13/cobra\"\n)\n\nvar downloadURL = \"https://github.com/openconfig/gnmic/raw/main/install.sh\"\n\nfunc (a *App) VersionRun(cmd *cobra.Command, args []string) {\n\tif a.Config.Format != \"json\" {\n\t\tfmt.Printf(\"version : %s\\n\", version.Version)\n\t\tfmt.Printf(\" commit : %s\\n\", version.Commit)\n\t\tfmt.Printf(\"   date : %s\\n\", version.Date)\n\t\tfmt.Printf(\" gitURL : %s\\n\", version.GitURL)\n\t\tfmt.Printf(\"   docs : https://gnmic.openconfig.net\\n\")\n\t\treturn\n\t}\n\tb, err := json.Marshal(map[string]string{\n\t\t\"version\": version.Version,\n\t\t\"commit\":  version.Commit,\n\t\t\"date\":    version.Date,\n\t\t\"gitURL\":  version.GitURL,\n\t\t\"docs\":    \"https://gnmic.openconfig.net\",\n\t}) // need indent? 
use jq\n\tif err != nil {\n\t\ta.Logger.Printf(\"failed: %v\", err)\n\t\tif !a.Config.Log {\n\t\t\tfmt.Printf(\"failed: %v\\n\", err)\n\t\t}\n\t\treturn\n\t}\n\tfmt.Println(string(b))\n}\n\nfunc (a *App) VersionUpgradeRun(cmd *cobra.Command, args []string) error {\n\tf, err := os.CreateTemp(\"\", \"gnmic\")\n\tdefer os.Remove(f.Name())\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = downloadFile(downloadURL, f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar c *exec.Cmd\n\tswitch a.Config.LocalFlags.UpgradeUsePkg {\n\tcase true:\n\t\tc = exec.Command(\"bash\", f.Name(), \"--use-pkg\")\n\tcase false:\n\t\tc = exec.Command(\"bash\", f.Name())\n\t}\n\n\tc.Stdout = os.Stdout\n\tc.Stderr = os.Stderr\n\terr = c.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// downloadFile will download a file from a URL and write its content to a file\nfunc downloadFile(url string, file *os.File) error {\n\tclient := http.Client{Timeout: 30 * time.Second}\n\t// Get the data\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tresp, err := client.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\t// Write the body to file\n\t_, err = io.Copy(file, resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/cache/cache.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cache\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"google.golang.org/protobuf/proto\"\n)\n\ntype CacheType string\n\nconst (\n\tcacheType_OC    CacheType = \"oc\"\n\tcacheType_Redis CacheType = \"redis\"\n\tcacheType_NATS  CacheType = \"nats\"\n\tcacheType_JS    CacheType = \"jetstream\"\n)\n\nconst (\n\tReadMode_Once           = \"once\"\n\tReadMode_StreamOnChange = \"stream_on_change\"\n\tReadMode_StreamSample   = \"stream_sample\"\n)\n\ntype Cache interface {\n\t// Write inserts the proto.Message (SubscribeResponse) into the cache under a subscription called `sub`\n\tWrite(ctx context.Context, sub string, m proto.Message)\n\t// ReadAll, reads entries from the local cache, return the entries grouped by subscription name.\n\tReadAll() (map[string][]*gnmi.Notification, error)\n\t// Read, reads a single path value from the cache filtering by subscription and target name\n\tRead(sub, target string, p *gnmi.Path) (map[string][]*gnmi.Notification, error)\n\t// Subscribes to the local cache and returns the notification over a channel\n\tSubscribe(ctx context.Context, so *ReadOpts) chan *Notification\n\t// Stops the cache\n\tStop()\n\t// DeleteTarget deletes the target from the cache by name\n\tDeleteTarget(name string)\n\t// SetLogger sets a logger for the cache\n\tSetLogger(l *log.Logger)\n}\n\ntype Config struct {\n\tType       CacheType     `mapstructure:\"type,omitempty\" json:\"type,omitempty\"`\n\tAddress    string   
     `mapstructure:\"address,omitempty\" json:\"address,omitempty\"`\n\tTimeout    time.Duration `mapstructure:\"timeout,omitempty\" json:\"timeout,omitempty\"`\n\tExpiration time.Duration `mapstructure:\"expiration,omitempty\" json:\"expiration,omitempty\"`\n\tDebug      bool          `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\t// NATS, JS and Redis cfg options\n\tUsername string `mapstructure:\"username,omitempty\" json:\"username,omitempty\"`\n\tPassword string `mapstructure:\"password,omitempty\" json:\"password,omitempty\"`\n\n\t// JS cfg options\n\tMaxBytes               int64         `mapstructure:\"max-bytes,omitempty\" json:\"max-bytes,omitempty\"`\n\tMaxMsgsPerSubscription int64         `mapstructure:\"max-msgs-per-subscription,omitempty\" json:\"max-msgs-per-subscription,omitempty\"`\n\tFetchBatchSize         int           `mapstructure:\"fetch-batch-size,omitempty\" json:\"fetch-batch-size,omitempty\"`\n\tFetchWaitTime          time.Duration `mapstructure:\"fetch-wait-time,omitempty\" json:\"fetch-wait-time,omitempty\"`\n}\n\nfunc (c *Config) setDefaults() {\n\tif c.Address == \"\" {\n\t\tswitch c.Type {\n\t\tcase cacheType_Redis:\n\t\t\tc.Address = defaultRedisAddress\n\t\tcase cacheType_JS, cacheType_NATS:\n\t\t\tc.Address = defaultNATSAddress\n\t\t}\n\t}\n\tif c.Timeout == 0 {\n\t\tc.Timeout = defaultTimeout\n\t}\n\tif c.Expiration == 0 {\n\t\tc.Expiration = defaultExpiration\n\t}\n\n\tif c.Type != cacheType_JS {\n\t\treturn\n\t}\n\n\tif c.MaxMsgsPerSubscription <= 0 {\n\t\tc.MaxMsgsPerSubscription = defaultMaxMsgs\n\t}\n\tif c.MaxBytes <= 0 {\n\t\tc.MaxBytes = defaultMaxBytes\n\t}\n\tif c.FetchBatchSize <= 0 {\n\t\tc.FetchBatchSize = defaultFetchBatchSize\n\t}\n\tif c.FetchWaitTime <= 0 {\n\t\tc.FetchWaitTime = defaultFetchWaitTime\n\t}\n}\n\nfunc New(c *Config, opts ...Option) (Cache, error) {\n\tif c == nil {\n\t\tc = &Config{Type: cacheType_OC}\n\t}\n\tif c.Type == \"\" {\n\t\tc.Type = cacheType_OC\n\t}\n\tswitch c.Type 
{\n\tcase cacheType_OC:\n\t\treturn newGNMICache(c, \"\", opts...), nil\n\tcase cacheType_NATS:\n\t\treturn newNATSCache(c, opts...)\n\tcase cacheType_JS:\n\t\treturn newJetStreamCache(c, opts...)\n\tcase cacheType_Redis:\n\t\treturn newRedisCache(c, opts...)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown cache type: %q\", c.Type)\n\t}\n}\n\ntype ReadOpts struct {\n\tSubscription      string\n\tTarget            string\n\tPaths             []*gnmi.Path\n\tMode              string\n\tSampleInterval    time.Duration\n\tHeartbeatInterval time.Duration\n\tSuppressRedundant bool\n\tUpdatesOnly       bool\n\tOverrideTS        bool\n\n\tm        *sync.RWMutex\n\tlastSent map[string]*gnmi.TypedValue\n}\n\nfunc (ro *ReadOpts) setDefaults() {\n\tif ro.Target == \"\" {\n\t\tro.Target = \"*\"\n\t}\n\tif ro.Mode == \"\" {\n\t\tro.Mode = ReadMode_StreamOnChange\n\t}\n\tif len(ro.Paths) == 0 {\n\t\tro.Paths = []*gnmi.Path{{}}\n\t}\n\tif ro.Mode == ReadMode_StreamSample && ro.SampleInterval <= 0 {\n\t\tro.SampleInterval = 10 * time.Second\n\t}\n\tif ro.SuppressRedundant {\n\t\tro.m = new(sync.RWMutex)\n\t\tro.lastSent = make(map[string]*gnmi.TypedValue)\n\t}\n}\n\ntype Notification struct {\n\tName         string\n\tNotification *gnmi.Notification\n\tErr          error\n}\n"
  },
  {
    "path": "pkg/cache/go.mod",
    "content": "module github.com/openconfig/gnmic/pkg/cache\n\ngo 1.24.12\n\nrequire (\n\tgithub.com/go-redis/redis/v8 v8.11.5\n\tgithub.com/nats-io/nats-server/v2 v2.12.4\n\tgithub.com/nats-io/nats.go v1.49.0\n\tgithub.com/openconfig/gnmi v0.14.1\n\tgithub.com/openconfig/gnmic/pkg/api v0.1.10\n\tgoogle.golang.org/protobuf v1.36.11\n)\n\nrequire (\n\tbitbucket.org/creachadair/stringset v0.0.14 // indirect\n\tgithub.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op // indirect\n\tgithub.com/cespare/xxhash/v2 v2.3.0 // indirect\n\tgithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect\n\tgithub.com/fsnotify/fsnotify v1.9.0 // indirect\n\tgithub.com/golang/glog v1.2.5 // indirect\n\tgithub.com/google/go-tpm v0.9.8 // indirect\n\tgithub.com/klauspost/compress v1.18.3 // indirect\n\tgithub.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 // indirect\n\tgithub.com/nats-io/jwt/v2 v2.8.0 // indirect\n\tgithub.com/nats-io/nkeys v0.4.15 // indirect\n\tgithub.com/nats-io/nuid v1.0.1 // indirect\n\tgithub.com/onsi/gomega v1.27.10 // indirect\n\tgolang.org/x/crypto v0.48.0 // indirect\n\tgolang.org/x/net v0.50.0 // indirect\n\tgolang.org/x/sys v0.41.0 // indirect\n\tgolang.org/x/text v0.34.0 // indirect\n\tgolang.org/x/time v0.14.0 // indirect\n\tgoogle.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect\n\tgoogle.golang.org/grpc v1.78.0 // indirect\n)\n"
  },
  {
    "path": "pkg/cache/go.sum",
    "content": "bitbucket.org/creachadair/stringset v0.0.14 h1:t1ejQyf8utS4GZV/4fM+1gvYucggZkfhb+tMobDxYOE=\nbitbucket.org/creachadair/stringset v0.0.14/go.mod h1:Ej8fsr6rQvmeMDf6CCWMWGb14H9mz8kmDgPPTdiVT0w=\ngithub.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op h1:Ucf+QxEKMbPogRO5guBNe5cgd9uZgfoJLOYs8WWhtjM=\ngithub.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E=\ngithub.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=\ngithub.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=\ngithub.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=\ngithub.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=\ngithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=\ngithub.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=\ngithub.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=\ngithub.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=\ngithub.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=\ngithub.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=\ngithub.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=\ngithub.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=\ngithub.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=\ngithub.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I=\ngithub.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=\ngithub.com/golang/protobuf v1.5.4 
h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=\ngithub.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=\ngithub.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=\ngithub.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=\ngithub.com/google/go-tpm v0.9.8 h1:slArAR9Ft+1ybZu0lBwpSmpwhRXaa85hWtMinMyRAWo=\ngithub.com/google/go-tpm v0.9.8/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=\ngithub.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=\ngithub.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=\ngithub.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=\ngithub.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 h1:KGuD/pM2JpL9FAYvBrnBBeENKZNh6eNtjqytV6TYjnk=\ngithub.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=\ngithub.com/nats-io/jwt/v2 v2.8.0 h1:K7uzyz50+yGZDO5o772eRE7atlcSEENpL7P+b74JV1g=\ngithub.com/nats-io/jwt/v2 v2.8.0/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA=\ngithub.com/nats-io/nats-server/v2 v2.12.4 h1:ZnT10v2LU2Xcoiy8ek9X6Se4YG8EuMfIfvAEuFVx1Ts=\ngithub.com/nats-io/nats-server/v2 v2.12.4/go.mod h1:5MCp/pqm5SEfsvVZ31ll1088ZTwEUdvRX1Hmh/mTTDg=\ngithub.com/nats-io/nats.go v1.49.0 h1:yh/WvY59gXqYpgl33ZI+XoVPKyut/IcEaqtsiuTJpoE=\ngithub.com/nats-io/nats.go v1.49.0/go.mod h1:fDCn3mN5cY8HooHwE2ukiLb4p4G4ImmzvXyJt+tGwdw=\ngithub.com/nats-io/nkeys v0.4.15 h1:JACV5jRVO9V856KOapQ7x+EY8Jo3qw1vJt/9Jpwzkk4=\ngithub.com/nats-io/nkeys v0.4.15/go.mod h1:CpMchTXC9fxA5zrMo4KpySxNjiDVvr8ANOSZdiNfUrs=\ngithub.com/nats-io/nuid v1.0.1 
h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=\ngithub.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=\ngithub.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=\ngithub.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=\ngithub.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=\ngithub.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=\ngithub.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=\ngithub.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=\ngithub.com/openconfig/gnmi v0.14.1 h1:qKMuFvhIRR2/xxCOsStPQ25aKpbMDdWr3kI+nP9bhMs=\ngithub.com/openconfig/gnmi v0.14.1/go.mod h1:whr6zVq9PCU8mV1D0K9v7Ajd3+swoN6Yam9n8OH3eT0=\ngithub.com/openconfig/gnmic/pkg/api v0.1.10 h1:zU57bogHrnraDFCYDnxHZB8Hcd53bWx1fDkRTPw/R2w=\ngithub.com/openconfig/gnmic/pkg/api v0.1.10/go.mod h1:6PntONfjCMq3XzsDfWMkLeoVuBRbkm2foQO5m6PeYo0=\ngithub.com/openconfig/goyang v1.6.0 h1:JjnPbLY1/y28VyTO67LsEV0TaLWNiZyDcsppGq4F4is=\ngithub.com/openconfig/goyang v1.6.0/go.mod h1:sdNZi/wdTZyLNBNfgLzmmbi7kISm7FskMDKKzMY+x1M=\ngithub.com/openconfig/grpctunnel v0.1.0 h1:EN99qtlExZczgQgp5ANnHRC/Rs62cAG+Tz2BQ5m/maM=\ngithub.com/openconfig/grpctunnel v0.1.0/go.mod h1:G04Pdu0pml98tdvXrvLaU+EBo3PxYfI9MYqpvdaEHLo=\ngithub.com/openconfig/ygot v0.29.20 h1:XHLpwCN91QuKc2LAvnEqtCmH8OuxgLlErDhrdl2mJw8=\ngithub.com/openconfig/ygot v0.29.20/go.mod h1:K8HbrPm/v8/emtGQ9+RsJXx6UPKC5JzS/FqK7pN+tMo=\ngo.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=\ngo.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=\ngo.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=\ngo.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=\ngo.opentelemetry.io/otel/metric v1.38.0 
h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=\ngo.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=\ngo.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=\ngo.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=\ngo.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=\ngo.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=\ngo.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=\ngo.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=\ngolang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=\ngolang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=\ngolang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=\ngolang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=\ngolang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=\ngolang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=\ngolang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=\ngolang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=\ngolang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=\ngolang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=\ngolang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=\ngolang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=\ngolang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=\ngonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=\ngonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=\ngoogle.golang.org/genproto/googleapis/rpc 
v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=\ngoogle.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=\ngoogle.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=\ngoogle.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=\ngoogle.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\n"
  },
  {
    "path": "pkg/cache/jetstream_cache.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cache\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/nats-io/nats-server/v2/server\"\n\t\"github.com/nats-io/nats.go\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n)\n\nconst (\n\tloggingPrefixJetStream = \"[cache:jetstream] \"\n\treconnectTimer         = 5 * time.Second\n\tdefaultFetchBatchSize  = 100\n\tdefaultFetchWaitTime   = 100 * time.Millisecond\n\tdefaultExpiration      = time.Minute\n\tdefaultMaxMsgs         = 1024 * 1024\n\tdefaultMaxBytes        = 1024 * 1024 * 1024\n\tdefaultNATSAddress     = \"127.0.0.1\"\n\tjetStreamSyncName      = \"gnmic-jetstream-cache\"\n)\n\ntype jetStreamCache struct {\n\tcfg        *Config\n\tns         *server.Server\n\tnc         *nats.Conn\n\tjs         nats.JetStreamContext\n\tcfn        context.CancelFunc\n\tstreamChan chan string\n\n\t// configured remote address or locally started server address\n\taddr string\n\toc   *gnmiCache\n\n\tm       *sync.RWMutex\n\tstreams map[string]struct{}\n\tlogger  *log.Logger\n}\n\nfunc newJetStreamCache(cfg *Config, opts ...Option) (*jetStreamCache, error) {\n\tif cfg == nil {\n\t\tcfg = new(Config)\n\t}\n\tcfg.setDefaults()\n\n\tvar err error\n\tc := &jetStreamCache{\n\t\tcfg:        cfg,\n\t\toc:         newGNMICache(cfg, \"jetstream\", opts...),\n\t\tstreamChan: make(chan string),\n\t\tm:          
new(sync.RWMutex),\n\t\tstreams:    make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(c)\n\t}\n\tif c.cfg.Address == defaultNATSAddress {\n\t\tsopts := &server.Options{\n\t\t\tHost:      cfg.Address,\n\t\t\tPort:      -1,\n\t\t\tJetStream: true,\n\t\t\tNoSigs:    true,\n\t\t}\n\n\t\tc.ns, err = server.NewServer(sopts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif c.logger == nil {\n\t\tc.logger = log.New(os.Stderr, loggingPrefixJetStream, utils.DefaultLoggingFlags)\n\t}\n\tc.start()\n\tctx, cancel := context.WithCancel(context.Background())\n\tc.cfn = cancel\n\tgo c.sync(ctx)\n\treturn c, nil\n}\n\nfunc (c *jetStreamCache) SetLogger(logger *log.Logger) {\n\tif logger != nil && c.logger != nil {\n\t\tc.logger.SetOutput(logger.Writer())\n\t\tc.logger.SetFlags(logger.Flags())\n\t\tc.logger.SetPrefix(loggingPrefixJetStream)\n\t}\n}\n\nfunc (c *jetStreamCache) start() {\nSTART:\n\tif c.ns != nil {\n\t\tgo c.ns.Start()\n\t\tif !c.ns.ReadyForConnections(reconnectTimer) {\n\t\t\tc.ns.Shutdown()\n\t\t\tc.logger.Printf(\"failed to start cache, retrying\")\n\t\t\tgoto START\n\t\t}\n\t}\n\n\tc.addr = c.cfg.Address\n\tif c.ns != nil {\n\t\tc.addr = c.ns.ClientURL()\n\t}\n\n\tvar err error\n\topts := []nats.Option{\n\t\tnats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) {\n\t\t\tc.logger.Printf(\"NATS error: %v\", err)\n\t\t}),\n\t\tnats.DisconnectHandler(func(_ *nats.Conn) {\n\t\t\tc.logger.Println(\"Disconnected from NATS\")\n\t\t}),\n\t\tnats.ClosedHandler(func(_ *nats.Conn) {\n\t\t\tc.logger.Println(\"NATS connection is closed\")\n\t\t}),\n\t}\n\tif c.cfg.Username != \"\" && c.cfg.Password != \"\" {\n\t\topts = append(opts, nats.UserInfo(c.cfg.Username, c.cfg.Password))\n\t}\nCONNECT:\n\tif c.nc != nil {\n\t\tc.nc.Close()\n\t}\n\n\tc.nc, err = nats.Connect(c.addr, opts...)\n\tif err != nil {\n\t\tc.logger.Printf(\"failed to connect: %v\", err)\n\t\ttime.Sleep(reconnectTimer)\n\t\tgoto CONNECT\n\t}\n\n\tc.js, err = 
c.nc.JetStream()\n\tif err != nil {\n\t\tc.logger.Printf(\"failed to create stream: %v\", err)\n\t\ttime.Sleep(reconnectTimer)\n\t\tgoto CONNECT\n\t}\n}\n\nfunc (c *jetStreamCache) createStream(streamName string, subjects []string) error {\n\tstream, err := c.js.StreamInfo(streamName)\n\tif err != nil {\n\t\tif !errors.Is(err, nats.ErrStreamNotFound) {\n\t\t\treturn err\n\t\t}\n\t}\n\tif c.cfg.Debug {\n\t\tc.logger.Printf(\"found stream %q: %v\", streamName, stream != nil)\n\t}\n\tif stream == nil {\n\t\tc.logger.Printf(\"creating stream %q and subjects %q\", streamName, subjects)\n\t\t_, err = c.js.AddStream(\n\t\t\t&nats.StreamConfig{\n\t\t\t\tName:     streamName,\n\t\t\t\tSubjects: subjects,\n\t\t\t\tMaxMsgs:  c.cfg.MaxMsgsPerSubscription,\n\t\t\t\tMaxBytes: c.cfg.MaxBytes,\n\t\t\t\tDiscard:  nats.DiscardOld,\n\t\t\t\tMaxAge:   c.cfg.Expiration,\n\t\t\t\tStorage:  nats.MemoryStorage,\n\t\t\t})\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *jetStreamCache) Write(ctx context.Context, subscriptionName string, m proto.Message) {\n\tc.writeRemoteJS(ctx, subscriptionName, m)\n\t// publish the subscription name to nats for other gnmic instances\n\tvar ok bool\n\tc.m.RLock()\n\tdefer func() {\n\t\tc.m.RUnlock()\n\t\tif !ok {\n\t\t\tc.m.Lock()\n\t\t\tc.streams[subscriptionName] = struct{}{}\n\t\t\tc.m.Unlock()\n\t\t\t_ = c.nc.Publish(cacheSubjects, []byte(subscriptionName))\n\t\t}\n\t}()\n\t_, ok = c.streams[subscriptionName]\n}\n\nfunc (c *jetStreamCache) writeRemoteJS(ctx context.Context, subscriptionName string, m proto.Message) {\n\tswitch m := m.ProtoReflect().Interface().(type) {\n\tcase *gnmi.SubscribeResponse:\n\t\tswitch rsp := m.GetResponse().(type) {\n\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\ttargetName := rsp.Update.GetPrefix().GetTarget()\n\t\t\tif targetName == \"\" {\n\t\t\t\tc.logger.Printf(\"subscription=%q: response missing target: %v\", subscriptionName, rsp)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// check if a stream with the same name as the 
subscription is being created or has been created\n\t\t\tc.m.RLock()\n\t\t\t_, ok := c.streams[subscriptionName]\n\t\t\tc.m.RUnlock()\n\t\t\tif !ok {\n\t\t\t\t// add the subscription as a stream and create it in NATS if it doesn't exist\n\t\t\t\tc.m.Lock()\n\t\t\t\tc.streams[subscriptionName] = struct{}{}\n\t\t\t\terr := c.createStream(subscriptionName, []string{fmt.Sprintf(\"%s.>\", subscriptionName)})\n\t\t\t\tif err != nil {\n\t\t\t\t\tdelete(c.streams, subscriptionName)\n\t\t\t\t\tc.m.Unlock()\n\t\t\t\t\tc.logger.Printf(\"failed to create stream: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tc.m.Unlock()\n\t\t\t\tc.streamChan <- subscriptionName\n\t\t\t}\n\n\t\t\t// wait in case the stream is being created\n\t\t\tc.m.RLock()\n\t\t\tdefer c.m.RUnlock()\n\t\t\terr := c.publishNotificationJS(ctx, subscriptionName, targetName, m)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Print(err)\n\t\t\t}\n\n\t\t}\n\t}\n}\n\nfunc (c *jetStreamCache) publishNotificationJS(ctx context.Context, subscriptionName, targetName string, r *gnmi.SubscribeResponse) error {\n\tctx, cancel := context.WithTimeout(ctx, c.cfg.Timeout)\n\tdefer cancel()\n\n\tsubjectName, err := subjectName(subscriptionName, targetName, r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to build a subject name: %w\", err)\n\t}\n\n\tb, err := proto.Marshal(r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal proto message: %w\", err)\n\t}\n\n\t_, err = c.js.Publish(subjectName, b, nats.Context(ctx))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to publish to JetStream cache: %w\", err)\n\t}\n\treturn nil\n}\n\nfunc (c *jetStreamCache) sync(ctx context.Context) {\n\tc.logger.Printf(\"start JetStream sync\")\n\t// this map keeps track of streams already queued\n\tstreams := make(map[string]struct{})\n\tgo func() {\n\tSTART:\n\t\tsubjectSub, err := c.nc.Subscribe(cacheSubjects, func(m *nats.Msg) {\n\t\t\tsubj := string(m.Data)\n\t\t\tc.streamChan <- subj\n\t\t})\n\t\tif err != nil 
{\n\t\t\ttime.Sleep(time.Second)\n\t\t\tgoto START\n\t\t}\n\t\tdefer subjectSub.Unsubscribe()\n\t\tfor range ctx.Done() {\n\t\t}\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase cc := <-c.streamChan:\n\t\t\tif _, ok := streams[cc]; !ok {\n\t\t\t\tc.logger.Printf(\"start JetStream stream %q sync\", cc)\n\t\t\t\tstreams[cc] = struct{}{}\n\t\t\t\tgo c.syncStream(ctx, cc)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *jetStreamCache) syncStream(ctx context.Context, subject string) {\nSTART:\n\tsub, err := c.js.Subscribe(fmt.Sprintf(\"%s.>\", subject),\n\t\tfunc(msg *nats.Msg) {\n\t\t\tm := new(gnmi.SubscribeResponse)\n\t\t\terr := proto.Unmarshal([]byte(msg.Data), m)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"failed to unmarshal proto msg: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_ = msg.Ack()\n\t\t\tc.oc.Write(ctx, subject, m)\n\t\t},\n\t\tnats.DeliverNew(),\n\t\tnats.Durable(jetStreamSyncName),\n\t)\n\tif err != nil {\n\t\ttime.Sleep(time.Second)\n\t\tgoto START\n\t}\n\tdefer sub.Unsubscribe()\n\tfor range ctx.Done() {\n\t}\n}\n\n// Read //\nfunc (c *jetStreamCache) ReadAll() (map[string][]*gnmi.Notification, error) {\n\treturn c.oc.ReadAll()\n}\n\nfunc (c *jetStreamCache) Read(sub, target string, p *gnmi.Path) (map[string][]*gnmi.Notification, error) {\n\treturn c.oc.read(sub, target, p), nil\n}\n\nfunc (c *jetStreamCache) Subscribe(ctx context.Context, ro *ReadOpts) chan *Notification {\n\treturn c.oc.Subscribe(ctx, ro)\n}\n\nfunc (c *jetStreamCache) Stop() {\n\tc.cfn()\n\tif c.nc != nil {\n\t\tc.nc.Close()\n\t}\n\tif c.ns != nil {\n\t\tc.ns.Shutdown()\n\t}\n}\n\nfunc (c *jetStreamCache) DeleteTarget(name string) {\n\tc.oc.DeleteTarget(name)\n}\n\nvar stringBuilderPool = sync.Pool{\n\tNew: func() any {\n\t\treturn new(strings.Builder)\n\t},\n}\n\nfunc subjectName(streamName, target string, m proto.Message) (string, error) {\n\tsb := stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() 
{\n\t\tsb.Reset()\n\t\tstringBuilderPool.Put(sb)\n\t}()\n\tsb.WriteString(streamName)\n\tsb.WriteString(\".\")\n\tif target != \"\" {\n\t\tsb.WriteString(target)\n\t\tsb.WriteString(\".\")\n\t}\n\n\tswitch rsp := m.(type) {\n\tcase *gnmi.SubscribeResponse:\n\t\tswitch rsp := rsp.Response.(type) {\n\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\tvar prefixSubject string\n\t\t\tif rsp.Update.GetPrefix() != nil {\n\t\t\t\tprefixSubject = gNMIPathToSubject(rsp.Update.GetPrefix(), subjectOpts{WithKeys: true, WithWildcard: false})[0]\n\t\t\t}\n\t\t\tvar pathSubject string\n\t\t\tif len(rsp.Update.GetUpdate()) > 0 {\n\t\t\t\tpathSubject = gNMIPathToSubject(rsp.Update.GetUpdate()[0].GetPath(), subjectOpts{WithKeys: true, WithWildcard: false})[0]\n\t\t\t}\n\t\t\tif prefixSubject != \"\" {\n\t\t\t\tsb.WriteString(prefixSubject)\n\t\t\t\tsb.WriteString(\".\")\n\t\t\t}\n\t\t\tif pathSubject != \"\" {\n\t\t\t\tsb.WriteString(pathSubject)\n\t\t\t}\n\t\t}\n\t}\n\treturn sb.String(), nil\n}\n\ntype subjectOpts struct {\n\tWithKeys     bool\n\tWithWildcard bool\n}\n\nfunc gNMIPathToSubject(p *gnmi.Path, opts subjectOpts) []string {\n\tif p == nil {\n\t\treturn []string{\"\"}\n\t}\n\tsb := stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tsb.Reset()\n\t\tstringBuilderPool.Put(sb)\n\t}()\n\tif p.GetOrigin() != \"\" {\n\t\tfmt.Fprintf(sb, \"%s.\", p.GetOrigin())\n\t}\n\tfor i, e := range p.GetElem() {\n\t\tif i > 0 {\n\t\t\tsb.WriteString(\".\")\n\t\t}\n\t\tsb.WriteString(e.Name)\n\t\tif opts.WithKeys {\n\t\t\tif len(e.Key) > 0 {\n\t\t\t\t// sort keys by name\n\t\t\t\tkNames := make([]string, 0, len(e.Key))\n\t\t\t\tfor k := range e.Key {\n\t\t\t\t\tkNames = append(kNames, k)\n\t\t\t\t}\n\t\t\t\tsort.Strings(kNames)\n\t\t\t\tfor _, k := range kNames {\n\t\t\t\t\tsk := sanitizeKey(e.GetKey()[k])\n\t\t\t\t\tfmt.Fprintf(sb, \".{%s=%s}\", k, sk)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tsubj := sb.String()\n\n\tif subj == \"\" && opts.WithWildcard {\n\t\treturn 
[]string{\".>\"}\n\t}\n\tresult := []string{subj}\n\tif opts.WithWildcard {\n\t\tresult = append(result, subj+\".>\")\n\t}\n\treturn result\n}\n\nconst (\n\tdotReplChar   = \"^\"\n\tspaceReplChar = \"~\"\n)\n\nvar regDot = regexp.MustCompile(`\\.`)\nvar regSpace = regexp.MustCompile(`\\s`)\n\nfunc sanitizeKey(k string) string {\n\ts := regDot.ReplaceAllString(k, dotReplChar)\n\treturn regSpace.ReplaceAllString(s, spaceReplChar)\n}\n"
  },
  {
    "path": "pkg/cache/jetstream_cache_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cache\n\nimport (\n\t\"context\"\n\t\"log\"\n\t\"testing\"\n\t\"time\"\n\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n)\n\nfunc Test_natsCache_Write(t *testing.T) {\n\ttype fields struct {\n\t\tcfg *Config\n\t}\n\ttype args struct {\n\t\tctx              context.Context\n\t\tsubscriptionName string\n\t\tm                proto.Message\n\t}\n\ttests := []struct {\n\t\tname   string\n\t\tfields fields\n\t\targs   args\n\t}{\n\t\t{\n\t\t\tname: \"test1\",\n\t\t\tfields: fields{\n\t\t\t\tcfg: &Config{\n\t\t\t\t\tType: cacheType_JS,\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tctx:              context.TODO(),\n\t\t\t\tsubscriptionName: \"sub1\",\n\t\t\t\tm: &gnmi.SubscribeResponse{\n\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\tPrefix: &gnmi.Path{\n\t\t\t\t\t\t\t\tTarget: \"router1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tName: \"description\",\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tVal: 
&gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{\n\t\t\t\t\t\t\t\t\t\t\tAsciiVal: \"interface_description\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"test2\",\n\t\t\tfields: fields{\n\t\t\t\tcfg: &Config{\n\t\t\t\t\tType: cacheType_JS,\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tctx:              context.TODO(),\n\t\t\t\tsubscriptionName: \"sub1\",\n\t\t\t\tm: &gnmi.SubscribeResponse{\n\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\tPrefix: &gnmi.Path{\n\t\t\t\t\t\t\t\tTarget: \"router1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tName: \"description\",\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{\n\t\t\t\t\t\t\t\t\t\t\tAsciiVal: \"interface_description\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tName: \"statistics\",\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tName: 
\"in-octets\",\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{\n\t\t\t\t\t\t\t\t\t\t\tAsciiVal: \"42\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tc, err := New(tt.fields.cfg, WithLogger(log.Default()))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tc.Write(tt.args.ctx, tt.args.subscriptionName, tt.args.m)\n\t\t\trs, err := c.ReadAll()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tfor s, ns := range rs {\n\t\t\t\tt.Logf(\"sub %s, read %d msgs: %+v\", s, len(ns), ns)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/cache/nats_cache.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cache\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/nats-io/nats-server/v2/server\"\n\t\"github.com/nats-io/nats.go\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n)\n\nconst (\n\tloggingPrefixNATS       = \"[cache:nats] \"\n\tcacheSubjects           = \"gnmic.cache.subjects\"\n\tsubjectCacheResetPeriod = 30 * time.Second\n)\n\ntype natsCache struct {\n\tcfg *Config\n\toc  *gnmiCache\n\n\tns  *server.Server\n\tnc  *nats.Conn\n\tcfn context.CancelFunc\n\n\tsubjectChan chan string\n\n\t// configured remote address or locally started server address\n\taddr string\n\n\tm        *sync.RWMutex\n\tsubjects map[string]struct{}\n\tlogger   *log.Logger\n}\n\nfunc newNATSCache(cfg *Config, opts ...Option) (*natsCache, error) {\n\tif cfg == nil {\n\t\tcfg = new(Config)\n\t}\n\tcfg.setDefaults()\n\n\tvar err error\n\tc := &natsCache{\n\t\tcfg:         cfg,\n\t\toc:          newGNMICache(cfg, \"nats\", opts...),\n\t\tsubjectChan: make(chan string),\n\n\t\tm:        new(sync.RWMutex),\n\t\tsubjects: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(c)\n\t}\n\tif c.cfg.Address == defaultNATSAddress {\n\t\tsopts := &server.Options{\n\t\t\tHost:   cfg.Address,\n\t\t\tPort:   -1,\n\t\t\tNoSigs: true,\n\t\t}\n\n\t\tc.ns, err = server.NewServer(sopts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif c.logger == nil {\n\t\tc.logger = 
log.New(os.Stderr, loggingPrefixNATS, utils.DefaultLoggingFlags)\n\t}\n\tc.start()\n\tctx, cancel := context.WithCancel(context.Background())\n\tc.cfn = cancel\n\tgo c.sync(ctx)\n\treturn c, nil\n}\n\nfunc (c *natsCache) start() {\nSTART:\n\tif c.ns != nil {\n\t\tgo c.ns.Start()\n\t\tif !c.ns.ReadyForConnections(reconnectTimer) {\n\t\t\tc.ns.Shutdown()\n\t\t\tc.logger.Printf(\"failed to start cache, retrying\")\n\t\t\tgoto START\n\t\t}\n\t}\n\n\tc.addr = c.cfg.Address\n\tif c.ns != nil {\n\t\tc.addr = c.ns.ClientURL()\n\t}\n\n\tvar err error\n\topts := []nats.Option{\n\t\tnats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) {\n\t\t\tc.logger.Printf(\"NATS error: %v\", err)\n\t\t}),\n\t\tnats.DisconnectHandler(func(_ *nats.Conn) {\n\t\t\tc.logger.Println(\"Disconnected from NATS\")\n\t\t}),\n\t\tnats.ClosedHandler(func(_ *nats.Conn) {\n\t\t\tc.logger.Println(\"NATS connection is closed\")\n\t\t}),\n\t\tnats.Timeout(c.cfg.Timeout),\n\t}\n\tif c.cfg.Username != \"\" && c.cfg.Password != \"\" {\n\t\topts = append(opts, nats.UserInfo(c.cfg.Username, c.cfg.Password))\n\t}\nCONNECT:\n\tif c.nc != nil {\n\t\tc.nc.Close()\n\t}\n\n\tc.nc, err = nats.Connect(c.addr, opts...)\n\tif err != nil {\n\t\tc.logger.Printf(\"failed to connect: %v\", err)\n\t\ttime.Sleep(reconnectTimer)\n\t\tgoto CONNECT\n\t}\n}\n\nfunc (c *natsCache) sync(ctx context.Context) {\n\tc.logger.Printf(\"start NATS sync\")\n\t// this map keeps track of subjects already queued\n\tsubjects := make(map[string]struct{})\n\tgo func() {\n\t\tticker := time.NewTicker(subjectCacheResetPeriod)\n\tSTART:\n\t\tsubjectSub, err := c.nc.Subscribe(cacheSubjects, func(m *nats.Msg) {\n\t\t\tsubj := string(m.Data)\n\t\t\tc.subjectChan <- subj\n\t\t})\n\t\tif err != nil {\n\t\t\ttime.Sleep(time.Second)\n\t\t\tgoto START\n\t\t}\n\t\tdefer subjectSub.Unsubscribe()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tc.m.Lock()\n\t\t\t\tc.subjects = 
make(map[string]struct{})\n\t\t\t\tc.m.Unlock()\n\t\t\t}\n\t\t}\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase cc := <-c.subjectChan:\n\t\t\tif _, ok := subjects[cc]; !ok {\n\t\t\t\tc.logger.Printf(\"start NATS topic %q sync\", cc)\n\t\t\t\tsubjects[cc] = struct{}{}\n\t\t\t\tgo c.syncSubject(ctx, cc)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *natsCache) syncSubject(ctx context.Context, subject string) {\nSTART:\n\tsub, err := c.nc.Subscribe(fmt.Sprintf(\"%s.>\", subject),\n\t\tfunc(msg *nats.Msg) {\n\t\t\tm := new(gnmi.SubscribeResponse)\n\t\t\terr := proto.Unmarshal([]byte(msg.Data), m)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"failed to unmarshal proto msg: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.oc.Write(ctx, subject, m)\n\t\t})\n\tif err != nil {\n\t\ttime.Sleep(time.Second)\n\t\tgoto START\n\t}\n\tdefer sub.Unsubscribe()\n\tfor range ctx.Done() {\n\t}\n}\n\nfunc (c *natsCache) Write(ctx context.Context, subscriptionName string, m proto.Message) {\n\t// write the msg to nats\n\tc.writeRemoteNATS(ctx, subscriptionName, m)\n\t// publish the subscription name to nats for other gnmic instances\n\tvar ok bool\n\tc.m.RLock()\n\tdefer func() {\n\t\tc.m.RUnlock()\n\t\tif !ok {\n\t\t\tc.m.Lock()\n\t\t\tc.subjects[subscriptionName] = struct{}{}\n\t\t\tc.m.Unlock()\n\t\t\t_ = c.nc.Publish(cacheSubjects, []byte(subscriptionName))\n\t\t}\n\t}()\n\t_, ok = c.subjects[subscriptionName]\n}\n\nfunc (c *natsCache) writeRemoteNATS(ctx context.Context, subscriptionName string, m proto.Message) {\n\tswitch m := m.ProtoReflect().Interface().(type) {\n\tcase *gnmi.SubscribeResponse:\n\t\tswitch rsp := m.GetResponse().(type) {\n\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\ttargetName := rsp.Update.GetPrefix().GetTarget()\n\t\t\tif targetName == \"\" {\n\t\t\t\tc.logger.Printf(\"subscription=%q: response missing target: %v\", subscriptionName, rsp)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.subjectChan <- subscriptionName\n\t\t\tvar err error\n\t\t\terr = 
c.publishNotificationNATS(ctx, subscriptionName, targetName, m)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Print(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *natsCache) publishNotificationNATS(_ context.Context, subscriptionName, targetName string, r *gnmi.SubscribeResponse) error {\n\tb, err := proto.Marshal(r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal proto message: %w\", err)\n\t}\n\terr = c.nc.Publish(fmt.Sprintf(\"%s.%s\", subscriptionName, targetName), b)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to publish to NATS cache: %w\", err)\n\t}\n\treturn nil\n}\n\nfunc (c *natsCache) ReadAll() (map[string][]*gnmi.Notification, error) {\n\treturn c.oc.ReadAll()\n}\n\nfunc (c *natsCache) Read(sub, target string, p *gnmi.Path) (map[string][]*gnmi.Notification, error) {\n\treturn c.oc.read(sub, target, p), nil\n}\n\nfunc (c *natsCache) Subscribe(ctx context.Context, ro *ReadOpts) chan *Notification {\n\treturn c.oc.Subscribe(ctx, ro)\n}\n\nfunc (c *natsCache) Stop() {\n\tc.cfn()\n\tif c.nc != nil {\n\t\tc.nc.Close()\n\t}\n\tif c.ns != nil {\n\t\tc.ns.Shutdown()\n\t}\n}\n\nfunc (c *natsCache) SetLogger(logger *log.Logger) {\n\tif logger != nil && c.logger != nil {\n\t\tc.logger.SetOutput(logger.Writer())\n\t\tc.logger.SetFlags(logger.Flags())\n\t\tc.logger.SetPrefix(loggingPrefixNATS)\n\t}\n}\n\nfunc (c *natsCache) DeleteTarget(name string) {\n\tc.oc.DeleteTarget(name)\n}\n"
  },
  {
    "path": "pkg/cache/oc_cache.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cache\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"log\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tocCache \"github.com/openconfig/gnmi/cache\"\n\t\"github.com/openconfig/gnmi/ctree\"\n\t\"github.com/openconfig/gnmi/match\"\n\t\"github.com/openconfig/gnmi/path\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmi/subscribe\"\n\tgpath \"github.com/openconfig/gnmic/pkg/api/path\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"google.golang.org/protobuf/proto\"\n)\n\nconst (\n\tloggingPrefixOC = \"[cache:oc] \"\n\tdefaultTimeout  = 10 * time.Second\n)\n\ntype gnmiCache struct {\n\tm      *sync.Mutex\n\tcaches map[string]*subCache\n\t// match  *match.Match\n\n\tlogger     *log.Logger\n\texpiration time.Duration\n\tdebug      bool\n}\n\ntype subCache struct {\n\tc     *ocCache.Cache\n\tmatch *match.Match\n}\n\nfunc (gc *gnmiCache) loadConfig(gcc *Config) {\n\tgc.expiration = gcc.Expiration\n\tgc.logger = log.New(io.Discard, loggingPrefixOC, utils.DefaultLoggingFlags)\n\tgc.debug = gcc.Debug\n}\n\nfunc newGNMICache(cfg *Config, loggingPrefix string, opts ...Option) *gnmiCache {\n\tif cfg == nil {\n\t\tcfg = new(Config)\n\t}\n\tgc := &gnmiCache{\n\t\tm: new(sync.Mutex),\n\t\t// match:  match.New(),\n\t\tcaches: make(map[string]*subCache),\n\t}\n\tcfg.setDefaults()\n\n\tgc.loadConfig(cfg)\n\tfor _, opt := range opts {\n\t\topt(gc)\n\t}\n\tif gc.logger != nil {\n\t\tif loggingPrefix == \"\" {\n\t\t\tloggingPrefix = 
\"oc\"\n\t\t}\n\t\tgc.logger.SetPrefix(loggingPrefixOC)\n\t}\n\treturn gc\n}\n\nfunc (gc *subCache) update(n *ctree.Leaf) {\n\tswitch v := n.Value().(type) {\n\tcase *gnmi.Notification:\n\t\tpathElems := path.ToStrings(v.GetPrefix(), true)\n\t\tsubscribe.UpdateNotification(gc.match, n, v, pathElems)\n\tdefault:\n\t\t// gc.logger.Printf(\"unexpected update type: %T\", v)\n\t}\n}\n\nfunc (gc *gnmiCache) SetLogger(logger *log.Logger) {\n\tif logger != nil && gc.logger != nil {\n\t\tgc.logger.SetOutput(logger.Writer())\n\t\tgc.logger.SetFlags(logger.Flags())\n\t}\n}\n\nfunc (gc *gnmiCache) Write(ctx context.Context, measName string, m proto.Message) {\n\tvar err error\n\tswitch rsp := m.ProtoReflect().Interface().(type) {\n\tcase *gnmi.SubscribeResponse:\n\t\tswitch rsp := rsp.GetResponse().(type) {\n\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\ttarget := rsp.Update.GetPrefix().GetTarget()\n\t\t\tif target == \"\" {\n\t\t\t\tgc.logger.Printf(\"subscription=%q: response missing target: %v\", measName, rsp)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// if the update does not have a prefix path,\n\t\t\t// check that each update has a path.\n\t\t\tif len(rsp.Update.GetPrefix().GetElem()) == 0 {\n\t\t\t\tfor _, upd := range rsp.Update.GetUpdate() {\n\t\t\t\t\tif len(upd.GetPath().GetElem()) == 0 {\n\t\t\t\t\t\tgc.logger.Printf(\"write fail: received an update with en empty path: %v\", upd)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tgc.m.Lock()\n\t\t\tsCache, ok := gc.caches[measName]\n\t\t\tif !ok {\n\t\t\t\tsCache = &subCache{\n\t\t\t\t\tc:     ocCache.New(nil),\n\t\t\t\t\tmatch: match.New(),\n\t\t\t\t}\n\t\t\t\tsCache.c.SetClient(sCache.update)\n\t\t\t\tsCache.c.Add(target)\n\t\t\t\tgc.logger.Printf(\"target %q added to local cache %q\", target, measName)\n\t\t\t\tgc.caches[measName] = sCache\n\t\t\t}\n\t\t\tif !sCache.c.HasTarget(target) {\n\t\t\t\tsCache.c.Add(target)\n\t\t\t\tgc.logger.Printf(\"target %q added to local cache %q\", target, 
measName)\n\t\t\t}\n\t\t\tgc.m.Unlock()\n\t\t\t// do not write updates with nil values to cache.\n\t\t\tnotif := &gnmi.Notification{\n\t\t\t\tTimestamp: rsp.Update.GetTimestamp(),\n\t\t\t\tPrefix:    rsp.Update.GetPrefix(),\n\t\t\t\tUpdate:    make([]*gnmi.Update, 0, len(rsp.Update.GetUpdate())),\n\t\t\t\tDelete:    rsp.Update.GetDelete(),\n\t\t\t\tAtomic:    rsp.Update.GetAtomic(),\n\t\t\t}\n\t\t\tfor _, upd := range rsp.Update.GetUpdate() {\n\t\t\t\tif upd.Val == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tnotif.Update = append(notif.Update, upd)\n\t\t\t}\n\t\t\tif len(notif.Update) == 0 && len(notif.Delete) == 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = sCache.c.GnmiUpdate(notif)\n\t\t\tif err != nil {\n\t\t\t\tgc.logger.Printf(\"failed to update gNMI cache: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (gc *gnmiCache) ReadAll() (map[string][]*gnmi.Notification, error) {\n\treturn gc.read(\"\", \"*\", nil), nil\n}\n\nfunc (gc *gnmiCache) Read(sub, target string, p *gnmi.Path) (map[string][]*gnmi.Notification, error) {\n\treturn gc.read(sub, target, p), nil\n}\n\nfunc (gc *gnmiCache) Subscribe(ctx context.Context, ro *ReadOpts) chan *Notification {\n\tif ro == nil {\n\t\tro = new(ReadOpts)\n\t}\n\n\tro.setDefaults()\n\tch := make(chan *Notification)\n\tgo gc.subscribe(ctx, ro, ch)\n\n\treturn ch\n}\n\nfunc (gc *gnmiCache) subscribe(ctx context.Context, ro *ReadOpts, ch chan *Notification) {\n\tdefer close(ch)\n\tswitch ro.Mode {\n\tcase ReadMode_Once:\n\t\tgc.handleSingleQuery(ctx, ro, ch)\n\tcase ReadMode_StreamOnChange: // default:\n\t\tro.SuppressRedundant = false\n\t\tgc.handleOnChangeQuery(ctx, ro, ch)\n\tcase ReadMode_StreamSample:\n\t\tgc.handleSampledQuery(ctx, ro, ch)\n\t}\n}\n\nfunc (gc *gnmiCache) handleSingleQuery(ctx context.Context, ro *ReadOpts, ch chan *Notification) {\n\tif gc.debug {\n\t\tgc.logger.Printf(\"running single query for target %q\", ro.Target)\n\t}\n\n\tcaches := gc.getCaches(ro.Subscription)\n\n\tif 
gc.debug {\n\t\tgc.logger.Printf(\"single query got %d caches\", len(caches))\n\t}\n\twg := new(sync.WaitGroup)\n\twg.Add(len(caches))\n\n\tfor name, c := range caches {\n\t\tgo func(name string, c *subCache) {\n\t\t\tdefer wg.Done()\n\t\t\tif !c.c.HasTarget(ro.Target) {\n\t\t\t\tif gc.debug {\n\t\t\t\t\tgc.logger.Printf(\"subscription-cache %q doesn't have target: %q\", name, ro.Target)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, p := range ro.Paths {\n\t\t\t\tfp, err := path.CompletePath(p, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tgc.logger.Printf(\"failed to generate CompletePath from %v\", p)\n\t\t\t\t\tch <- &Notification{Name: name, Err: err}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = c.c.Query(ro.Target, fp,\n\t\t\t\t\tfunc(_ []string, l *ctree.Leaf, _ interface{}) error {\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tswitch gl := l.Value().(type) {\n\t\t\t\t\t\tcase *gnmi.Notification:\n\t\t\t\t\t\t\tif ro.OverrideTS {\n\t\t\t\t\t\t\t\t// override timestamp\n\t\t\t\t\t\t\t\tgl = proto.Clone(gl).(*gnmi.Notification)\n\t\t\t\t\t\t\t\tgl.Timestamp = time.Now().UnixNano()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t//no suppress redundant, send to channel and return\n\t\t\t\t\t\t\tif !ro.SuppressRedundant {\n\t\t\t\t\t\t\t\tch <- &Notification{Name: name, Notification: gl}\n\t\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t// suppress redundant part\n\t\t\t\t\t\t\tif ro.lastSent == nil {\n\t\t\t\t\t\t\t\tro.lastSent = make(map[string]*gnmi.TypedValue)\n\t\t\t\t\t\t\t\tro.m = new(sync.RWMutex)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tprefix := gpath.GnmiPathToXPath(gl.GetPrefix(), true)\n\t\t\t\t\t\t\ttarget := gl.GetPrefix().GetTarget()\n\t\t\t\t\t\t\tfor _, upd := range gl.GetUpdate() {\n\t\t\t\t\t\t\t\tp := gpath.GnmiPathToXPath(upd.GetPath(), true)\n\t\t\t\t\t\t\t\tvalXPath := strings.Join([]string{target, prefix, p}, \"/\")\n\t\t\t\t\t\t\t\tro.m.RLock()\n\t\t\t\t\t\t\t\tsv, ok := 
ro.lastSent[valXPath]\n\t\t\t\t\t\t\t\tro.m.RUnlock()\n\t\t\t\t\t\t\t\tif !ok || !proto.Equal(sv, upd.Val) {\n\t\t\t\t\t\t\t\t\tch <- &Notification{\n\t\t\t\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\t\t\t\tNotification: &gnmi.Notification{\n\t\t\t\t\t\t\t\t\t\t\tTimestamp: gl.GetTimestamp(),\n\t\t\t\t\t\t\t\t\t\t\tPrefix:    gl.GetPrefix(),\n\t\t\t\t\t\t\t\t\t\t\tUpdate:    []*gnmi.Update{upd},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tro.m.Lock()\n\t\t\t\t\t\t\t\t\tro.lastSent[valXPath] = upd.Val\n\t\t\t\t\t\t\t\t\tro.m.Unlock()\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif gl.GetDelete() != nil {\n\t\t\t\t\t\t\t\tch <- &Notification{\n\t\t\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\t\t\tNotification: &gnmi.Notification{\n\t\t\t\t\t\t\t\t\t\tTimestamp: gl.GetTimestamp(),\n\t\t\t\t\t\t\t\t\t\tPrefix:    gl.GetPrefix(),\n\t\t\t\t\t\t\t\t\t\tDelete:    gl.GetDelete(),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tgc.logger.Printf(\"target %q failed internal cache query: %v\", ro.Target, err)\n\t\t\t\t\tch <- &Notification{Name: name, Err: err}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(name, c)\n\t}\n\twg.Wait()\n}\n\nfunc (gc *gnmiCache) handleSampledQuery(ctx context.Context, ro *ReadOpts, ch chan *Notification) {\n\tif !ro.UpdatesOnly {\n\t\tgc.handleSingleQuery(ctx, ro, ch)\n\t}\n\n\tticker := time.NewTicker(ro.SampleInterval)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tgc.logger.Printf(\"periodic query to target %q stopped: %v\", ro.Target, ctx.Err())\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tgc.handleSingleQuery(ctx, ro, ch)\n\t\t}\n\t}\n}\n\nfunc (gc *gnmiCache) handleOnChangeQuery(ctx context.Context, ro *ReadOpts, ch chan *Notification) {\n\tcaches := gc.getCaches(ro.Subscription)\n\tnumCaches := len(caches)\n\tgc.logger.Printf(\"on-change query got %d cache(s)\", 
numCaches)\n\n\twg := new(sync.WaitGroup)\n\twg.Add(numCaches)\n\n\tfor name, c := range caches {\n\t\tgo func(name string, c *subCache) {\n\t\t\tdefer wg.Done()\n\t\t\tif !c.c.HasTarget(ro.Target) {\n\t\t\t\tif gc.debug {\n\t\t\t\t\tgc.logger.Printf(\"subscription-cache %q doesn't have target: %q\", name, ro.Target)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, p := range ro.Paths {\n\t\t\t\tcp, err := path.CompletePath(p, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tgc.logger.Printf(\"failed to generate CompletePath from %v\", p)\n\t\t\t\t\tch <- &Notification{Name: name, Err: err}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// handle updates only\n\t\t\t\tif !ro.UpdatesOnly {\n\t\t\t\t\terr = c.c.Query(ro.Target, cp,\n\t\t\t\t\t\tfunc(_ []string, l *ctree.Leaf, _ interface{}) error {\n\t\t\t\t\t\t\tswitch gl := l.Value().(type) {\n\t\t\t\t\t\t\tcase *gnmi.Notification:\n\t\t\t\t\t\t\t\tch <- &Notification{Name: name, Notification: gl}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tgc.logger.Printf(\"failed to run cache query for target %q and path %q: %v\", ro.Target, cp, err)\n\t\t\t\t\t\tch <- &Notification{Name: name, Err: err}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// main on-change subscription\n\t\t\t\tfp := make([]string, 0, len(cp)+1)\n\t\t\t\tfp = append(fp, ro.Target)\n\t\t\t\tfp = append(fp, cp...)\n\t\t\t\t// set callback\n\t\t\t\tmc := &matchClient{name: name, ch: ch}\n\t\t\t\tremove := c.match.AddQuery(fp, mc)\n\t\t\t\tdefer remove()\n\n\t\t\t\t// handle on-change heartbeat\n\t\t\t\tif ro.HeartbeatInterval > 0 {\n\t\t\t\t\t// run a sampled query using heartbeat interval as sample interval\n\t\t\t\t\tgc.handleSampledQuery(ctx, &ReadOpts{\n\t\t\t\t\t\tSubscription:   ro.Subscription,\n\t\t\t\t\t\tTarget:         ro.Target,\n\t\t\t\t\t\tPaths:          ro.Paths,\n\t\t\t\t\t\tMode:           ReadMode_StreamSample,\n\t\t\t\t\t\tSampleInterval: ro.HeartbeatInterval,\n\t\t\t\t\t\tOverrideTS:     
ro.OverrideTS,\n\t\t\t\t\t}, ch)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor range ctx.Done() {\n\t\t\t}\n\t\t}(name, c)\n\t}\n\twg.Wait()\n}\n\nfunc (gc *gnmiCache) Stop() {}\n\nfunc (gc *gnmiCache) read(sub, target string, p *gnmi.Path) map[string][]*gnmi.Notification {\n\tnotificationChan := make(chan *Notification)\n\tnotifications := make(map[string][]*gnmi.Notification, 0)\n\tdoneCh := make(chan struct{})\n\t// this go routine will collect all the notifications\n\t// from the cache queries\n\tgo func() {\n\t\tfor nn := range notificationChan {\n\t\t\tif _, ok := notifications[nn.Name]; !ok {\n\t\t\t\tnotifications[nn.Name] = make([]*gnmi.Notification, 0)\n\t\t\t}\n\t\t\tnotifications[nn.Name] = append(notifications[nn.Name], nn.Notification)\n\t\t}\n\t\tclose(doneCh)\n\t}()\n\tif sub == \"*\" {\n\t\tsub = \"\"\n\t}\n\tnow := time.Now()\n\twg := new(sync.WaitGroup)\n\tcaches := gc.getCaches(sub)\n\twg.Add(len(caches))\n\n\tfor name, c := range caches {\n\t\tgo func(c *subCache, name string) {\n\t\t\tdefer wg.Done()\n\t\t\tcp, err := path.CompletePath(p, nil)\n\t\t\tif err != nil {\n\t\t\t\tgc.logger.Printf(\"failed to generate CompletePath from %v\", p)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = c.c.Query(target, cp,\n\t\t\t\tfunc(_ []string, _ *ctree.Leaf, v interface{}) error {\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tswitch notif := v.(type) {\n\t\t\t\t\tcase *gnmi.Notification:\n\t\t\t\t\t\tif gc.expiration > 0 &&\n\t\t\t\t\t\t\ttime.Unix(0, notif.Timestamp).Before(now.Add(time.Duration(-gc.expiration))) {\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tnotificationChan <- &Notification{\n\t\t\t\t\t\t\tName:         name,\n\t\t\t\t\t\t\tNotification: notif,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tgc.logger.Printf(\"failed cache query:%v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}(c, name)\n\t}\n\twg.Wait()\n\tclose(notificationChan)\n\t// wait for notifications to be appended to the 
array\n\t<-doneCh\n\treturn notifications\n}\n\nfunc (gc *gnmiCache) getCaches(names ...string) map[string]*subCache {\n\tgc.m.Lock()\n\tdefer gc.m.Unlock()\n\n\tcaches := make(map[string]*subCache)\n\tnumCaches := len(names)\n\tif numCaches == 0 || (numCaches == 1 && names[0] == \"\") {\n\t\tfor n, c := range gc.caches {\n\t\t\tcaches[n] = c\n\t\t}\n\t\treturn caches\n\t}\n\tfor _, n := range names {\n\t\tif c, ok := gc.caches[n]; ok {\n\t\t\tcaches[n] = c\n\t\t}\n\t}\n\treturn caches\n}\n\nfunc (gc *gnmiCache) DeleteTarget(name string) {\n\tcaches := gc.getCaches()\n\tfor _, c := range caches {\n\t\tc.c.Remove(name)\n\t}\n}\n\n// match client\ntype matchClient struct {\n\tname string\n\tch   chan *Notification\n}\n\nfunc (m *matchClient) Update(n interface{}) {\n\tswitch n := n.(type) {\n\tcase *ctree.Leaf:\n\t\tswitch v := n.Value().(type) {\n\t\tcase *gnmi.Notification:\n\t\t\tm.ch <- &Notification{\n\t\t\t\tName:         m.name,\n\t\t\t\tNotification: v,\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/cache/oc_cache_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cache\n\nimport (\n\t\"context\"\n\t\"log\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n)\n\nfunc Test_gnmiCache_read(t *testing.T) {\n\ttype input struct {\n\t\tmeasName string\n\t\ttarget   string\n\t\tm        *gnmi.SubscribeResponse\n\t}\n\ttype fields struct {\n\t\tinputs []input\n\t}\n\ttype args struct {\n\t\tsub    string\n\t\ttarget string\n\t\tp      *gnmi.Path\n\t}\n\ttests := []struct {\n\t\tname              string\n\t\tfields            fields\n\t\targs              args\n\t\twant              map[string][]*gnmi.Notification\n\t\texpectedRespCount int\n\t}{\n\t\t{\n\t\t\tname: \"test1\",\n\t\t\tfields: fields{\n\t\t\t\tinputs: []input{\n\t\t\t\t\t{\n\t\t\t\t\t\tmeasName: \"sub1\",\n\t\t\t\t\t\ttarget:   \"t1\",\n\t\t\t\t\t\tm: &gnmi.SubscribeResponse{\n\t\t\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\t\t\t\t\t\tPrefix:    &gnmi.Path{Target: \"t1\"},\n\t\t\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: 
\"srl1\"},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tmeasName: \"sub1\",\n\t\t\t\t\t\ttarget:   \"t1\",\n\t\t\t\t\t\tm: &gnmi.SubscribeResponse{\n\t\t\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\t\t\t\t\t\tPrefix:    &gnmi.Path{Target: \"t1\"},\n\t\t\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"admin-state\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: \"enable\"},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tsub:    \"sub1\",\n\t\t\t\ttarget: \"*\",\n\t\t\t\tp: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t}},\n\t\t\t},\n\t\t\twant:              map[string][]*gnmi.Notification{},\n\t\t\texpectedRespCount: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"test2\",\n\t\t\tfields: fields{\n\t\t\t\tinputs: []input{\n\t\t\t\t\t{\n\t\t\t\t\t\tmeasName: \"sub1\",\n\t\t\t\t\t\ttarget:   \"t1\",\n\t\t\t\t\t\tm: &gnmi.SubscribeResponse{\n\t\t\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\t\t\tTimestamp: 
time.Now().UnixNano(),\n\t\t\t\t\t\t\t\t\tPrefix:    &gnmi.Path{Target: \"t1\"},\n\t\t\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: \"srl1\"},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tmeasName: \"sub1\",\n\t\t\t\t\t\ttarget:   \"t1\",\n\t\t\t\t\t\tm: &gnmi.SubscribeResponse{\n\t\t\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\t\t\t\t\t\tPrefix:    &gnmi.Path{Target: \"t1\"},\n\t\t\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"admin-state\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: \"enable\"},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tsub:    \"sub1\",\n\t\t\t\ttarget: \"*\",\n\t\t\t\tp: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: 
\"interface\",\n\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{Name: \"admin-state\"},\n\t\t\t\t\t}},\n\t\t\t},\n\t\t\twant:              map[string][]*gnmi.Notification{},\n\t\t\texpectedRespCount: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"readAll_same_subscription\",\n\t\t\tfields: fields{\n\t\t\t\tinputs: []input{\n\t\t\t\t\t{\n\t\t\t\t\t\tmeasName: \"sub1\",\n\t\t\t\t\t\ttarget:   \"t1\",\n\t\t\t\t\t\tm: &gnmi.SubscribeResponse{\n\t\t\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\t\t\t\t\t\tPrefix:    &gnmi.Path{Target: \"t1\"},\n\t\t\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: \"srl1\"},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tmeasName: \"sub1\",\n\t\t\t\t\t\ttarget:   \"t1\",\n\t\t\t\t\t\tm: &gnmi.SubscribeResponse{\n\t\t\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\t\t\t\t\t\tPrefix:    &gnmi.Path{Target: \"t1\"},\n\t\t\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tKey: 
map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"admin-state\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: \"enable\"},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tsub:    \"\",\n\t\t\t\ttarget: \"*\",\n\t\t\t\tp:      nil,\n\t\t\t},\n\t\t\twant:              map[string][]*gnmi.Notification{},\n\t\t\texpectedRespCount: 2,\n\t\t},\n\t\t{\n\t\t\tname: \"readAll\",\n\t\t\tfields: fields{\n\t\t\t\tinputs: []input{\n\t\t\t\t\t{\n\t\t\t\t\t\tmeasName: \"sub1\",\n\t\t\t\t\t\ttarget:   \"t1\",\n\t\t\t\t\t\tm: &gnmi.SubscribeResponse{\n\t\t\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\t\t\t\t\t\tPrefix:    &gnmi.Path{Target: \"t1\"},\n\t\t\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"system\"},\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"name\"},\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"host-name\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: \"srl1\"},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tmeasName: \"sub2\",\n\t\t\t\t\t\ttarget:   \"t1\",\n\t\t\t\t\t\tm: &gnmi.SubscribeResponse{\n\t\t\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\t\t\tUpdate: 
&gnmi.Notification{\n\t\t\t\t\t\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\t\t\t\t\t\tPrefix:    &gnmi.Path{Target: \"t1\"},\n\t\t\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t{Name: \"admin-state\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: \"enable\"},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tsub:    \"\",\n\t\t\t\ttarget: \"*\",\n\t\t\t\tp:      nil,\n\t\t\t},\n\t\t\twant:              map[string][]*gnmi.Notification{},\n\t\t\texpectedRespCount: 2,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgc := newGNMICache(&Config{}, \"oc\", WithLogger(log.Default()))\n\t\t\tfor _, in := range tt.fields.inputs {\n\t\t\t\tgc.Write(context.TODO(), in.measName, in.m)\n\t\t\t}\n\n\t\t\trsp := gc.read(tt.args.sub, tt.args.target, tt.args.p)\n\t\t\tif _, ok := rsp[tt.args.sub]; !ok && tt.args.sub != \"\" {\n\t\t\t\tt.Errorf(\"%s: response does not contain the expected subscription name\", tt.name)\n\t\t\t}\n\t\t\tvar rspCount int\n\t\t\tif tt.args.sub == \"\" {\n\t\t\t\tfor _, rsps := range rsp {\n\t\t\t\t\trspCount += len(rsps)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\trspCount = len(rsp[tt.args.sub])\n\t\t\t}\n\t\t\tif tt.expectedRespCount != rspCount {\n\t\t\t\tt.Errorf(\"%s: unexpected response count, got %d, expected %d\", tt.name, rspCount, 
tt.expectedRespCount)\n\t\t\t}\n\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/cache/options.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cache\n\nimport \"log\"\n\ntype Option func(Cache)\n\nfunc WithLogger(logger *log.Logger) Option {\n\treturn func(c Cache) {\n\t\tc.SetLogger(logger)\n\t}\n}\n"
  },
  {
    "path": "pkg/cache/redis_cache.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cache\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"google.golang.org/protobuf/proto\"\n\n\tredis \"github.com/go-redis/redis/v8\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n)\n\nconst (\n\tloggingPrefixRedis   = \"[cache:redis] \"\n\tcacheChannelsChannel = \"gnmic_cache_channels\"\n\tdefaultRedisAddress  = \"127.0.0.1:6379\"\n)\n\ntype redisCache struct {\n\tcfg *Config\n\toc  *gnmiCache\n\tcfn context.CancelFunc\n\n\tc           *redis.Client\n\tchannelChan chan string\n\tm           *sync.RWMutex\n\tchannels    map[string]struct{}\n\tlogger      *log.Logger\n}\n\nfunc newRedisCache(cfg *Config, opts ...Option) (*redisCache, error) {\n\tif cfg == nil {\n\t\tcfg = &Config{\n\t\t\tType:    cacheType_Redis,\n\t\t\tAddress: defaultRedisAddress,\n\t\t}\n\t}\n\tcfg.setDefaults()\n\n\tc := &redisCache{\n\t\tcfg:         cfg,\n\t\toc:          newGNMICache(cfg, \"redis\", opts...),\n\t\tchannelChan: make(chan string),\n\t\tm:           new(sync.RWMutex),\n\t\tchannels:    make(map[string]struct{}),\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(c)\n\t}\n\tif c.logger == nil {\n\t\tc.logger = log.New(os.Stderr, loggingPrefixRedis, utils.DefaultLoggingFlags)\n\t}\nCLIENT:\n\tc.c = redis.NewClient(&redis.Options{\n\t\tAddr:     cfg.Address,\n\t\tUsername: cfg.Username,\n\t\tPassword: cfg.Password,\n\t\tDB:       0,\n\t})\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tc.cfn = cancel\n\n\tpong, 
err := c.c.Ping(ctx).Result()\n\tif err != nil {\n\t\tc.logger.Printf(\"failed to connect to redis: %v\", err)\n\t\ttime.Sleep(time.Second)\n\t\tgoto CLIENT\n\t}\n\n\tc.logger.Printf(\"ping result: %s\", pong)\n\tgo c.sync(ctx)\n\treturn c, nil\n}\n\nfunc (c *redisCache) SetLogger(logger *log.Logger) {\n\tif logger != nil && c.logger != nil {\n\t\tc.logger.SetOutput(logger.Writer())\n\t\tc.logger.SetFlags(logger.Flags())\n\t\tc.logger.SetPrefix(loggingPrefixRedis)\n\t}\n}\n\nfunc (c *redisCache) Write(ctx context.Context, subscriptionName string, m proto.Message) {\n\t// write the msg to redis\n\tc.writeRemoteREDIS(ctx, subscriptionName, m)\n\t// publish the subscription name to redis for other gnmic instances\n\tvar ok bool\n\tc.m.RLock()\n\tdefer func() {\n\t\tc.m.RUnlock()\n\t\tif !ok {\n\t\t\tc.m.Lock()\n\t\t\tc.channels[subscriptionName] = struct{}{}\n\t\t\tc.m.Unlock()\n\t\t\tc.c.Publish(ctx, cacheChannelsChannel, []byte(subscriptionName))\n\t\t}\n\t}()\n\t_, ok = c.channels[subscriptionName]\n}\n\nfunc (c *redisCache) writeRemoteREDIS(ctx context.Context, subscriptionName string, m proto.Message) {\n\tswitch m := m.ProtoReflect().Interface().(type) {\n\tcase *gnmi.SubscribeResponse:\n\t\tswitch rsp := m.GetResponse().(type) {\n\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\ttargetName := rsp.Update.GetPrefix().GetTarget()\n\t\t\tif targetName == \"\" {\n\t\t\t\tc.logger.Printf(\"subscription=%q: response missing target: %v\", subscriptionName, rsp)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.channelChan <- subscriptionName\n\t\t\tvar err error\n\t\t\terr = c.publishNotificationREDIS(ctx, subscriptionName, targetName, m)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Print(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *redisCache) publishNotificationREDIS(ctx context.Context, subscriptionName, targetName string, r *gnmi.SubscribeResponse) error {\n\tctx, cancel := context.WithTimeout(ctx, c.cfg.Timeout)\n\tdefer cancel()\n\n\tb, err := proto.Marshal(r)\n\tif err != nil 
{\n\t\treturn fmt.Errorf(\"failed to marshal proto message: %w\", err)\n\t}\n\tstatus := c.c.Publish(ctx, fmt.Sprintf(\"%s.%s\", subscriptionName, targetName), b)\n\tif status.Err() != nil {\n\t\terr = fmt.Errorf(\"failed to publish statusErr: %v\", status.Err())\n\t\tc.logger.Print(err)\n\t\treturn err\n\t}\n\t_, err = status.Result()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to publish resultErr: %v\", err)\n\t\tc.logger.Print(err)\n\t}\n\treturn nil\n}\n\nfunc (c *redisCache) ReadAll() (map[string][]*gnmi.Notification, error) {\n\treturn c.oc.ReadAll()\n}\n\nfunc (c *redisCache) Read(sub, target string, p *gnmi.Path) (map[string][]*gnmi.Notification, error) {\n\treturn c.oc.read(sub, target, p), nil\n}\n\nfunc (c *redisCache) sync(ctx context.Context) {\n\tc.logger.Printf(\"start redis sync\")\n\t// subscribe to cache channel updates\n\t// and periodically reset the local channels map.\n\tgo func() {\n\t\tticker := time.NewTicker(subjectCacheResetPeriod)\n\t\tchannelSub := c.c.Subscribe(ctx, cacheChannelsChannel)\n\t\tdefer channelSub.Close()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase msg := <-channelSub.Channel():\n\t\t\t\t// pass the channel name to start syncChannel func\n\t\t\t\tc.channelChan <- msg.Payload\n\t\t\tcase <-ticker.C:\n\t\t\t\t// reset local channels map to re trigger broadcast\n\t\t\t\tc.m.Lock()\n\t\t\t\tc.channels = make(map[string]struct{})\n\t\t\t\tc.m.Unlock()\n\t\t\t}\n\t\t}\n\t}()\n\n\t// keeps track of channels for which a syncChannel has been started\n\tchannels := make(map[string]struct{})\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase cc := <-c.channelChan:\n\t\t\tc.m.Lock()\n\t\t\tif _, ok := channels[cc]; !ok {\n\t\t\t\tchannels[cc] = struct{}{}\n\t\t\t\tc.logger.Printf(\"starting redis channel %q sync\", cc)\n\t\t\t\tgo c.syncChannel(ctx, cc)\n\t\t\t}\n\t\t\tc.m.Unlock()\n\t\t}\n\t}\n}\n\n// syncChannel subscribes to redis channel updates and syncs the local 
cache\nfunc (c *redisCache) syncChannel(ctx context.Context, channel string) {\n\tsub := c.c.PSubscribe(ctx, fmt.Sprintf(\"%s*\", channel))\n\tdefer sub.Close()\n\ti := 0\n\tfor {\n\t\tselect {\n\t\tcase msg := <-sub.Channel():\n\t\t\tif len(msg.Payload) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tm := new(gnmi.SubscribeResponse)\n\t\t\terr := proto.Unmarshal([]byte(msg.Payload), m)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"failed to unmarshal proto msg: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.oc.Write(ctx, channel, m)\n\t\t\ti++\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *redisCache) Subscribe(ctx context.Context, ro *ReadOpts) chan *Notification {\n\treturn c.oc.Subscribe(ctx, ro)\n}\n\nfunc (c *redisCache) Stop() {\n\tc.cfn()\n\tif c.c != nil {\n\t\tc.c.Close()\n\t}\n}\n\nfunc (c *redisCache) DeleteTarget(name string) {\n\tc.oc.DeleteTarget(name)\n}\n"
  },
  {
    "path": "pkg/cmd/capabilities/capabilities.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage capabilities\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\n// capabilitiesCmd represents the capabilities command\nfunc New(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"capabilities\",\n\t\tAliases:      []string{\"cap\"},\n\t\tShort:        \"query targets gnmi capabilities\",\n\t\tPreRunE:      gApp.CapPreRunE,\n\t\tRunE:         gApp.CapRunE,\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitCapabilitiesFlags(cmd)\n\treturn cmd\n}\n"
  },
  {
    "path": "pkg/cmd/collector/collector.go",
    "content": "// © 2025 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage collector\n\nimport (\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/openconfig/gnmic/pkg/collector\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\n// New create the collector command tree.\nfunc New(gApp *app.App) *cobra.Command {\n\tc := collector.New(gApp.Context(), gApp.Store)\n\tcmd := &cobra.Command{\n\t\tUse:     \"collect\",\n\t\tAliases: []string{\"c\", \"coll\", \"collector\"},\n\t\tShort:   \"collect gNMI telemetry from targets\",\n\t\tPreRunE: c.CollectorPreRunE,\n\t\tRunE:    c.CollectorRunE,\n\t\tPostRun: func(cmd *cobra.Command, args []string) {\n\t\t\tgApp.CleanupPlugins()\n\t\t},\n\t\tSilenceUsage: true,\n\t}\n\tc.InitCollectorFlags(cmd)\n\tcmd.AddCommand(newCollectorTargetsCmd(gApp))\n\tcmd.AddCommand(newCollectorSubscriptionsCmd(gApp))\n\tcmd.AddCommand(newCollectorOutputsCmd(gApp))\n\tcmd.AddCommand(newCollectorProcessorsCmd(gApp))\n\tcmd.AddCommand(newCollectorInputsCmd(gApp))\n\treturn cmd\n}\n\nfunc getAPIServerURL(store store.Store[any]) (string, error) {\n\tapiServerConfig, ok, err := store.Get(\"api-server\", \"api-server\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"api-server config not found\")\n\t}\n\tapiCfg, ok := apiServerConfig.(*config.APIServer)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"api-server config is required for collector command\")\n\t}\n\tif apiCfg == nil {\n\t\treturn 
\"\", fmt.Errorf(\"api-server config is required for collector command\")\n\t}\n\tif apiCfg.TLS != nil {\n\t\treturn \"https://\" + apiCfg.Address, nil\n\t}\n\treturn \"http://\" + apiCfg.Address, nil\n}\n\nfunc getAPIServerClient(store store.Store[any]) (*http.Client, error) {\n\tapiServerConfig, ok, err := store.Get(\"api-server\", \"api-server\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"api-server config not found\")\n\t}\n\tapiCfg, ok := apiServerConfig.(*config.APIServer)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"address not found\")\n\t}\n\tif apiCfg.TLS != nil {\n\t\treturn &http.Client{\n\t\t\tTimeout: apiCfg.Timeout,\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\treturn &http.Client{\n\t\tTimeout: apiCfg.Timeout,\n\t}, nil\n}\n"
  },
  {
    "path": "pkg/cmd/collector/inputs.go",
    "content": "// © 2025 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage collector\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\t\"sort\"\n\n\t\"github.com/olekukonko/tablewriter\"\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc newCollectorInputsCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"inputs\",\n\t\tAliases:      []string{\"input\", \"in\"},\n\t\tShort:        \"manage inputs\",\n\t\tSilenceUsage: true,\n\t}\n\tcmd.AddCommand(\n\t\tnewCollectorInputsListCmd(gApp),\n\t\tnewCollectorInputsGetCmd(gApp),\n\t\tnewCollectorInputsSetCmd(gApp),\n\t\tnewCollectorInputsDeleteCmd(gApp),\n\t)\n\treturn cmd\n}\n\nfunc newCollectorInputsListCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"list\",\n\t\tAliases:      []string{\"ls\"},\n\t\tShort:        \"list inputs\",\n\t\tSilenceUsage: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Get(apiURL + \"/api/v1/config/inputs\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\ttb, err := io.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"failed to list inputs, status code: %d: %s\", 
resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\t// Parse the response as array of maps\n\t\t\tinputsResponse := make(map[string]interface{}, 0)\n\t\t\terr = json.Unmarshal(tb, &inputsResponse)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tinputs := make([]map[string]interface{}, 0)\n\t\t\tfor name, input := range inputsResponse {\n\t\t\t\tswitch input := input.(type) {\n\t\t\t\tcase map[string]any:\n\t\t\t\t\tinput[\"name\"] = name\n\t\t\t\t\tinputs = append(inputs, input)\n\t\t\t\tdefault:\n\t\t\t\t\treturn fmt.Errorf(\"unknown input type: %T\", input)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Display as horizontal table\n\t\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\t\ttable.SetHeader([]string{\"Name\", \"Type\", \"Format\", \"Event Processors\"})\n\t\t\ttable.SetAutoWrapText(false)\n\t\t\ttable.SetAutoFormatHeaders(true)\n\t\t\ttable.SetHeaderAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetCenterSeparator(\"\")\n\t\t\ttable.SetColumnSeparator(\"\")\n\t\t\ttable.SetRowSeparator(\"\")\n\t\t\ttable.SetHeaderLine(false)\n\t\t\ttable.SetBorder(false)\n\t\t\ttable.SetTablePadding(\"\\t\")\n\t\t\ttable.SetNoWhiteSpace(true)\n\n\t\t\tdata := tableFormatInputsList(inputs)\n\t\t\ttable.AppendBulk(data)\n\t\t\ttable.Render()\n\n\t\t\treturn nil\n\t\t},\n\t}\n\treturn cmd\n}\n\nfunc newCollectorInputsGetCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"get\",\n\t\tAliases:      []string{\"g\", \"show\", \"sh\"},\n\t\tShort:        \"get an input\",\n\t\tSilenceUsage: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tname, err := cmd.Flags().GetString(\"name\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif name == \"\" {\n\t\t\t\treturn fmt.Errorf(\"input name is required\")\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif 
err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Get(apiURL + \"/api/v1/config/inputs/\" + name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\ttb, err := io.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"failed to get input, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\t// Parse the response as a map\n\t\t\tinput := make(map[string]any)\n\t\t\terr = json.Unmarshal(tb, &input)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Display as vertical table (key-value pairs)\n\t\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\t\ttable.SetHeader([]string{\"PARAM\", \"VALUE\"})\n\t\t\ttable.SetAutoWrapText(false)\n\t\t\ttable.SetAutoFormatHeaders(false)\n\t\t\ttable.SetHeaderAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetCenterSeparator(\"\")\n\t\t\ttable.SetColumnSeparator(\":\")\n\t\t\ttable.SetRowSeparator(\"\")\n\t\t\ttable.SetHeaderLine(false)\n\t\t\ttable.SetBorder(false)\n\t\t\ttable.SetTablePadding(\"\\t\")\n\t\t\ttable.SetNoWhiteSpace(true)\n\t\t\ttable.SetColumnAlignment([]int{tablewriter.ALIGN_RIGHT, tablewriter.ALIGN_LEFT})\n\n\t\t\tdata := tableFormatInputVertical(input)\n\t\t\ttable.AppendBulk(data)\n\t\t\ttable.Render()\n\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringP(\"name\", \"n\", \"\", \"input name\")\n\treturn cmd\n}\n\nfunc newCollectorInputsSetCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"set\",\n\t\tAliases:      []string{\"create\", \"cr\"},\n\t\tShort:        \"set an input\",\n\t\tSilenceUsage: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tinputConfigFile, err := cmd.Flags().GetString(\"input\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif inputConfigFile == \"\" {\n\t\t\t\treturn fmt.Errorf(\"input file is 
required\")\n\t\t\t}\n\t\t\tb, err := os.ReadFile(inputConfigFile)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvar inputConfig map[string]interface{}\n\t\t\terr = json.Unmarshal(b, &inputConfig)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Post(apiURL+\"/api/v1/config/inputs\", \"application/json\", bytes.NewBuffer(b))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\ttb, _ := io.ReadAll(resp.Body)\n\t\t\t\treturn fmt.Errorf(\"failed to create input, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\tinputName := formatValue(inputConfig[\"name\"])\n\t\t\tfmt.Fprintf(os.Stderr, \"Input '%s' created successfully\\n\", inputName)\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringP(\"input\", \"i\", \"\", \"input config file\")\n\treturn cmd\n}\n\nfunc newCollectorInputsDeleteCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"delete\",\n\t\tAliases:      []string{\"d\", \"del\", \"rm\"},\n\t\tShort:        \"delete an input\",\n\t\tSilenceUsage: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tname, err := cmd.Flags().GetString(\"name\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif name == \"\" {\n\t\t\t\treturn fmt.Errorf(\"input name is required\")\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treq, err := http.NewRequest(http.MethodDelete, apiURL+\"/api/v1/config/inputs/\"+name, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := 
client.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\ttb, _ := io.ReadAll(resp.Body)\n\t\t\t\treturn fmt.Errorf(\"failed to delete input, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\tfmt.Fprintln(os.Stderr, \"Input deleted successfully\")\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringP(\"name\", \"n\", \"\", \"input name\")\n\treturn cmd\n}\n\n// tableFormatOutputVertical formats a single output as vertical table (key-value pairs)\nfunc tableFormatInputVertical(input map[string]any) [][]string {\n\tdata := make([][]string, 0)\n\t// Sort keys for consistent output\n\tkeys := make([]string, 0, len(input))\n\tfor k := range input {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\t// Add each key-value pair\n\tfor _, key := range keys {\n\t\tvalue := input[key]\n\t\tformattedValue := formatValue(value)\n\t\tdata = append(data, []string{key, formattedValue})\n\t}\n\n\treturn data\n}\n\n// tableFormatInputsList formats multiple outputs as horizontal table (summary view)\nfunc tableFormatInputsList(inputs []map[string]any) [][]string {\n\tdata := make([][]string, 0, len(inputs))\n\n\tfor _, input := range inputs {\n\t\tname := formatValue(input[\"name\"])\n\t\tinputType := formatValue(input[\"type\"])\n\t\tformat := formatValue(input[\"format\"])\n\n\t\t// Handle event-processors\n\t\teventProcessors := \"-\"\n\t\tif ep, ok := input[\"event-processors\"]; ok {\n\t\t\teventProcessors = formatValueShort(ep)\n\t\t}\n\n\t\tdata = append(data, []string{\n\t\t\tname,\n\t\t\tinputType,\n\t\t\tformat,\n\t\t\teventProcessors,\n\t\t})\n\t}\n\n\t// Sort by name\n\tsort.Slice(data, func(i, j int) bool {\n\t\treturn data[i][0] < data[j][0]\n\t})\n\n\treturn data\n}\n"
  },
  {
    "path": "pkg/cmd/collector/ouputs.go",
    "content": "// © 2025 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage collector\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\t\"sort\"\n\n\t\"github.com/olekukonko/tablewriter\"\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc newCollectorOutputsCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"outputs\",\n\t\tAliases: []string{\"output\", \"out\"},\n\t\tShort:   \"manage outputs\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.AddCommand(\n\t\tnewCollectorOutputsListCmd(gApp),\n\t\tnewCollectorOutputsGetCmd(gApp),\n\t\tnewCollectorOutputsSetCmd(gApp),\n\t\tnewCollectorOutputsDeleteCmd(gApp),\n\t)\n\treturn cmd\n}\n\nfunc newCollectorOutputsListCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"list\",\n\t\tAliases: []string{\"ls\"},\n\t\tShort:   \"list outputs\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Get(apiURL + \"/api/v1/config/outputs\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\ttb, err := io.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"failed to list outputs, status code: %d: 
%s\", resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\t// Parse the response as array of maps\n\t\t\toutputsResponse := make(map[string]interface{}, 0)\n\t\t\terr = json.Unmarshal(tb, &outputsResponse)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\toutputs := make([]map[string]interface{}, 0)\n\t\t\tfor name, output := range outputsResponse {\n\t\t\t\tswitch output := output.(type) {\n\t\t\t\tcase map[string]any:\n\t\t\t\t\toutput[\"name\"] = name\n\t\t\t\t\toutputs = append(outputs, output)\n\t\t\t\tdefault:\n\t\t\t\t\treturn fmt.Errorf(\"unknown output type: %T\", output)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Display as horizontal table\n\t\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\t\ttable.SetHeader([]string{\"Name\", \"Type\", \"Format\", \"Event Processors\"})\n\t\t\ttable.SetAutoWrapText(false)\n\t\t\ttable.SetAutoFormatHeaders(true)\n\t\t\ttable.SetHeaderAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetCenterSeparator(\"\")\n\t\t\ttable.SetColumnSeparator(\"\")\n\t\t\ttable.SetRowSeparator(\"\")\n\t\t\ttable.SetHeaderLine(false)\n\t\t\ttable.SetBorder(false)\n\t\t\ttable.SetTablePadding(\"\\t\")\n\t\t\ttable.SetNoWhiteSpace(true)\n\n\t\t\tdata := tableFormatOutputsList(outputs)\n\t\t\ttable.AppendBulk(data)\n\t\t\ttable.Render()\n\n\t\t\treturn nil\n\t\t},\n\t}\n\treturn cmd\n}\n\nfunc newCollectorOutputsGetCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"get\",\n\t\tAliases: []string{\"g\", \"show\", \"sh\"},\n\t\tShort:   \"get an output\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tname, err := cmd.Flags().GetString(\"name\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif name == \"\" {\n\t\t\t\treturn fmt.Errorf(\"output name is required\")\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil 
{\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Get(apiURL + \"/api/v1/config/outputs/\" + name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\ttb, err := io.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"failed to get output, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\t// Parse the response as a map\n\t\t\toutput := make(map[string]any)\n\t\t\terr = json.Unmarshal(tb, &output)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Display as vertical table (key-value pairs)\n\t\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\t\ttable.SetHeader([]string{\"PARAM\", \"VALUE\"})\n\t\t\ttable.SetAutoWrapText(false)\n\t\t\ttable.SetAutoFormatHeaders(false)\n\t\t\ttable.SetHeaderAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetCenterSeparator(\"\")\n\t\t\ttable.SetColumnSeparator(\":\")\n\t\t\ttable.SetRowSeparator(\"\")\n\t\t\ttable.SetHeaderLine(false)\n\t\t\ttable.SetBorder(false)\n\t\t\ttable.SetTablePadding(\"\\t\")\n\t\t\ttable.SetNoWhiteSpace(true)\n\t\t\ttable.SetColumnAlignment([]int{tablewriter.ALIGN_RIGHT, tablewriter.ALIGN_LEFT})\n\n\t\t\tdata := tableFormatOutputVertical(output)\n\t\t\ttable.AppendBulk(data)\n\t\t\ttable.Render()\n\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringP(\"name\", \"n\", \"\", \"output name\")\n\treturn cmd\n}\n\nfunc newCollectorOutputsSetCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"set\",\n\t\tAliases: []string{\"create\", \"cr\"},\n\t\tShort:   \"set an output\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tinputConfig, err := cmd.Flags().GetString(\"input\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif inputConfig == \"\" {\n\t\t\t\treturn fmt.Errorf(\"input file is required\")\n\t\t\t}\n\t\t\tb, err := 
os.ReadFile(inputConfig)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvar outputConfig map[string]interface{}\n\t\t\terr = json.Unmarshal(b, &outputConfig)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Post(apiURL+\"/api/v1/config/outputs\", \"application/json\", bytes.NewBuffer(b))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\ttb, _ := io.ReadAll(resp.Body)\n\t\t\t\treturn fmt.Errorf(\"failed to create output, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\toutputName := formatValue(outputConfig[\"name\"])\n\t\t\tfmt.Fprintf(os.Stderr, \"Output '%s' created successfully\\n\", outputName)\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringP(\"input\", \"i\", \"\", \"output config file\")\n\treturn cmd\n}\n\nfunc newCollectorOutputsDeleteCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"delete\",\n\t\tAliases: []string{\"d\", \"del\", \"rm\"},\n\t\tShort:   \"delete an output\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tname, err := cmd.Flags().GetString(\"name\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif name == \"\" {\n\t\t\t\treturn fmt.Errorf(\"output name is required\")\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treq, err := http.NewRequest(http.MethodDelete, apiURL+\"/api/v1/config/outputs/\"+name, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\ttb, _ := io.ReadAll(resp.Body)\n\t\t\t\treturn fmt.Errorf(\"failed to delete output, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\tfmt.Fprintln(os.Stderr, \"Output deleted successfully\")\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringP(\"name\", \"n\", \"\", \"output name\")\n\treturn cmd\n}\n\n// tableFormatOutputVertical formats a single output as vertical table (key-value pairs)\nfunc tableFormatOutputVertical(output map[string]any) [][]string {\n\tdata := make([][]string, 0)\n\n\t// Sort keys for consistent output\n\tkeys := make([]string, 0, len(output))\n\tfor k := range output {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\t// Add each key-value pair\n\tfor _, key := range keys {\n\t\tvalue := output[key]\n\t\tformattedValue := formatValue(value)\n\t\tdata = append(data, []string{key, formattedValue})\n\t}\n\n\treturn data\n}\n\n// tableFormatOutputsList formats multiple outputs as horizontal table (summary view)\nfunc tableFormatOutputsList(outputs []map[string]any) [][]string {\n\tdata := make([][]string, 0, len(outputs))\n\n\tfor _, output := range outputs {\n\t\tname := formatValue(output[\"name\"])\n\t\toutputType := formatValue(output[\"type\"])\n\t\tformat := formatValue(output[\"format\"])\n\n\t\t// Handle event-processors\n\t\teventProcessors := \"-\"\n\t\tif ep, ok := output[\"event-processors\"]; ok {\n\t\t\teventProcessors = formatValueShort(ep)\n\t\t}\n\n\t\tdata = append(data, []string{\n\t\t\tname,\n\t\t\toutputType,\n\t\t\tformat,\n\t\t\teventProcessors,\n\t\t})\n\t}\n\n\t// Sort by name\n\tsort.Slice(data, func(i, j int) bool {\n\t\treturn data[i][0] < data[j][0]\n\t})\n\n\treturn data\n}\n"
  },
  {
    "path": "pkg/cmd/collector/processors.go",
    "content": "package collector\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\n\t\"github.com/hairyhenderson/yaml\"\n\t\"github.com/olekukonko/tablewriter\"\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\tapiserver \"github.com/openconfig/gnmic/pkg/collector/api/server\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc newCollectorProcessorsCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"processors\",\n\t\tAliases:      []string{\"processor\", \"proc\"},\n\t\tShort:        \"manage processors\",\n\t\tSilenceUsage: true,\n\t}\n\tcmd.AddCommand(\n\t\tnewCollectorProcessorsListCmd(gApp),\n\t\tnewCollectorProcessorsGetCmd(gApp),\n\t\tnewCollectorProcessorsSetCmd(gApp),\n\t\tnewCollectorProcessorsDeleteCmd(gApp),\n\t)\n\treturn cmd\n}\n\nfunc newCollectorProcessorsListCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"list\",\n\t\tAliases: []string{\"ls\"},\n\t\tShort:   \"list processors\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tdetailsFlag, err := cmd.Flags().GetBool(\"details\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Get(apiURL + \"/api/v1/config/processors\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\ttb, err := io.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"failed to list processors, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\t\t\tprocessors := make([]apiserver.ProcessorConfigResponse, 0)\n\t\t\terr = json.Unmarshal(tb, &processors)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\t\tif 
detailsFlag {\n\t\t\t\ttable.SetHeader([]string{\"Name\", \"Type\", \"Config\"})\n\t\t\t} else {\n\t\t\t\ttable.SetHeader([]string{\"Name\", \"Type\"})\n\t\t\t}\n\t\t\ttable.SetAutoWrapText(false)\n\t\t\ttable.SetAutoFormatHeaders(true)\n\t\t\ttable.SetHeaderAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetCenterSeparator(\"\")\n\t\t\ttable.SetColumnSeparator(\"\")\n\t\t\ttable.SetRowSeparator(\"\")\n\t\t\ttable.SetHeaderLine(false)\n\t\t\ttable.SetBorder(false)\n\t\t\ttable.SetTablePadding(\"\\t\")\n\t\t\ttable.SetNoWhiteSpace(true)\n\n\t\t\tdata := tableFormatProcessorsList(processors, detailsFlag)\n\t\t\ttable.AppendBulk(data)\n\t\t\ttable.Render()\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().BoolP(\"details\", \"\", false, \"show processors details\")\n\treturn cmd\n}\n\nfunc newCollectorProcessorsGetCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"get\",\n\t\tAliases:      []string{\"get\"},\n\t\tShort:        \"get a processor\",\n\t\tSilenceUsage: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tname, err := cmd.Flags().GetString(\"name\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif name == \"\" {\n\t\t\t\treturn fmt.Errorf(\"processor name is required\")\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Get(apiURL + \"/api/v1/config/processors/\" + name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tprocessorBytes, err := io.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"failed to get processor, status code: %d: %s\", resp.StatusCode, string(processorBytes))\n\t\t\t}\n\t\t\tprocessor := 
new(apiserver.ProcessorConfigResponse)\n\t\t\terr = json.Unmarshal(processorBytes, processor)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\t\ttable.SetHeader([]string{\"Name\", \"Type\", \"Config\"})\n\t\t\ttable.SetAutoWrapText(false)\n\t\t\ttable.SetAutoFormatHeaders(true)\n\t\t\ttable.SetHeaderAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetCenterSeparator(\"\")\n\t\t\ttable.SetColumnSeparator(\"\")\n\t\t\ttable.SetRowSeparator(\"\")\n\t\t\ttable.SetHeaderLine(false)\n\t\t\ttable.SetBorder(false)\n\t\t\ttable.SetTablePadding(\"\\t\")\n\t\t\ttable.SetNoWhiteSpace(true)\n\t\t\ttable.SetColumnAlignment([]int{tablewriter.ALIGN_RIGHT, tablewriter.ALIGN_LEFT})\n\t\t\tdata := tableFormatProcessorVertical(*processor)\n\t\t\ttable.AppendBulk(data)\n\t\t\ttable.Render()\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringP(\"name\", \"n\", \"\", \"processor name\")\n\tcmd.MarkFlagRequired(\"name\")\n\n\treturn cmd\n}\n\nfunc newCollectorProcessorsSetCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"set\",\n\t\tAliases:      []string{\"set\", \"create\", \"cr\"},\n\t\tShort:        \"set a processor\",\n\t\tSilenceUsage: true,\n\t}\n\treturn cmd\n}\n\nfunc newCollectorProcessorsDeleteCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"delete\",\n\t\tAliases:      []string{\"delete\"},\n\t\tShort:        \"delete a processor\",\n\t\tSilenceUsage: true,\n\t}\n\treturn cmd\n}\n\nfunc tableFormatProcessorsList(processors []apiserver.ProcessorConfigResponse, detailsFlag bool) [][]string {\n\tdata := make([][]string, 0, len(processors))\n\tfor _, processor := range processors {\n\t\tif detailsFlag {\n\t\t\tdata = append(data, []string{processor.Name, processor.Type, formatProcessorConfig(processor.Config)})\n\t\t} else {\n\t\t\tdata = append(data, []string{processor.Name, 
processor.Type})\n\t\t}\n\t}\n\treturn data\n}\n\nfunc tableFormatProcessorVertical(processor apiserver.ProcessorConfigResponse) [][]string {\n\tdata := make([][]string, 0, 1)\n\tdata = append(data, []string{processor.Name, processor.Type, formatProcessorConfig(processor.Config)})\n\treturn data\n}\n\nfunc formatProcessorConfig(config any) string {\n\tb, err := yaml.Marshal(config)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n"
  },
  {
    "path": "pkg/cmd/collector/subscriptions.go",
    "content": "// © 2025 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage collector\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/mitchellh/mapstructure\"\n\t\"github.com/olekukonko/tablewriter\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\tapiserver \"github.com/openconfig/gnmic/pkg/collector/api/server\"\n\t\"github.com/spf13/cobra\"\n\t\"gopkg.in/yaml.v2\"\n)\n\nfunc newCollectorSubscriptionsCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"subscriptions\",\n\t\tAliases:      []string{\"subscription\", \"sub\"},\n\t\tShort:        \"manage subscriptions\",\n\t\tSilenceUsage: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.AddCommand(\n\t\tnewCollectorSubscriptionsListCmd(gApp),\n\t\tnewCollectorSubscriptionsGetCmd(gApp),\n\t\tnewCollectorSubscriptionsSetCmd(gApp),\n\t\tnewCollectorSubscriptionsDeleteCmd(gApp),\n\t)\n\treturn cmd\n}\n\nfunc newCollectorSubscriptionsListCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"list\",\n\t\tAliases:      []string{\"ls\"},\n\t\tShort:        \"list subscriptions\",\n\t\tSilenceUsage: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\t\t\tresp, err := client.Get(apiURL + \"/api/v1/subscriptions\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\ttb, err := io.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"failed to list subscriptions, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\t\t\t// Parse the response\n\t\t\tsubs := make([]*apiserver.SubscriptionResponse, 0)\n\t\t\terr = json.Unmarshal(tb, &subs)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// if len(subs) == 0 {\n\t\t\t// \tfmt.Println(\"No subscriptions found\")\n\t\t\t// \treturn nil\n\t\t\t// }\n\n\t\t\t// Sort by name\n\t\t\tsort.Slice(subs, func(i, j int) bool {\n\t\t\t\treturn subs[i].Name < subs[j].Name\n\t\t\t})\n\t\t\t// Display as horizontal table\n\t\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\t\ttable.SetHeader([]string{\"Name\", \"Prefix\", \"Paths\", \"Encoding\", \"Mode\", \"Sample Interval\", \"Targets\", \"Outputs\"})\n\t\t\ttable.SetAutoWrapText(false)\n\t\t\ttable.SetAutoFormatHeaders(true)\n\t\t\ttable.SetHeaderAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetCenterSeparator(\"\")\n\t\t\ttable.SetColumnSeparator(\"\")\n\t\t\ttable.SetRowSeparator(\"\")\n\t\t\ttable.SetHeaderLine(false)\n\t\t\ttable.SetBorder(false)\n\t\t\ttable.SetTablePadding(\"\\t\")\n\t\t\ttable.SetNoWhiteSpace(true)\n\n\t\t\tdata := tableFormatSubscriptionsList(subs)\n\t\t\ttable.AppendBulk(data)\n\t\t\ttable.Render()\n\n\t\t\treturn nil\n\t\t},\n\t}\n\treturn cmd\n}\n\nfunc newCollectorSubscriptionsGetCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"get\",\n\t\tAliases:      []string{\"g\", \"show\", \"sh\"},\n\t\tShort:        \"get a subscription\",\n\t\tSilenceUsage: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tname, err := 
cmd.Flags().GetString(\"name\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif name == \"\" {\n\t\t\t\treturn fmt.Errorf(\"subscription name is required\")\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Get(apiURL + \"/api/v1/subscriptions/\" + name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\ttb, err := io.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"failed to get subscription, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\t\t\t// Parse the response\n\t\t\tsubs := new(apiserver.SubscriptionResponse)\n\t\t\terr = json.Unmarshal(tb, subs)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// Display as vertical table (key-value pairs)\n\t\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\t\ttable.SetAutoWrapText(false)\n\t\t\ttable.SetAutoFormatHeaders(false)\n\t\t\ttable.SetHeaderAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetCenterSeparator(\"\")\n\t\t\ttable.SetColumnSeparator(\":\")\n\t\t\ttable.SetRowSeparator(\"\")\n\t\t\ttable.SetHeaderLine(false)\n\t\t\ttable.SetBorder(false)\n\t\t\ttable.SetTablePadding(\"\\t\")\n\t\t\ttable.SetNoWhiteSpace(true)\n\t\t\ttable.SetColumnAlignment([]int{tablewriter.ALIGN_RIGHT, tablewriter.ALIGN_LEFT})\n\n\t\t\tdata := tableFormatSubscriptionVertical(subs)\n\t\t\ttable.AppendBulk(data)\n\t\t\ttable.Render()\n\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringP(\"name\", \"n\", \"\", \"subscription name\")\n\treturn cmd\n}\n\nfunc newCollectorSubscriptionsSetCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"set\",\n\t\tAliases: []string{\"create\", \"cr\"},\n\t\tShort:   
\"set a subscription\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tinputConfig, err := cmd.Flags().GetString(\"input\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif inputConfig == \"\" {\n\t\t\t\treturn fmt.Errorf(\"input file is required\")\n\t\t\t}\n\t\t\tsubConfig, b, err := readSubscriptionConfigFromFile(inputConfig)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Post(apiURL+\"/api/v1/config/subscriptions\", \"application/json\", bytes.NewBuffer(b))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\ttb, _ := io.ReadAll(resp.Body)\n\t\t\t\treturn fmt.Errorf(\"failed to create subscription, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\tfmt.Fprintf(os.Stderr, \"Subscription '%s' created successfully\\n\", subConfig.Name)\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringP(\"input\", \"i\", \"\", \"subscription config file\")\n\treturn cmd\n}\n\nfunc newCollectorSubscriptionsDeleteCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"delete\",\n\t\tAliases: []string{\"d\", \"del\", \"rm\"},\n\t\tShort:   \"delete a subscription\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tname, err := cmd.Flags().GetString(\"name\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif name == \"\" {\n\t\t\t\treturn fmt.Errorf(\"subscription name is required\")\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treq, err := http.NewRequest(http.MethodDelete, 
apiURL+\"/api/v1/config/subscriptions/\"+name, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\ttb, _ := io.ReadAll(resp.Body)\n\t\t\t\treturn fmt.Errorf(\"failed to delete subscription, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\tfmt.Fprintf(os.Stderr, \"Subscription '%s' deleted successfully\\n\", name)\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringP(\"name\", \"n\", \"\", \"subscription name\")\n\treturn cmd\n}\n\n// formatSubscriptionMode formats the mode and stream mode\nfunc formatSubscriptionMode(sub *types.SubscriptionConfig) string {\n\tif sub.Mode == \"\" {\n\t\tsub.Mode = \"STREAM\"\n\t}\n\tif strings.ToLower(sub.Mode) == \"stream\" && sub.StreamMode == \"\" {\n\t\tsub.StreamMode = \"TARGET_DEFINED\"\n\t}\n\tif strings.ToLower(sub.Mode) == \"stream\" && sub.StreamMode != \"\" && len(sub.StreamSubscriptions) == 0 {\n\t\treturn fmt.Sprintf(\"%s/%s\", strings.ToLower(sub.Mode), strings.ToLower(sub.StreamMode))\n\t}\n\tif sub.Mode != \"\" {\n\t\treturn strings.ToLower(sub.Mode)\n\t}\n\treturn \"-\"\n}\n\nfunc formatSubscriptionConfigVertical(prefix string, cfg *types.SubscriptionConfig) [][]string {\n\tif cfg == nil {\n\t\treturn [][]string{}\n\t}\n\n\tdata := [][]string{\n\t\t{prefix + \"Prefix\", formatValue(cfg.Prefix)},\n\t\t{prefix + \"Target\", formatValue(cfg.Target)},\n\t\t{prefix + \"Set Target\", fmt.Sprintf(\"%t\", cfg.SetTarget)},\n\t\t{prefix + \"Paths\", formatValue(cfg.Paths)},\n\t\t{prefix + \"Encoding\", formatValue(cfg.Encoding)},\n\t\t{prefix + \"Mode\", formatSubscriptionMode(cfg)},\n\t\t{prefix + \"Sample Interval\", formatValue(cfg.SampleInterval)},\n\t\t{prefix + \"Heartbeat Interval\", formatValue(cfg.HeartbeatInterval)},\n\t\t{prefix + \"Outputs\", formatValue(cfg.Outputs)},\n\t\t{prefix + \"Models\", 
formatValue(cfg.Models)},\n\t\t{prefix + \"QoS\", formatValue(cfg.Qos)},\n\t\t{prefix + \"Depth\", formatValue(cfg.Depth)},\n\t\t{prefix + \"Suppress Redundant\", fmt.Sprintf(\"%t\", cfg.SuppressRedundant)},\n\t\t{prefix + \"Updates Only\", fmt.Sprintf(\"%t\", cfg.UpdatesOnly)},\n\t}\n\n\t// History section (if present)\n\tif cfg.History != nil {\n\t\tif !cfg.History.Snapshot.IsZero() {\n\t\t\tdata = append(data, []string{prefix + \"History Snapshot\", cfg.History.Snapshot.String()})\n\t\t}\n\t\tif !cfg.History.Start.IsZero() {\n\t\t\tdata = append(data, []string{prefix + \"History Start\", cfg.History.Start.String()})\n\t\t}\n\t\tif !cfg.History.End.IsZero() {\n\t\t\tdata = append(data, []string{prefix + \"History End\", cfg.History.End.String()})\n\t\t}\n\t}\n\n\treturn data\n}\n\nfunc formatStreamSubscriptionConfigVertical(prefix string, cfg *types.SubscriptionConfig) [][]string {\n\tif cfg == nil {\n\t\treturn [][]string{}\n\t}\n\n\tdata := [][]string{\n\t\t{prefix + \"Paths\", formatValue(cfg.Paths)},\n\t\t{prefix + \"Mode\", formatSubscriptionMode(cfg)},\n\t\t{prefix + \"Sample Interval\", formatValue(cfg.SampleInterval)},\n\t\t{prefix + \"Heartbeat Interval\", formatValue(cfg.HeartbeatInterval)},\n\t}\n\n\treturn data\n}\n\nfunc tableFormatSubscriptionVertical(sub *apiserver.SubscriptionResponse) [][]string {\n\tif sub.Config == nil {\n\t\treturn [][]string{{\"Name\", sub.Name}}\n\t}\n\n\tdata := [][]string{\n\t\t{\"Name\", sub.Name},\n\t}\n\n\t// Main subscription config\n\tdata = append(data, formatSubscriptionConfigVertical(\"\", sub.Config)...)\n\n\t// Targets (top-level only)\n\tif len(sub.Targets) > 0 {\n\t\ttargetNames := make([]string, 0, len(sub.Targets))\n\t\tfor name := range sub.Targets {\n\t\t\ttargetNames = append(targetNames, name)\n\t\t}\n\t\tsort.Strings(targetNames)\n\n\t\tvar targetInfo []string\n\t\tfor _, name := range targetNames {\n\t\t\ttargetInfo = append(targetInfo, fmt.Sprintf(\"%s (%s)\", name, 
sub.Targets[name].State))\n\t\t}\n\n\t\tdata = append(data, []string{\"Targets\", strings.Join(targetInfo, \"\\n\")})\n\t}\n\n\t// Stream Subscriptions\n\tfor _, sc := range sub.Config.StreamSubscriptions {\n\t\theader := []string{fmt.Sprintf(\"\\tStream Subscription: %s\", sc.Name), \"\"}\n\n\t\tdata = append(data, []string{header[0], header[1]})\n\n\t\t// Indent keys to show hierarchy cleanly\n\t\tdata = append(data, formatStreamSubscriptionConfigVertical(\"  \", sc)...)\n\t}\n\n\treturn data\n}\n\n// tableFormatSubscriptionsList formats multiple subscriptions as horizontal table (summary view)\nfunc tableFormatSubscriptionsList(subs []*apiserver.SubscriptionResponse) [][]string {\n\tdata := make([][]string, 0, len(subs))\n\n\tfor _, sub := range subs {\n\t\tif sub.Config == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Add main subscription row\n\t\tdata = append(data, formatSubscriptionRow(\n\t\t\tsub.Name,\n\t\t\tsub.Config,\n\t\t\tsub.Targets,\n\t\t))\n\n\t\t// Add stream-subscriptions (children)\n\t\tfor i, sc := range sub.Config.StreamSubscriptions {\n\t\t\t// Indent child name for visual grouping\n\t\t\tchildName := \"↳\" + fmt.Sprintf(\"[%d]\", i) + sub.Name\n\n\t\t\tdata = append(data, formatSubscriptionRow(\n\t\t\t\tchildName,\n\t\t\t\tsc,\n\t\t\t\tnil, // children have no targets\n\t\t\t))\n\t\t}\n\t}\n\n\treturn data\n}\n\nfunc formatSubscriptionRow(\n\tname string,\n\tcfg *types.SubscriptionConfig,\n\ttargets map[string]*apiserver.TargetStateInfo,\n) []string {\n\n\t// Paths\n\tpaths := \"-\"\n\tif len(cfg.Paths) > 0 {\n\t\tpaths = strings.Join(cfg.Paths, \"\\n\")\n\t}\n\n\t// Targets summary\n\ttargetsStr := \"-\"\n\tif len(targets) > 0 {\n\t\tnames := make([]string, 0, len(targets))\n\t\tfor n := range targets {\n\t\t\tnames = append(names, n)\n\t\t}\n\t\tsort.Strings(names)\n\n\t\trunning, disabled := 0, 0\n\t\tfor _, n := range names {\n\t\t\tif targets[n].State == \"running\" {\n\t\t\t\trunning++\n\t\t\t} else 
{\n\t\t\t\tdisabled++\n\t\t\t}\n\t\t}\n\n\t\ttargetsStr = fmt.Sprintf(\"%d/%d\", running, len(targets))\n\t\tif disabled > 0 {\n\t\t\ttargetsStr += fmt.Sprintf(\" (%d disabled)\", disabled)\n\t\t}\n\t}\n\n\treturn []string{\n\t\tname,\n\t\tformatValue(cfg.Prefix),\n\t\tpaths,\n\t\tformatSubscriptionEncoding(cfg.Encoding),\n\t\tformatSubscriptionMode(cfg),\n\t\tformatValue(cfg.SampleInterval),\n\t\ttargetsStr,\n\t\tformatValueShort(cfg.Outputs),\n\t}\n}\n\nfunc readSubscriptionConfigFromFile(filename string) (*types.SubscriptionConfig, []byte, error) {\n\tb, err := os.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tcfg := make(map[string]any)\n\tswitch strings.ToLower(filepath.Ext(filename)) {\n\tcase \".json\":\n\t\terr = json.Unmarshal(b, &cfg)\n\tcase \".yaml\", \".yml\":\n\t\terr = yaml.Unmarshal(b, &cfg)\n\tdefault:\n\t\treturn nil, nil, fmt.Errorf(\"unsupported file type: %s\", filepath.Ext(filename))\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tsubConfig := new(types.SubscriptionConfig)\n\tdecoder, err := mapstructure.NewDecoder(\n\t\t&mapstructure.DecoderConfig{\n\t\t\tDecodeHook: mapstructure.StringToTimeDurationHookFunc(),\n\t\t\tResult:     subConfig,\n\t\t})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\terr = decoder.Decode(cfg)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn subConfig, b, nil\n}\n\nfunc formatSubscriptionEncoding(encoding *string) string {\n\tif encoding == nil {\n\t\treturn \"json\"\n\t}\n\tif *encoding == \"\" {\n\t\treturn \"json\"\n\t}\n\treturn formatValue(*encoding)\n}\n"
  },
  {
    "path": "pkg/cmd/collector/targets.go",
    "content": "// © 2025 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage collector\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/hairyhenderson/yaml\"\n\t\"github.com/mitchellh/mapstructure\"\n\t\"github.com/olekukonko/tablewriter\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\tapiserver \"github.com/openconfig/gnmic/pkg/collector/api/server\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc newCollectorTargetsCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"targets\",\n\t\tAliases:      []string{\"target\", \"tg\"},\n\t\tShort:        \"manage targets\",\n\t\tSilenceUsage: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.AddCommand(newCollectorTargetsListCmd(gApp))\n\tcmd.AddCommand(newCollectorTargetsGetCmd(gApp))\n\tcmd.AddCommand(newCollectorTargetsSetCmd(gApp))\n\tcmd.AddCommand(newCollectorTargetsDeleteCmd(gApp))\n\treturn cmd\n}\n\nfunc newCollectorTargetsListCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"list\",\n\t\tAliases:      []string{\"ls\"},\n\t\tShort:        \"list targets\",\n\t\tSilenceUsage: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err 
:= client.Get(apiURL + \"/api/v1/targets\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\ttb, err := io.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"failed to list targets, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\t// Parse the response\n\t\t\ttc := make([]*apiserver.TargetResponse, 0)\n\t\t\terr = json.Unmarshal(tb, &tc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Display as horizontal table\n\t\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\t\ttable.SetHeader([]string{\"Name\", \"Address\", \"Username\", \"State\", \"Subscriptions\", \"Outputs\", \"Insecure\", \"Skip Verify\"})\n\t\t\ttable.SetAutoWrapText(false)\n\t\t\ttable.SetAutoFormatHeaders(true)\n\t\t\ttable.SetHeaderAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetCenterSeparator(\"\")\n\t\t\ttable.SetColumnSeparator(\"\")\n\t\t\ttable.SetRowSeparator(\"\")\n\t\t\ttable.SetHeaderLine(false)\n\t\t\ttable.SetBorder(false)\n\t\t\ttable.SetTablePadding(\"\\t\")\n\t\t\ttable.SetNoWhiteSpace(true)\n\n\t\t\tdata := tableFormatTargetsList(tc)\n\t\t\ttable.AppendBulk(data)\n\t\t\ttable.Render()\n\n\t\t\treturn nil\n\t\t},\n\t}\n\treturn cmd\n}\n\nfunc newCollectorTargetsGetCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"get\",\n\t\tAliases:      []string{\"g\", \"show\", \"sh\"},\n\t\tShort:        \"get a target\",\n\t\tSilenceUsage: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tname, err := cmd.Flags().GetString(\"name\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif name == \"\" {\n\t\t\t\treturn fmt.Errorf(\"target name is required\")\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := 
getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Get(apiURL + \"/api/v1/targets/\" + name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\ttb, err := io.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"failed to get target, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\ttc := make([]*apiserver.TargetResponse, 0)\n\t\t\terr = json.Unmarshal(tb, &tc)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(tc) == 0 {\n\t\t\t\treturn fmt.Errorf(\"no targets found\")\n\t\t\t}\n\n\t\t\t// Display as vertical table (key-value pairs)\n\t\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\t\ttable.SetAutoWrapText(false)\n\t\t\ttable.SetAutoFormatHeaders(false)\n\t\t\ttable.SetHeaderAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\t\t\ttable.SetCenterSeparator(\"\")\n\t\t\ttable.SetColumnSeparator(\":\")\n\t\t\ttable.SetRowSeparator(\"\")\n\t\t\ttable.SetHeaderLine(false)\n\t\t\ttable.SetBorder(false)\n\t\t\ttable.SetTablePadding(\"\\t\")\n\t\t\ttable.SetNoWhiteSpace(true)\n\t\t\ttable.SetColumnAlignment([]int{tablewriter.ALIGN_RIGHT, tablewriter.ALIGN_LEFT})\n\n\t\t\tdata := tableFormatTargetVertical(tc[0])\n\t\t\ttable.AppendBulk(data)\n\t\t\ttable.Render()\n\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringP(\"name\", \"n\", \"\", \"target name\")\n\treturn cmd\n}\n\nfunc newCollectorTargetsSetCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"set\",\n\t\tAliases:      []string{\"create\", \"cr\"},\n\t\tShort:        \"set a target\",\n\t\tSilenceUsage: true,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tinputConfig, err := cmd.Flags().GetString(\"input\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttargetConfig, b, err := 
readTargetConfigFromFile(inputConfig)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Post(apiURL+\"/api/v1/config/targets\", \"application/json\", bytes.NewBuffer(b))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\ttb, _ := io.ReadAll(resp.Body)\n\t\t\t\treturn fmt.Errorf(\"failed to create target, status code: %d: %s\", resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\tfmt.Fprintf(os.Stderr, \"Target '%s' created successfully\\n\", targetConfig.Name)\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringP(\"input\", \"i\", \"\", \"target file input\")\n\treturn cmd\n}\n\nfunc newCollectorTargetsDeleteCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"delete\",\n\t\tAliases: []string{\"d\", \"del\", \"rm\"},\n\t\tShort:   \"delete a target\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tname, err := cmd.Flags().GetString(\"name\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif name == \"\" {\n\t\t\t\treturn fmt.Errorf(\"target name is required\")\n\t\t\t}\n\t\t\tapiURL, err := getAPIServerURL(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclient, err := getAPIServerClient(gApp.Store)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treq, err := http.NewRequest(http.MethodDelete, apiURL+\"/api/v1/config/targets/\"+name, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := client.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\ttb, _ := io.ReadAll(resp.Body)\n\t\t\t\treturn fmt.Errorf(\"failed to delete target, status code: %d: %s\", 
resp.StatusCode, string(tb))\n\t\t\t}\n\n\t\t\tfmt.Fprintf(os.Stderr, \"Target '%s' deleted successfully\\n\", name)\n\t\t\treturn nil\n\t\t},\n\t}\n\tcmd.Flags().StringP(\"name\", \"n\", \"\", \"target name\")\n\treturn cmd\n}\n\n// formatValue formats any value based on its type for table display\nfunc formatValue(v any) string {\n\tif v == nil {\n\t\treturn \"-\"\n\t}\n\n\tswitch val := v.(type) {\n\tcase *string:\n\t\tif val == nil {\n\t\t\treturn \"-\"\n\t\t}\n\t\tif *val == \"\" {\n\t\t\treturn \"-\"\n\t\t}\n\t\treturn *val\n\tcase string:\n\t\tif val == \"\" {\n\t\t\treturn \"-\"\n\t\t}\n\t\treturn val\n\tcase *bool:\n\t\tif val == nil {\n\t\t\treturn \"-\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%t\", *val)\n\tcase bool:\n\t\treturn fmt.Sprintf(\"%t\", val)\n\tcase *int:\n\t\tif val == nil {\n\t\t\treturn \"-\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%d\", *val)\n\tcase int:\n\t\tif val == 0 {\n\t\t\treturn \"-\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%d\", val)\n\tcase uint:\n\t\tif val == 0 {\n\t\t\treturn \"-\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%d\", val)\n\tcase []string:\n\t\tif len(val) == 0 {\n\t\t\treturn \"-\"\n\t\t}\n\t\treturn strings.Join(val, \", \")\n\tcase map[string]string:\n\t\tif len(val) == 0 {\n\t\t\treturn \"-\"\n\t\t}\n\t\tvar parts []string\n\t\tfor k, v := range val {\n\t\t\tparts = append(parts, fmt.Sprintf(\"%s=%s\", k, v))\n\t\t}\n\t\tsort.Strings(parts)\n\t\treturn strings.Join(parts, \", \")\n\tdefault:\n\t\tstr := fmt.Sprintf(\"%v\", val)\n\t\tif str == \"\" || str == \"0s\" || str == \"<nil>\" {\n\t\t\treturn \"-\"\n\t\t}\n\t\treturn str\n\t}\n}\n\n// formatValueShort formats value for list view (shorter version)\nfunc formatValueShort(v any) string {\n\tif v == nil {\n\t\treturn \"-\"\n\t}\n\n\tswitch val := v.(type) {\n\tcase []string:\n\t\tif len(val) == 0 {\n\t\t\treturn \"-\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%d\", len(val))\n\tcase map[string]string:\n\t\tif len(val) == 0 {\n\t\t\treturn \"-\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%d\", 
len(val))\n\tdefault:\n\t\treturn formatValue(val)\n\t}\n}\n\n// tableFormatTargetVertical formats a single target as vertical table (key-value pairs)\nfunc tableFormatTargetVertical(target *apiserver.TargetResponse) [][]string {\n\tcfg := target.Config\n\tdata := [][]string{\n\t\t{\"Name\", target.Name},\n\t\t{\"State\", target.State.State},\n\t\t{\"Address\", formatValue(cfg.Address)},\n\t\t{\"Username\", formatValue(cfg.Username)},\n\t\t{\"Password\", formatValue(cfg.Password)},\n\t\t{\"Auth Scheme\", formatValue(cfg.AuthScheme)},\n\t\t{\"Timeout\", formatValue(cfg.Timeout)},\n\t\t{\"Insecure\", formatValue(cfg.Insecure)},\n\t\t{\"Skip Verify\", formatValue(cfg.SkipVerify)},\n\t\t{\"TLS CA\", formatValue(cfg.TLSCA)},\n\t\t{\"TLS Cert\", formatValue(cfg.TLSCert)},\n\t\t{\"TLS Key\", formatValue(cfg.TLSKey)},\n\t\t{\"TLS Server Name\", formatValue(cfg.TLSServerName)},\n\t\t{\"TLS Min Version\", formatValue(cfg.TLSMinVersion)},\n\t\t{\"TLS Max Version\", formatValue(cfg.TLSMaxVersion)},\n\t\t{\"TLS Version\", formatValue(cfg.TLSVersion)},\n\t\t{\"Log TLS Secret\", formatValue(cfg.LogTLSSecret)},\n\t\t{\"Subscriptions\", formatValue(target.State.Subscriptions)},\n\t\t{\"Outputs\", formatValue(cfg.Outputs)},\n\t\t{\"Buffer Size\", formatValue(cfg.BufferSize)},\n\t\t{\"Retry Timer\", formatValue(cfg.RetryTimer)},\n\t\t{\"Token\", formatValue(cfg.Token)},\n\t\t{\"Proxy\", formatValue(cfg.Proxy)},\n\t\t{\"Encoding\", formatValue(cfg.Encoding)},\n\t\t{\"Tags\", formatValue(cfg.Tags)},\n\t\t{\"Event Tags\", formatValue(cfg.EventTags)},\n\t\t{\"Metadata\", formatValue(cfg.Metadata)},\n\t\t{\"Gzip\", formatValue(cfg.Gzip)},\n\t\t{\"Proto Files\", formatValue(cfg.ProtoFiles)},\n\t\t{\"Proto Dirs\", formatValue(cfg.ProtoDirs)},\n\t\t{\"Cipher Suites\", formatValue(cfg.CipherSuites)},\n\t\t{\"TCP Keepalive\", formatValue(cfg.TCPKeepalive)},\n\t\t{\"GRPC Keepalive\", formatValue(cfg.GRPCKeepalive)},\n\t\t{\"Tunnel Target Type\", 
formatValue(cfg.TunnelTargetType)},\n\t}\n\treturn data\n}\n\n// tableFormatTargetsList formats multiple targets as horizontal table (summary view)\nfunc tableFormatTargetsList(targets []*apiserver.TargetResponse) [][]string {\n\tdata := make([][]string, 0, len(targets))\n\tfor _, target := range targets {\n\t\tdata = append(data, []string{\n\t\t\ttarget.Name,\n\t\t\tformatValue(target.Config.Address),\n\t\t\tformatValue(target.Config.Username),\n\t\t\ttarget.State.State,\n\t\t\tformatValueShort(target.State.Subscriptions),\n\t\t\tformatValueShort(target.Config.Outputs),\n\t\t\tformatValue(target.Config.Insecure),\n\t\t\tformatValue(target.Config.SkipVerify),\n\t\t})\n\t}\n\t// Sort by name\n\tsort.Slice(data, func(i, j int) bool {\n\t\treturn data[i][0] < data[j][0]\n\t})\n\treturn data\n}\n\nfunc readTargetConfigFromFile(filename string) (*types.TargetConfig, []byte, error) {\n\tb, err := os.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tcfg := make(map[string]any)\n\tswitch strings.ToLower(filepath.Ext(filename)) {\n\tcase \".json\":\n\t\terr = json.Unmarshal(b, &cfg)\n\tcase \".yaml\", \".yml\":\n\t\terr = yaml.Unmarshal(b, &cfg)\n\tdefault:\n\t\treturn nil, nil, fmt.Errorf(\"unsupported file type: %s\", filepath.Ext(filename))\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\ttargetConfig := new(types.TargetConfig)\n\tdecoder, err := mapstructure.NewDecoder(\n\t\t&mapstructure.DecoderConfig{\n\t\t\tDecodeHook: mapstructure.StringToTimeDurationHookFunc(),\n\t\t\tResult:     targetConfig,\n\t\t})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\terr = decoder.Decode(cfg)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn targetConfig, b, nil\n}\n"
  },
  {
    "path": "pkg/cmd/completion.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cmd\n\nimport (\n\t\"os\"\n\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\n// newCompletionCmd creates completion command tree.\nfunc newCompletionCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"completion [bash|zsh|fish]\",\n\t\tShort:        \"generate completion script\",\n\t\tSilenceUsage: true,\n\t\tLong: `To load completions:,\n\n\tBash:\n\n\t$ source <(gnmic completion bash)\n\n\t# To load completions for each session, execute once:\n\t# Linux:\n\t$ gnmic completion bash > /etc/bash_completion.d/gnmic\n\t# macOS:\n\t$ gnmic completion bash > /usr/local/etc/bash_completion.d/gnmic\n\n\tZsh:\n\n\t# If shell completion is not already enabled in your environment,\n\t# you will need to enable it.  
You can execute the following once:\n\n\t$ echo \"autoload -U compinit; compinit\" >> ~/.zshrc\n\n\t# To load completions for each session, execute once:\n\t$ gnmic completion zsh > \"${fpath[1]}/gnmic\"\n\n\t# You will need to start a new shell for this setup to take effect.\n\n\tfish:\n\n\t$ gnmic completion fish | source\n\n\t# To load completions for each session, execute once:\n\t$ gnmic completion fish > ~/.config/fish/completions/gnmic.fish\n\t`,\n\t\tDisableFlagsInUseLine: true,\n\t\tValidArgs:             []string{\"bash\", \"zsh\", \"fish\"},\n\t\tArgs:                  cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tswitch args[0] {\n\t\t\tcase \"bash\":\n\t\t\t\tcmd.Root().GenBashCompletion(os.Stdout)\n\t\t\tcase \"zsh\":\n\t\t\t\tcmd.Root().GenZshCompletion(os.Stdout)\n\t\t\tcase \"fish\":\n\t\t\t\tcmd.Root().GenFishCompletion(os.Stdout, true)\n\t\t\t}\n\t\t},\n\t}\n\treturn cmd\n}\n"
  },
  {
    "path": "pkg/cmd/diff/diff.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage diff\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\n// diffCmd represents the diff command\nfunc New(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"diff\",\n\t\tAliases:      []string{\"compare\"},\n\t\tShort:        \"run a diff comparison between targets\",\n\t\tPreRunE:      gApp.DiffPreRunE,\n\t\tRunE:         gApp.DiffRunE,\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitDiffFlags(cmd)\n\tcmd.AddCommand(newDiffSetRequestCmd(gApp))\n\tcmd.AddCommand(newDiffSetToNotifsCmd(gApp))\n\treturn cmd\n}\n\n// newDiffSetRequestCmd creates a new diff setrequest command.\nfunc newDiffSetRequestCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"setrequest\",\n\t\tShort:        \"run a diff comparison between two setrequests in textproto format\",\n\t\tRunE:         gApp.DiffSetRequestRunE,\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitDiffSetRequestFlags(cmd)\n\treturn cmd\n}\n\nfunc newDiffSetToNotifsCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"set-to-notifs\",\n\t\tShort:        \"run a diff comparison between a SetRequest and a GetResponse or SubscribeResponse stream stored in textproto format\",\n\t\tRunE:         gApp.DiffSetToNotifsRunE,\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitDiffSetToNotifsFlags(cmd)\n\treturn cmd\n}\n"
  },
  {
    "path": "pkg/cmd/generate/generate.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage generate\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\n// generateCmd represents the generate command\nfunc New(gApp *app.App) *cobra.Command {\n\tgenCmd := &cobra.Command{\n\t\tUse:               \"generate\",\n\t\tAliases:           []string{\"gen\"},\n\t\tShort:             \"generate paths or JSON/YAML objects from YANG\",\n\t\tPersistentPreRunE: gApp.GeneratePreRunE,\n\t\tRunE:              gApp.GenerateRunE,\n\t\tSilenceUsage:      true,\n\t}\n\tgenCmd.AddCommand(newGenerateSetRequestCmd(gApp))\n\tgenCmd.AddCommand(newGeneratePathCmd(gApp))\n\n\tgApp.InitGenerateFlags(genCmd)\n\treturn genCmd\n}\n"
  },
  {
    "path": "pkg/cmd/generate/generatePath.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage generate\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\n// newGeneratePathCmd represents the generate path command\nfunc newGeneratePathCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:          \"path\",\n\t\tShort:        \"generate xpath(s) from yang models\",\n\t\tPreRunE:      gApp.GeneratePathPreRunE,\n\t\tRunE:         gApp.GeneratePathRunE,\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitGeneratePathFlags(cmd)\n\treturn cmd\n}\n"
  },
  {
    "path": "pkg/cmd/generate/generateSetRequest.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage generate\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\n// newGenerateSetRequestCmd represents the generate set-request command\nfunc newGenerateSetRequestCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"set-request\",\n\t\tAliases: []string{\"sr\", \"sreq\", \"srq\"},\n\t\tShort:   \"generate Set Request file\",\n\t\tPreRunE: func(cmd *cobra.Command, _ []string) error {\n\t\t\tgApp.Config.SetLocalFlagsFromFile(cmd)\n\t\t\treturn nil\n\t\t},\n\t\tRunE:         gApp.GenerateSetRequestRunE,\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitGenerateSetRequestFlags(cmd)\n\treturn cmd\n}\n"
  },
  {
    "path": "pkg/cmd/get/get.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage get\n\nimport (\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/openconfig/gnmic/pkg/app\"\n)\n\nvar DataType = [][2]string{\n\t{\"all\", \"all config/state/operational data\"},\n\t{\"config\", \"data that the target considers to be read/write\"},\n\t{\"state\", \"read-only data on the target\"},\n\t{\"operational\", \"read-only data on the target that is related to software processes operating on the device, or external interactions of the device\"},\n}\n\n// getCmd represents the get command\nfunc New(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:   \"get\",\n\t\tShort: \"run gnmi get on targets\",\n\t\tAnnotations: map[string]string{\n\t\t\t\"--path\":   \"XPATH\",\n\t\t\t\"--prefix\": \"PREFIX\",\n\t\t\t\"--model\":  \"MODEL\",\n\t\t\t\"--type\":   \"STORE\",\n\t\t},\n\t\tPreRunE: gApp.GetPreRunE,\n\t\tRunE:    gApp.GetRun,\n\t\tPostRun: func(cmd *cobra.Command, args []string) {\n\t\t\tgApp.CleanupPlugins()\n\t\t},\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitGetFlags(cmd)\n\treturn cmd\n}\n"
  },
  {
    "path": "pkg/cmd/getset/getset.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage getset\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\n// getCmd represents the get command\nfunc New(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"getset\",\n\t\tAliases: []string{\"gas\", \"gs\"},\n\t\tShort:   \"run gnmi get then set on targets\",\n\t\tAnnotations: map[string]string{\n\t\t\t\"--get\":    \"XPATH\",\n\t\t\t\"--prefix\": \"PREFIX\",\n\t\t\t\"--type\":   \"STORE\",\n\t\t},\n\t\tPreRunE:      gApp.GetSetPreRunE,\n\t\tRunE:         gApp.GetSetRunE,\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitGetSetFlags(cmd)\n\treturn cmd\n}\n"
  },
  {
    "path": "pkg/cmd/listener/listener.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage listener\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\n\t\"github.com/fullstorydev/grpcurl\"\n\tgrpc_prometheus \"github.com/grpc-ecosystem/go-grpc-prometheus\"\n\t\"github.com/jhump/protoreflect/desc\"\n\t\"github.com/jhump/protoreflect/dynamic\"\n\tnokiasros \"github.com/karimra/sros-dialout\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n\t\"github.com/spf13/cobra\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/metadata\"\n\t\"google.golang.org/grpc/peer\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n)\n\n// New returns the listen command tree.\nfunc New(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:   \"listen\",\n\t\tShort: \"listens for telemetry dialout updates from the node\",\n\t\tPreRunE: func(cmd *cobra.Command, _ []string) error {\n\t\t\tgApp.Config.SetLocalFlagsFromFile(cmd)\n\t\t\tif len(gApp.Config.Address) == 0 {\n\t\t\t\treturn fmt.Errorf(\"no address specified\")\n\t\t\t}\n\t\t\tif len(gApp.Config.Address) > 1 {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"multiple addresses specified, listening only on %s\\n\", gApp.Config.Address[0])\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tRunE: func(cmd *cobra.Command, _ []string) error {\n\t\t\tctx, cancel 
:= context.WithCancel(cmd.Context())\n\t\t\tdefer cancel()\n\t\t\tserver := new(dialoutTelemetryServer)\n\t\t\tserver.ctx = ctx\n\n\t\t\topts := []grpc.ServerOption{\n\t\t\t\tgrpc.MaxConcurrentStreams(gApp.Config.LocalFlags.ListenMaxConcurrentStreams),\n\t\t\t}\n\t\t\tif gApp.Config.MaxMsgSize > 0 {\n\t\t\t\topts = append(opts, grpc.MaxRecvMsgSize(gApp.Config.MaxMsgSize))\n\t\t\t}\n\n\t\t\tif gApp.Config.LocalFlags.ListenPrometheusAddress != \"\" {\n\t\t\t\tserver.reg = prometheus.NewRegistry()\n\t\t\t\tgrpcMetrics := grpc_prometheus.NewServerMetrics()\n\t\t\t\topts = append(opts,\n\t\t\t\t\tgrpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()),\n\t\t\t\t)\n\t\t\t\tserver.reg.MustRegister(grpcMetrics)\n\t\t\t}\n\n\t\t\tif len(gApp.Config.ProtoFile) > 0 {\n\t\t\t\tgApp.Logger.Printf(\"loading proto files...\")\n\t\t\t\tdescSource, err := grpcurl.DescriptorSourceFromProtoFiles(gApp.Config.ProtoDir, gApp.Config.ProtoFile...)\n\t\t\t\tif err != nil {\n\t\t\t\t\tgApp.Logger.Printf(\"failed to load proto files: %v\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tserver.rootDesc, err = descSource.FindSymbol(\"Nokia.SROS.root\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tgApp.Logger.Printf(\"could not get symbol 'Nokia.SROS.root': %v\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tgApp.Logger.Printf(\"loaded proto files\")\n\t\t\t}\n\n\t\t\tserver.Outputs = make(map[string]outputs.Output)\n\t\t\toutCfgs, err := gApp.Config.GetOutputs()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor name, outConf := range outCfgs {\n\t\t\t\tif outType, ok := outConf[\"type\"]; ok {\n\t\t\t\t\tif initializer, ok := outputs.Outputs[outType.(string)]; ok {\n\t\t\t\t\t\tout := initializer()\n\t\t\t\t\t\tgo out.Init(ctx, name, 
outConf,\n\t\t\t\t\t\t\toutputs.WithLogger(gApp.Logger),\n\t\t\t\t\t\t\toutputs.WithName(gApp.Config.InstanceName),\n\t\t\t\t\t\t\toutputs.WithClusterName(gApp.Config.ClusterName),\n\t\t\t\t\t\t\toutputs.WithRegistry(server.reg),\n\t\t\t\t\t\t\toutputs.WithConfigStore(gApp.Store),\n\t\t\t\t\t\t)\n\t\t\t\t\t\tserver.Outputs[name] = out\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tfor _, o := range server.Outputs {\n\t\t\t\t\to.Close()\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tserver.listener, err = net.Listen(\"tcp\", gApp.Config.Address[0])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tgApp.Logger.Printf(\"waiting for connections on %s\", gApp.Config.Address[0])\n\n\t\t\tif gApp.Config.TLSKey != \"\" && gApp.Config.TLSCert != \"\" {\n\t\t\t\ttlsConfig, err := utils.NewTLSConfig(\n\t\t\t\t\tgApp.Config.TLSCa,\n\t\t\t\t\tgApp.Config.TLSCert,\n\t\t\t\t\tgApp.Config.TLSKey,\n\t\t\t\t\t\"request\",\n\t\t\t\t\tfalse,\n\t\t\t\t\ttrue,\n\t\t\t\t)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\topts = append(opts, grpc.Creds(credentials.NewTLS(tlsConfig)))\n\t\t\t}\n\n\t\t\tserver.grpcServer = grpc.NewServer(opts...)\n\t\t\tnokiasros.RegisterDialoutTelemetryServer(server.grpcServer, server)\n\n\t\t\tif gApp.Config.LocalFlags.ListenPrometheusAddress != \"\" {\n\t\t\t\tgrpc_prometheus.Register(server.grpcServer)\n\n\t\t\t\thttpServer := &http.Server{\n\t\t\t\t\tHandler: promhttp.HandlerFor(server.reg, promhttp.HandlerOpts{}),\n\t\t\t\t\tAddr:    gApp.Config.LocalFlags.ListenPrometheusAddress,\n\t\t\t\t}\n\t\t\t\tgo func() {\n\t\t\t\t\tif err := httpServer.ListenAndServe(); err != nil {\n\t\t\t\t\t\tgApp.Logger.Printf(\"Unable to start prometheus http server.\")\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\tdefer httpServer.Close()\n\t\t\t}\n\t\t\tserver.gApp = gApp\n\t\t\tserver.grpcServer.Serve(server.listener)\n\t\t\tdefer server.grpcServer.Stop()\n\t\t\treturn nil\n\t\t},\n\t\tSilenceUsage: 
true,\n\t}\n\tcmd.Flags().Uint32P(\"max-concurrent-streams\", \"\", 256, \"max concurrent streams gnmic can receive per transport\")\n\tcmd.Flags().StringP(\"prometheus-address\", \"\", \"\", \"prometheus server address\")\n\tgApp.Config.FileConfig.BindPFlag(\"listen-max-concurrent-streams\", cmd.LocalFlags().Lookup(\"max-concurrent-streams\"))\n\tgApp.Config.FileConfig.BindPFlag(\"listen-prometheus-address\", cmd.LocalFlags().Lookup(\"prometheus-address\"))\n\treturn cmd\n}\n\ntype dialoutTelemetryServer struct {\n\tlistener   net.Listener\n\tgrpcServer *grpc.Server\n\trootDesc   desc.Descriptor\n\n\tOutputs map[string]outputs.Output\n\n\tctx context.Context\n\n\tgApp *app.App\n\treg  *prometheus.Registry\n}\n\nfunc (s *dialoutTelemetryServer) Publish(stream nokiasros.DialoutTelemetry_PublishServer) error {\n\tpeer, ok := peer.FromContext(stream.Context())\n\tif ok && s.gApp.Config.Debug {\n\t\tb, err := json.Marshal(peer)\n\t\tif err != nil {\n\t\t\ts.gApp.Logger.Printf(\"failed to marshal peer data: %v\", err)\n\t\t} else {\n\t\t\ts.gApp.Logger.Printf(\"received Publish RPC from peer=%s\", string(b))\n\t\t}\n\t}\n\tmd, ok := metadata.FromIncomingContext(stream.Context())\n\tif ok && s.gApp.Config.Debug {\n\t\tb, err := json.Marshal(md)\n\t\tif err != nil {\n\t\t\ts.gApp.Logger.Printf(\"failed to marshal context metadata: %v\", err)\n\t\t} else {\n\t\t\ts.gApp.Logger.Printf(\"received http2_header=%s\", string(b))\n\t\t}\n\t}\n\toutMeta := outputs.Meta{}\n\tif sn, ok := md[\"subscription-name\"]; ok {\n\t\tif len(sn) > 0 {\n\t\t\toutMeta[\"subscription-name\"] = sn[0]\n\t\t}\n\t} else {\n\t\ts.gApp.Logger.Println(\"could not find subscription-name in http2 headers\")\n\t}\n\toutMeta[\"source\"] = peer.Addr.String()\n\tif systemName, ok := md[\"system-name\"]; ok {\n\t\tif len(systemName) > 0 {\n\t\t\toutMeta[\"system-name\"] = systemName[0]\n\t\t}\n\t} else {\n\t\ts.gApp.Logger.Println(\"could not find system-name in http2 headers\")\n\t}\n\tfor {\n\t\tsubResp, 
err := stream.Recv()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\ts.gApp.Logger.Printf(\"gRPC dialout receive error: %v\", err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\terr = stream.Send(&nokiasros.PublishResponse{})\n\t\tif err != nil {\n\t\t\ts.gApp.Logger.Printf(\"error sending publish response to server: %v\", err)\n\t\t}\n\t\tswitch resp := subResp.Response.(type) {\n\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\tif s.rootDesc != nil {\n\t\t\t\tfor _, update := range resp.Update.Update {\n\t\t\t\t\tswitch update.Val.Value.(type) {\n\t\t\t\t\tcase *gnmi.TypedValue_ProtoBytes:\n\t\t\t\t\t\tm := dynamic.NewMessage(s.rootDesc.GetFile().FindMessage(\"Nokia.SROS.root\"))\n\t\t\t\t\t\terr := m.Unmarshal(update.Val.GetProtoBytes())\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\ts.gApp.Logger.Printf(\"failed to unmarshal m: %v\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tjsondata, err := m.MarshalJSON()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\ts.gApp.Logger.Printf(\"failed to marshal dynamic proto msg: %v\", err)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif s.gApp.Config.Debug {\n\t\t\t\t\t\t\ts.gApp.Logger.Printf(\"json format=%s\", string(jsondata))\n\t\t\t\t\t\t}\n\t\t\t\t\t\tupdate.Val.Value = &gnmi.TypedValue_JsonVal{JsonVal: jsondata}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, o := range s.Outputs {\n\t\t\t\tgo o.Write(s.ctx, subResp, outMeta)\n\t\t\t}\n\n\t\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\t\ts.gApp.Logger.Printf(\"received sync response=%+v from %s\", resp.SyncResponse, outMeta[\"source\"])\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/cmd/path/path.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage path\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\n// New creates the path command tree.\nfunc New(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:   \"path\",\n\t\tShort: \"generate gnmi or xpath style from yang file\",\n\t\tAnnotations: map[string]string{\n\t\t\t\"--file\": \"YANG\",\n\t\t\t\"--dir\":  \"DIR\",\n\t\t},\n\t\tPreRunE: gApp.PathPreRunE,\n\t\tRunE:    gApp.PathRunE,\n\t\tPostRun: func(cmd *cobra.Command, _ []string) {\n\t\t\tcmd.ResetFlags()\n\t\t\tgApp.InitPathFlags(cmd)\n\t\t},\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitPathFlags(cmd)\n\treturn cmd\n}\n"
  },
  {
    "path": "pkg/cmd/processor/processor.go",
    "content": "package processor\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\n// processorCmd represents the processor command\nfunc New(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"processor\",\n\t\tAliases: []string{\"proc\"},\n\t\tShort:   \"apply a list of processors\",\n\t\tPreRunE: gApp.ProcessorPreRunE,\n\t\tRunE:    gApp.ProcessorRunE,\n\t\tPostRun: func(cmd *cobra.Command, args []string) {\n\t\t\tgApp.CleanupPlugins()\n\t\t},\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitProcessorFlags(cmd)\n\treturn cmd\n}\n"
  },
  {
    "path": "pkg/cmd/prompt.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cmd\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"unicode\"\n\n\tgoprompt \"github.com/c-bata/go-prompt\"\n\t\"github.com/c-bata/go-prompt/completer\"\n\thomedir \"github.com/mitchellh/go-homedir\"\n\t\"github.com/nsf/termbox-go\"\n\t\"github.com/olekukonko/tablewriter\"\n\t\"github.com/openconfig/goyang/pkg/yang\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/get\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/subscribe\"\n)\n\nvar colorMapping = map[string]goprompt.Color{\n\t\"black\":      goprompt.Black,\n\t\"dark_red\":   goprompt.DarkRed,\n\t\"dark_green\": goprompt.DarkGreen,\n\t\"brown\":      goprompt.Brown,\n\t\"dark_blue\":  goprompt.DarkBlue,\n\t\"purple\":     goprompt.Purple,\n\t\"cyan\":       goprompt.Cyan,\n\t\"light_gray\": goprompt.LightGray,\n\t\"dark_gray\":  goprompt.DarkGray,\n\t\"red\":        goprompt.Red,\n\t\"green\":      goprompt.Green,\n\t\"yellow\":     goprompt.Yellow,\n\t\"blue\":       goprompt.Blue,\n\t\"fuchsia\":    goprompt.Fuchsia,\n\t\"turquoise\":  goprompt.Turquoise,\n\t\"white\":      goprompt.White,\n}\n\nvar targetListHeader = []string{\n\t\"Name\", \"Address\", \"Username\", \"Password\", \"Insecure\", \"Skip Verify\", \"TLS CA\", \"TLS Certificate\", \"TLS Key\"}\n\nvar subscriptionListHeader = []string{\"Name\", \"Mode\", \"Prefix\", \"Paths\", \"Interval\", \"Encoding\"}\n\nfunc getColor(flagName string) 
goprompt.Color {\n\tswitch flagName {\n\tcase \"prefix-color\":\n\t\tif cgoprompt, ok := colorMapping[gApp.Config.LocalFlags.PromptPrefixColor]; ok {\n\t\t\treturn cgoprompt\n\t\t}\n\tcase \"suggestions-bg-color\":\n\t\tif cgoprompt, ok := colorMapping[gApp.Config.LocalFlags.PromptSuggestionsBGColor]; ok {\n\t\t\treturn cgoprompt\n\t\t}\n\tcase \"description-bg-color\":\n\t\tif cgoprompt, ok := colorMapping[gApp.Config.LocalFlags.PromptDescriptionBGColor]; ok {\n\t\t\treturn cgoprompt\n\t\t}\n\t}\n\tdefColor := \"yellow\"\n\tpromptModeCmd.Flags().VisitAll(\n\t\tfunc(f *pflag.Flag) {\n\t\t\tif f.Name == flagName {\n\t\t\t\tdefColor = f.DefValue\n\t\t\t\treturn\n\t\t\t}\n\t\t},\n\t)\n\treturn colorMapping[defColor]\n}\n\nvar promptModeCmd *cobra.Command\n\nfunc newPromptCmd() *cobra.Command {\n\tpromptModeCmd = &cobra.Command{\n\t\tUse:     \"prompt\",\n\t\tShort:   \"enter the interactive gnmic prompt mode\",\n\t\tPreRunE: gApp.PromptPreRunE,\n\t\tRunE:    gApp.PromptRunE,\n\t\tPostRun: func(cmd *cobra.Command, _ []string) {\n\t\t\tcmd.ResetFlags()\n\t\t\t//initPromptFlags(cmd)\n\t\t},\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitPromptFlags(promptModeCmd)\n\treturn promptModeCmd\n}\n\nvar promptQuitCmd = &cobra.Command{\n\tUse:   \"quit\",\n\tShort: \"quit the gnmic-prompt\",\n\tRun: func(_ *cobra.Command, _ []string) {\n\t\t// cancel gctx\n\t\tgApp.Cfn()\n\t\t// save history\n\t\thome, err := homedir.Dir()\n\t\tif err != nil {\n\t\t\tos.Exit(0)\n\t\t}\n\t\tf, err := os.Create(home + \"/.gnmic.history\")\n\t\tif err != nil {\n\t\t\tos.Exit(0)\n\t\t}\n\t\tl := len(gApp.PromptHistory)\n\t\tif l > 128 {\n\t\t\tgApp.PromptHistory = gApp.PromptHistory[l-128:]\n\t\t}\n\t\tfor i := range gApp.PromptHistory {\n\t\t\tf.WriteString(gApp.PromptHistory[i] + \"\\n\")\n\t\t}\n\t\tf.Close()\n\t\tos.Exit(0)\n\t},\n}\n\nvar targetCmd = &cobra.Command{\n\tUse:   \"target\",\n\tShort: \"manipulate configured targets\",\n}\n\nvar targetListCmd = &cobra.Command{\n\tUse:   
\"list\",\n\tShort: \"list configured targets\",\n\tRunE: func(_ *cobra.Command, _ []string) error {\n\t\ttargetsConfig, err := gApp.Config.GetTargets()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttabData := targetTable(targetsConfig, true)\n\t\trenderTable(tabData, targetListHeader)\n\t\treturn nil\n\t},\n\tPostRun: func(_ *cobra.Command, _ []string) {\n\t\tname = \"\"\n\t},\n}\n\nvar targetShowCmd = &cobra.Command{\n\tUse:   \"show\",\n\tShort: \"show a target details\",\n\tAnnotations: map[string]string{\n\t\t\"--name\": \"TARGET\",\n\t},\n\tRunE: func(_ *cobra.Command, _ []string) error {\n\t\tif name == \"\" {\n\t\t\tfmt.Println(\"provide a target name with --name\")\n\t\t\treturn nil\n\t\t}\n\t\ttargetsConfig, err := gApp.Config.GetTargets()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif tc, ok := targetsConfig[name]; ok {\n\t\t\ttabData := targetTable(map[string]*types.TargetConfig{name: tc}, false)\n\t\t\trenderTable(tabData, []string{\"Param\", \"Value\"})\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.New(\"unknown target\")\n\t},\n\tPostRun: func(_ *cobra.Command, _ []string) {\n\t\tname = \"\"\n\t},\n}\n\nvar subscriptionCmd = &cobra.Command{\n\tUse:   \"subscription\",\n\tShort: \"manipulate configured subscriptions\",\n}\n\nvar subscriptionListCmd = &cobra.Command{\n\tUse:   \"list\",\n\tShort: \"list configured subscriptions\",\n\tRunE: func(_ *cobra.Command, _ []string) error {\n\t\tsubs, err := gApp.Config.GetSubscriptions(nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttabData := subscriptionTable(subs, true)\n\t\trenderTable(tabData, subscriptionListHeader)\n\t\treturn nil\n\t},\n\tPostRun: func(_ *cobra.Command, _ []string) {\n\t\tname = \"\"\n\t},\n}\n\nvar subscriptionShowCmd = &cobra.Command{\n\tUse:   \"show\",\n\tShort: \"show a subscription details\",\n\tAnnotations: map[string]string{\n\t\t\"--name\": \"SUBSCRIPTION\",\n\t},\n\tRunE: func(_ *cobra.Command, _ []string) error {\n\t\tif name == \"\" 
{\n\t\t\tfmt.Println(\"provide a subscription name with --name\")\n\t\t\treturn nil\n\t\t}\n\t\tsubs, err := gApp.Config.GetSubscriptions(nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif s, ok := subs[name]; ok {\n\t\t\ttabData := subscriptionTable(map[string]*types.SubscriptionConfig{name: s}, false)\n\t\t\trenderTable(tabData, []string{\"Param\", \"Value\"})\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.New(\"unknown subscription\")\n\t},\n\tPostRun: func(_ *cobra.Command, _ []string) {\n\t\tname = \"\"\n\t},\n}\n\nvar outputCmd = &cobra.Command{\n\tUse:   \"output\",\n\tShort: \"manipulate configured outputs\",\n}\n\nvar outputListCmd = &cobra.Command{\n\tUse:   \"list\",\n\tShort: \"list configured outputs\",\n\tRunE: func(_ *cobra.Command, _ []string) error {\n\t\ttabData := gApp.Config.GetOutputsConfigs()\n\t\trenderTable(tabData, []string{\"Name\", \"Config\"})\n\t\treturn nil\n\t},\n}\n\nfunc renderTable(tabData [][]string, header []string) {\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttable.SetHeader(header)\n\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\ttable.SetAutoFormatHeaders(false)\n\ttable.SetAutoWrapText(false)\n\ttable.AppendBulk(tabData)\n\ttable.Render()\n}\n\nfunc targetTable(targetConfigs map[string]*types.TargetConfig, list bool) [][]string {\n\tif list {\n\t\ttabData := make([][]string, 0)\n\t\tfor _, tc := range targetConfigs {\n\t\t\ttabData = append(tabData, []string{\n\t\t\t\ttc.Name,\n\t\t\t\ttc.Address,\n\t\t\t\ttc.UsernameString(),\n\t\t\t\ttc.PasswordString(),\n\t\t\t\ttc.InsecureString(),\n\t\t\t\ttc.SkipVerifyString(),\n\t\t\t\ttc.TLSCAString(),\n\t\t\t\ttc.TLSCertString(),\n\t\t\t\ttc.TLSKeyString(),\n\t\t\t})\n\t\t}\n\t\tsort.Slice(tabData, func(i, j int) bool {\n\t\t\treturn tabData[i][0] < tabData[j][0]\n\t\t})\n\t\treturn tabData\n\t}\n\tif len(targetConfigs) > 1 {\n\t\tgApp.Logger.Printf(\"cannot show multiple targets\")\n\t\treturn nil\n\t}\n\tfor _, tc := range targetConfigs {\n\t\ttabData := make([][]string, 
0, 16)\n\t\ttabData = append(tabData, []string{\"Name\", tc.Name})\n\t\ttabData = append(tabData, []string{\"Address\", tc.Address})\n\t\ttabData = append(tabData, []string{\"Username\", tc.UsernameString()})\n\t\ttabData = append(tabData, []string{\"Password\", tc.PasswordString()})\n\t\ttabData = append(tabData, []string{\"Insecure\", tc.InsecureString()})\n\t\ttabData = append(tabData, []string{\"Skip Verify\", tc.SkipVerifyString()})\n\t\ttabData = append(tabData, []string{\"TLS CA\", tc.TLSCAString()})\n\t\ttabData = append(tabData, []string{\"TLS Certificate\", tc.TLSCertString()})\n\t\ttabData = append(tabData, []string{\"TLS Key\", tc.TLSKeyString()})\n\t\ttabData = append(tabData, []string{\"TLS Min Version\", tc.TLSMinVersion})\n\t\ttabData = append(tabData, []string{\"TLS Max Version\", tc.TLSMaxVersion})\n\t\ttabData = append(tabData, []string{\"TLS Version\", tc.TLSVersion})\n\t\ttabData = append(tabData, []string{\"Subscriptions\", strings.Join(tc.Subscriptions, \"\\n\")})\n\t\ttabData = append(tabData, []string{\"Outputs\", strings.Join(tc.Outputs, \"\\n\")})\n\t\ttabData = append(tabData, []string{\"Buffer Size\", tc.BufferSizeString()})\n\t\ttabData = append(tabData, []string{\"Retry Timer\", tc.RetryTimer.String()})\n\t\treturn tabData\n\t}\n\treturn [][]string{}\n}\n\nfunc subscriptionTable(scs map[string]*types.SubscriptionConfig, list bool) [][]string {\n\tif list {\n\t\ttabData := make([][]string, 0, len(scs))\n\t\tfor _, sub := range scs {\n\t\t\tenc := \"\"\n\t\t\tif sub.Encoding != nil {\n\t\t\t\tenc = *sub.Encoding\n\t\t\t}\n\t\t\ttabData = append(tabData, []string{\n\t\t\t\tsub.Name,\n\t\t\t\tsub.ModeString(),\n\t\t\t\tsub.PrefixString(),\n\t\t\t\tsub.PathsString(),\n\t\t\t\tsub.SampleIntervalString(),\n\t\t\t\tenc,\n\t\t\t})\n\t\t}\n\t\tsort.Slice(tabData, func(i, j int) bool {\n\t\t\treturn tabData[i][0] < tabData[j][0]\n\t\t})\n\t\treturn tabData\n\t}\n\tif len(scs) > 1 {\n\t\tgApp.Logger.Printf(\"cannot show multiple 
subscriptions\")\n\t\treturn nil\n\t}\n\tfor _, sub := range scs {\n\t\ttabData := make([][]string, 0, 8)\n\t\ttabData = append(tabData, []string{\"Name\", sub.Name})\n\t\ttabData = append(tabData, []string{\"Mode\", sub.ModeString()})\n\t\ttabData = append(tabData, []string{\"Prefix\", sub.PrefixString()})\n\t\ttabData = append(tabData, []string{\"Paths\", sub.PathsString()})\n\t\ttabData = append(tabData, []string{\"Sample Interval\", sub.SampleIntervalString()})\n\t\ttabData = append(tabData, []string{\"Encoding\", *sub.Encoding})\n\t\ttabData = append(tabData, []string{\"Qos\", sub.QosString()})\n\t\ttabData = append(tabData, []string{\"Heartbeat Interval\", sub.HeartbeatIntervalString()})\n\t\treturn tabData\n\t}\n\treturn [][]string{}\n}\n\nvar name string\n\nfunc findMatchedXPATH(entry *yang.Entry, input string, prefixPresent bool) []goprompt.Suggest {\n\tif strings.HasPrefix(input, \":\") {\n\t\treturn nil\n\t}\n\tsuggestions := make([]goprompt.Suggest, 0, 4)\n\tinputLen := len(input)\n\tfor i, c := range input {\n\t\tif c == ':' && i+1 < inputLen {\n\t\t\tinput = input[i+1:]\n\t\t\tinputLen -= (i + 1)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tprependOrigin := gApp.Config.LocalFlags.PromptSuggestWithOrigin && !prefixPresent\n\tfor name, child := range entry.Dir {\n\t\tif child.IsCase() || child.IsChoice() {\n\t\t\tfor _, gchild := range child.Dir {\n\t\t\t\tsuggestions = append(suggestions, findMatchedXPATH(gchild, input, prefixPresent)...)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tpathelem := \"/\" + name\n\t\tif strings.HasPrefix(pathelem, input) {\n\t\t\tnode := \"\"\n\t\t\tif inputLen == 0 && prependOrigin {\n\t\t\t\tnode = fmt.Sprintf(\"%s:/%s\", entry.Name, name)\n\t\t\t} else if inputLen > 0 && input[0] == '/' {\n\t\t\t\tnode = name\n\t\t\t} else {\n\t\t\t\tnode = pathelem\n\t\t\t}\n\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: node, Description: buildXPATHDescription(child)})\n\t\t\tif child.Key != \"\" { // list\n\t\t\t\tkeylist := 
strings.Split(child.Key, \" \")\n\t\t\t\tfor _, key := range keylist {\n\t\t\t\t\tnode = fmt.Sprintf(\"%s[%s=*]\", node, key)\n\t\t\t\t}\n\t\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: node, Description: buildXPATHDescription(child)})\n\t\t\t}\n\t\t} else if strings.HasPrefix(input, pathelem) {\n\t\t\tvar prevC rune\n\t\t\tvar bracketCount int\n\t\t\tvar endIndex int = -1\n\t\t\tvar stop bool\n\t\t\tfor i, c := range input {\n\t\t\t\tswitch c {\n\t\t\t\tcase '[':\n\t\t\t\t\tbracketCount++\n\t\t\t\tcase ']':\n\t\t\t\t\tif prevC != '\\\\' {\n\t\t\t\t\t\tbracketCount--\n\t\t\t\t\t\tendIndex = i\n\t\t\t\t\t}\n\t\t\t\tcase '/':\n\t\t\t\t\tif i != 0 && bracketCount == 0 {\n\t\t\t\t\t\tendIndex = i\n\t\t\t\t\t\tstop = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif stop {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tprevC = c\n\t\t\t}\n\t\t\tif bracketCount == 0 {\n\t\t\t\tif endIndex >= 0 {\n\t\t\t\t\tsuggestions = append(suggestions, findMatchedXPATH(child, input[endIndex:], prefixPresent)...)\n\t\t\t\t} else {\n\t\t\t\t\tsuggestions = append(suggestions, findMatchedXPATH(child, input[len(pathelem):], prefixPresent)...)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn suggestions\n}\n\nfunc getDescriptionPrefix(entry *yang.Entry) string {\n\tswitch {\n\tcase entry.Dir == nil && entry.ListAttr != nil: // leaf-list\n\t\treturn \"[⋯]\"\n\tcase entry.Dir == nil: // leaf\n\t\treturn \"   \"\n\tcase entry.ListAttr != nil: // list\n\t\treturn \"[+]\"\n\tdefault: // container\n\t\treturn \"[+]\"\n\t}\n}\n\nfunc getEntryType(entry *yang.Entry) string {\n\tif entry.Type != nil {\n\t\treturn entry.Type.Kind.String()\n\t}\n\treturn \"\"\n}\n\nfunc buildXPATHDescription(entry *yang.Entry) string {\n\tsb := strings.Builder{}\n\tsb.WriteString(getDescriptionPrefix(entry))\n\tsb.WriteString(\" \")\n\tsb.WriteString(getPermissions(entry))\n\tsb.WriteString(\" \")\n\tif gApp.Config.LocalFlags.PromptDescriptionWithTypes {\n\t\tn, _ := sb.WriteString(getEntryType(entry))\n\t\tif n > 0 
{\n\t\t\tsb.WriteString(\", \")\n\t\t}\n\t}\n\tif gApp.Config.LocalFlags.PromptDescriptionWithPrefix {\n\t\tif entry.Prefix != nil {\n\t\t\tsb.WriteString(entry.Prefix.Name)\n\t\t\tsb.WriteString(\": \")\n\t\t}\n\t}\n\tsb.WriteString(entry.Description)\n\treturn sb.String()\n}\n\nfunc getPermissions(entry *yang.Entry) string {\n\tif entry == nil {\n\t\treturn \"(rw)\"\n\t}\n\tswitch entry.Config {\n\tcase yang.TSTrue:\n\t\treturn \"(rw)\"\n\tcase yang.TSFalse:\n\t\treturn \"(ro)\"\n\tcase yang.TSUnset:\n\t\treturn getPermissions(entry.Parent)\n\t}\n\treturn \"(rw)\"\n}\n\nfunc findMatchedSchema(entry *yang.Entry, input string) []*yang.Entry {\n\tschemaNodes := []*yang.Entry{}\n\tfor name, child := range entry.Dir {\n\t\tpathelem := \"/\" + name\n\t\tif strings.HasPrefix(pathelem, input) {\n\t\t\tschemaNodes = append(schemaNodes, child)\n\t\t\tif child.Key != \"\" { // list\n\n\t\t\t\tschemaNodes = append(schemaNodes, child)\n\t\t\t}\n\t\t} else if strings.HasPrefix(input, pathelem) {\n\t\t\tvar prevC rune\n\t\t\tvar bracketCount int\n\t\t\tvar endIndex int = -1\n\t\t\tvar stop bool\n\t\t\tfor i, c := range input {\n\t\t\t\tswitch c {\n\t\t\t\tcase '[':\n\t\t\t\t\tbracketCount++\n\t\t\t\tcase ']':\n\t\t\t\t\tif prevC != '\\\\' {\n\t\t\t\t\t\tbracketCount--\n\t\t\t\t\t\tendIndex = i\n\t\t\t\t\t}\n\t\t\t\tcase '/':\n\t\t\t\t\tif i != 0 && bracketCount == 0 {\n\t\t\t\t\t\tendIndex = i\n\t\t\t\t\t\tstop = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif stop {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tprevC = c\n\t\t\t}\n\t\t\tif bracketCount == 0 {\n\t\t\t\tif endIndex >= 0 {\n\t\t\t\t\tschemaNodes = append(schemaNodes, findMatchedSchema(child, input[endIndex:])...)\n\t\t\t\t} else {\n\t\t\t\t\tschemaNodes = append(schemaNodes, findMatchedSchema(child, input[len(pathelem):])...)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn schemaNodes\n}\n\nvar filePathCompleter = completer.FilePathCompleter{\n\tIgnoreCase: true,\n\tFilter: func(fi os.FileInfo) bool {\n\t\treturn fi.IsDir() || 
!strings.HasPrefix(fi.Name(), \".\")\n\t},\n}\n\nvar yangPathCompleter = completer.FilePathCompleter{\n\tIgnoreCase: true,\n\tFilter: func(fi os.FileInfo) bool {\n\t\treturn fi.IsDir() || strings.HasSuffix(fi.Name(), \".yang\")\n\t},\n}\n\nvar dirPathCompleter = completer.FilePathCompleter{\n\tIgnoreCase: true,\n\tFilter: func(fi os.FileInfo) bool {\n\t\treturn fi.IsDir()\n\t},\n}\n\nfunc findDynamicSuggestions(annotation string, doc goprompt.Document) []goprompt.Suggest {\n\tswitch annotation {\n\tcase \"XPATH\":\n\t\tline := doc.CurrentLine()\n\t\tword := doc.GetWordBeforeCursor()\n\t\tsuggestions := make([]goprompt.Suggest, 0, 16)\n\t\tentries := []*yang.Entry{}\n\t\tif index := strings.Index(line, \"--prefix\"); index >= 0 {\n\t\t\tline = strings.TrimLeft(line[index+8:], \" \") // 8 is len(\"--prefix\")\n\t\t\tend := strings.Index(line, \" \")\n\t\t\tif end >= 0 {\n\t\t\t\tline = line[:end]\n\t\t\t\tlineLen := len(line)\n\t\t\t\t// remove \"origin:\" from prefix if present\n\t\t\t\tfor i, c := range line {\n\t\t\t\t\tif c == ':' && i+1 < lineLen {\n\t\t\t\t\t\tline = line[i+1:]\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// find yang entries matching the prefix\n\t\t\t\tfor _, entry := range gApp.SchemaTree.Dir {\n\t\t\t\t\tentries = append(entries, findMatchedSchema(entry, line)...)\n\t\t\t\t}\n\t\t\t\t// generate suggestions from matching entries\n\t\t\t\tfor _, entry := range entries {\n\t\t\t\t\tsuggestions = append(suggestions, findMatchedXPATH(entry, word, true)...)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// generate suggestions from yang schema\n\t\t\tfor _, entry := range gApp.SchemaTree.Dir {\n\t\t\t\tsuggestions = append(suggestions, findMatchedXPATH(entry, word, false)...)\n\t\t\t}\n\t\t}\n\t\tsort.Slice(suggestions, func(i, j int) bool {\n\t\t\tif suggestions[i].Text == suggestions[j].Text {\n\t\t\t\treturn suggestions[i].Description < suggestions[j].Description\n\t\t\t}\n\t\t\treturn suggestions[i].Text < 
suggestions[j].Text\n\t\t})\n\t\treturn suggestions\n\tcase \"PREFIX\":\n\t\tword := doc.GetWordBeforeCursor()\n\t\tsuggestions := make([]goprompt.Suggest, 0, 16)\n\t\tfor _, entry := range gApp.SchemaTree.Dir {\n\t\t\tsuggestions = append(suggestions, findMatchedXPATH(entry, word, false)...)\n\t\t}\n\t\tsort.Slice(suggestions, func(i, j int) bool {\n\t\t\tif suggestions[i].Text == suggestions[j].Text {\n\t\t\t\treturn suggestions[i].Description < suggestions[j].Description\n\t\t\t}\n\t\t\treturn suggestions[i].Text < suggestions[j].Text\n\t\t})\n\t\treturn suggestions\n\tcase \"FILE\":\n\t\treturn filePathCompleter.Complete(doc)\n\tcase \"YANG\":\n\t\treturn yangPathCompleter.Complete(doc)\n\tcase \"MODEL\":\n\t\tsuggestions := make([]goprompt.Suggest, 0, len(gApp.SchemaTree.Dir))\n\t\tfor name, dir := range gApp.SchemaTree.Dir {\n\t\t\tif dir != nil {\n\t\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: name, Description: dir.Description})\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: name})\n\t\t}\n\t\tsort.Slice(suggestions, func(i, j int) bool {\n\t\t\tif suggestions[i].Text == suggestions[j].Text {\n\t\t\t\treturn suggestions[i].Description < suggestions[j].Description\n\t\t\t}\n\t\t\treturn suggestions[i].Text < suggestions[j].Text\n\t\t})\n\t\treturn goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true)\n\tcase \"DIR\":\n\t\treturn dirPathCompleter.Complete(doc)\n\tcase \"ENCODING\":\n\t\tsuggestions := make([]goprompt.Suggest, 0, len(encodings))\n\t\tfor _, sugg := range encodings {\n\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: sugg[0], Description: sugg[1]})\n\t\t}\n\t\treturn goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true)\n\tcase \"FORMAT\":\n\t\tsuggestions := make([]goprompt.Suggest, 0, len(formats))\n\t\tfor _, sugg := range formats {\n\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: sugg[0], Description: 
sugg[1]})\n\t\t}\n\t\treturn goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true)\n\tcase \"STORE\":\n\t\tsuggestions := make([]goprompt.Suggest, 0, len(get.DataType))\n\t\tfor _, sugg := range get.DataType {\n\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: sugg[0], Description: sugg[1]})\n\t\t}\n\t\treturn goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true)\n\tcase \"SUBSC_MODE\":\n\t\tsuggestions := make([]goprompt.Suggest, 0, len(subscribe.Modes))\n\t\tfor _, sugg := range subscribe.Modes {\n\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: sugg[0], Description: sugg[1]})\n\t\t}\n\t\treturn goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true)\n\tcase \"STREAM_MODE\":\n\t\tsuggestions := make([]goprompt.Suggest, 0, len(subscribe.StreamModes))\n\t\tfor _, sugg := range subscribe.StreamModes {\n\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: sugg[0], Description: sugg[1]})\n\t\t}\n\t\treturn goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true)\n\tcase \"SUBSCRIPTION\":\n\t\tsubs := gApp.Config.GetSubscriptionsFromFile()\n\t\tsuggestions := make([]goprompt.Suggest, 0, len(subs))\n\t\tfor _, sub := range subs {\n\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: sub.Name, Description: subscriptionDescription(sub)})\n\t\t}\n\t\treturn goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true)\n\tcase \"TARGET\":\n\t\ttargetsConfig := gApp.Config.TargetsList()\n\t\tsuggestions := make([]goprompt.Suggest, 0, len(targetsConfig))\n\t\tfor _, target := range targetsConfig {\n\t\t\tsb := strings.Builder{}\n\t\t\tif target.Name != target.Address {\n\t\t\t\tsb.WriteString(\"address=\")\n\t\t\t\tsb.WriteString(target.Address)\n\t\t\t\tsb.WriteString(\", \")\n\t\t\t}\n\t\t\tsb.WriteString(\"secure=\")\n\t\t\tif *target.Insecure {\n\t\t\t\tsb.WriteString(\"false\")\n\t\t\t} else {\n\t\t\t\tsb.WriteString(fmt.Sprintf(\"%v\", 
!(strings.Contains(doc.CurrentLine(), \"--insecure\"))))\n\t\t\t}\n\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: target.Name, Description: sb.String()})\n\t\t}\n\t\treturn goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true)\n\tcase \"OUTPUT\":\n\t\toutputGroups := gApp.Config.GetOutputsSuggestions()\n\t\tsuggestions := make([]goprompt.Suggest, 0, len(outputGroups))\n\t\tfor _, sugg := range outputGroups {\n\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: sugg.Name, Description: strings.Join(sugg.Types, \", \")})\n\t\t}\n\t\treturn goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true)\n\t}\n\treturn []goprompt.Suggest{}\n}\n\nfunc subscriptionDescription(sub *types.SubscriptionConfig) string {\n\tsb := strings.Builder{}\n\tsb.WriteString(\"mode=\")\n\tsb.WriteString(sub.Mode)\n\tsb.WriteString(\", \")\n\tif strings.ToLower(sub.Mode) == \"stream\" {\n\t\tsb.WriteString(\"stream-mode=\")\n\t\tsb.WriteString(sub.StreamMode)\n\t\tsb.WriteString(\", \")\n\t\tif strings.ToLower(sub.StreamMode) == \"sample\" {\n\t\t\tsb.WriteString(\"sample-interval=\")\n\t\t\tsb.WriteString(sub.SampleInterval.String())\n\t\t\tsb.WriteString(\", \")\n\t\t}\n\t}\n\tif sub.Encoding != nil {\n\t\tsb.WriteString(\"encoding=\")\n\t\tsb.WriteString(*sub.Encoding)\n\t\tsb.WriteString(\", \")\n\t}\n\tif sub.Prefix != \"\" {\n\t\tsb.WriteString(\"prefix=\")\n\t\tsb.WriteString(sub.Prefix)\n\t\tsb.WriteString(\", \")\n\t}\n\tsb.WriteString(\"path(s)=\")\n\tsb.WriteString(strings.Join(sub.Paths, \",\"))\n\treturn sb.String()\n}\n\nfunc showCommandArguments(b *goprompt.Buffer) {\n\tdoc := b.Document()\n\tshowLocalFlags := false\n\tcommand := gApp.RootCmd\n\targs := strings.Fields(doc.CurrentLine())\n\tif found, _, err := command.Find(args); err == nil {\n\t\tif command != found {\n\t\t\tshowLocalFlags = true\n\t\t}\n\t\tcommand = found\n\t}\n\tmaxNameLen := 0\n\tsuggestions := make([]goprompt.Suggest, 0, 32)\n\tif 
command.HasAvailableSubCommands() {\n\t\tfor _, c := range command.Commands() {\n\t\t\tif c.Hidden {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlength := len(c.Name())\n\t\t\tif maxNameLen < length {\n\t\t\t\tmaxNameLen = length\n\t\t\t}\n\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: c.Name(), Description: c.Short})\n\t\t}\n\t}\n\tif showLocalFlags {\n\t\taddFlags := func(flag *pflag.Flag) {\n\t\t\tif flag.Hidden {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlength := len(flag.Name)\n\t\t\tif maxNameLen < length+2 {\n\t\t\t\tmaxNameLen = length + 2\n\t\t\t}\n\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: \"--\" + flag.Name, Description: flag.Usage})\n\t\t}\n\t\tcommand.LocalFlags().VisitAll(addFlags)\n\t}\n\tsuggestions = goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true)\n\tif len(suggestions) == 0 {\n\t\treturn\n\t}\n\tif err := termbox.Init(); err != nil {\n\t\tgApp.Logger.Fatalf(\"%v\", err)\n\t}\n\tw, _ := termbox.Size()\n\ttermbox.Close()\n\tfmt.Printf(\"\\n\")\n\tmaxDescLen := w - maxNameLen - 6\n\tformat := fmt.Sprintf(\"  %%-%ds : %%-%ds\\n\", maxNameLen, maxDescLen)\n\tfor i := range suggestions {\n\t\tlength := len(suggestions[i].Description)\n\t\tif length > maxDescLen {\n\t\t\tfmt.Printf(format, suggestions[i].Text, suggestions[i].Description[:maxDescLen])\n\t\t} else {\n\t\t\tfmt.Printf(format, suggestions[i].Text, suggestions[i].Description)\n\t\t}\n\t}\n\tfmt.Printf(\"\\n\")\n}\n\n// ExecutePrompt load and run gnmic-prompt mode.\nfunc ExecutePrompt() {\n\tinitPromptCmds()\n\tshell := &cmdPrompt{\n\t\tRootCmd: gApp.RootCmd,\n\t\tGoPromptOptions: []goprompt.Option{\n\t\t\tgoprompt.OptionTitle(\"gnmic-prompt\"),\n\t\t\tgoprompt.OptionPrefix(\"gnmic> 
\"),\n\t\t\tgoprompt.OptionHistory(gApp.PromptHistory),\n\t\t\tgoprompt.OptionMaxSuggestion(gApp.Config.LocalFlags.PromptMaxSuggestions),\n\t\t\tgoprompt.OptionPrefixTextColor(getColor(\"prefix-color\")),\n\t\t\tgoprompt.OptionPreviewSuggestionTextColor(goprompt.Cyan),\n\t\t\tgoprompt.OptionSuggestionTextColor(goprompt.White),\n\t\t\tgoprompt.OptionSuggestionBGColor(getColor(\"suggestions-bg-color\")),\n\t\t\tgoprompt.OptionSelectedSuggestionTextColor(goprompt.Black),\n\t\t\tgoprompt.OptionSelectedSuggestionBGColor(goprompt.White),\n\t\t\tgoprompt.OptionDescriptionTextColor(goprompt.LightGray),\n\t\t\tgoprompt.OptionDescriptionBGColor(getColor(\"description-bg-color\")),\n\t\t\tgoprompt.OptionSelectedDescriptionTextColor(goprompt.Black),\n\t\t\tgoprompt.OptionSelectedDescriptionBGColor(goprompt.White),\n\t\t\tgoprompt.OptionScrollbarBGColor(goprompt.DarkGray),\n\t\t\tgoprompt.OptionScrollbarThumbColor(goprompt.Blue),\n\t\t\tgoprompt.OptionAddASCIICodeBind(\n\t\t\t\t// bind '?' character to show cmd args\n\t\t\t\tgoprompt.ASCIICodeBind{\n\t\t\t\t\tASCIICode: []byte{0x3f},\n\t\t\t\t\tFn:        showCommandArguments,\n\t\t\t\t},\n\t\t\t\t// bind OS X Option+Left key binding\n\t\t\t\tgoprompt.ASCIICodeBind{\n\t\t\t\t\tASCIICode: []byte{0x1b, 0x62},\n\t\t\t\t\tFn:        goprompt.GoLeftWord,\n\t\t\t\t},\n\t\t\t\t// bind OS X Option+Right key binding\n\t\t\t\tgoprompt.ASCIICodeBind{\n\t\t\t\t\tASCIICode: []byte{0x1b, 0x66},\n\t\t\t\t\tFn:        goprompt.GoRightWord,\n\t\t\t\t},\n\t\t\t),\n\t\t\tgoprompt.OptionAddKeyBind(\n\t\t\t\t// bind Linux CTRL+Left key binding\n\t\t\t\tgoprompt.KeyBind{\n\t\t\t\t\tKey: goprompt.ControlLeft,\n\t\t\t\t\tFn:  goprompt.GoLeftWord,\n\t\t\t\t},\n\t\t\t\t// bind Linux CTRL+Right key binding\n\t\t\t\tgoprompt.KeyBind{\n\t\t\t\t\tKey: goprompt.ControlRight,\n\t\t\t\t\tFn:  goprompt.GoRightWord,\n\t\t\t\t},\n\t\t\t\t// bind CTRL+Z key to delete path elements\n\t\t\t\tgoprompt.KeyBind{\n\t\t\t\t\tKey: goprompt.ControlZ,\n\t\t\t\t\tFn: 
func(buf *goprompt.Buffer) {\n\t\t\t\t\t\t// If the last word before the cursor does not contain a \"/\" return.\n\t\t\t\t\t\t// This is needed to avoid deleting down to a previous flag value\n\t\t\t\t\t\tif !strings.Contains(buf.Document().GetWordBeforeCursorWithSpace(), \"/\") {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// Check if the last rune is a PathSeparator and is not the path root then delete it\n\t\t\t\t\t\tif buf.Document().GetCharRelativeToCursor(0) == os.PathSeparator && buf.Document().GetCharRelativeToCursor(-1) != ' ' {\n\t\t\t\t\t\t\tbuf.DeleteBeforeCursor(1)\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// Delete down until the next \"/\"\n\t\t\t\t\t\tbuf.DeleteBeforeCursor(len([]rune(buf.Document().GetWordBeforeCursorUntilSeparator(\"/\"))))\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t),\n\t\t\tgoprompt.OptionCompletionWordSeparator(completer.FilePathCompletionSeparator),\n\t\t\t// goprompt.OptionCompletionOnDown(),\n\t\t\tgoprompt.OptionShowCompletionAtStart(),\n\t\t},\n\t}\n\tshell.Run()\n}\n\nfunc initPromptCmds() {\n\tgApp.RootCmd.AddCommand(promptQuitCmd)\n\tgApp.RootCmd.AddCommand(targetCmd)\n\tgApp.RootCmd.AddCommand(subscriptionCmd)\n\tgApp.RootCmd.AddCommand(outputCmd)\n\n\ttargetCmd.AddCommand(targetListCmd)\n\ttargetCmd.AddCommand(targetShowCmd)\n\ttargetShowCmd.Flags().StringVarP(&name, \"name\", \"\", \"\", \"target name\")\n\n\tsubscriptionCmd.AddCommand(subscriptionListCmd)\n\tsubscriptionCmd.AddCommand(subscriptionShowCmd)\n\tsubscriptionShowCmd.Flags().StringVarP(&name, \"name\", \"\", \"\", \"subscription name\")\n\n\toutputCmd.AddCommand(outputListCmd)\n\n\tgApp.RootCmd.RemoveCommand(promptModeCmd)\n}\n\n// Reference: https://github.com/stromland/cobra-prompt\n// cmdPrompt requires RootCmd to run\ntype cmdPrompt struct {\n\t// RootCmd is the start point, all its sub commands and flags will be available as suggestions\n\tRootCmd *cobra.Command\n\n\t// GoPromptOptions is for customize go-prompt\n\t// see 
https://github.com/c-bata/go-prompt/blob/master/option.go\n\tGoPromptOptions []goprompt.Option\n}\n\n// Run will automatically generate suggestions for all cobra commands\n// and flags defined by RootCmd and execute the selected commands.\nfunc (co cmdPrompt) Run() {\n\tp := goprompt.New(\n\t\tfunc(in string) {\n\t\t\tpromptArgs, err := parsePromptArgs(in)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprint(os.Stderr, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tos.Args = append([]string{os.Args[0]}, promptArgs...)\n\t\t\tif len(promptArgs) > 0 {\n\t\t\t\terr := co.RootCmd.Execute()\n\t\t\t\tif err == nil && in != \"\" {\n\t\t\t\t\tgApp.PromptHistory = append(gApp.PromptHistory, in)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\tfunc(d goprompt.Document) []goprompt.Suggest {\n\t\t\treturn findSuggestions(co, d)\n\t\t},\n\t\tco.GoPromptOptions...,\n\t)\n\tp.Run()\n}\n\nfunc parsePromptArgs(in string) ([]string, error) {\n\tvar m = []string{}\n\tvar s string\n\n\t// space suffix ensures the last string is appended\n\tin = strings.TrimSpace(in) + \" \"\n\n\tlastQuote := rune(0)\n\tisSpace := false\n\tfor _, c := range in {\n\t\tswitch {\n\t\t// ending a quoted item, break out, skip this character and reset lastQuote\n\t\tcase c == lastQuote:\n\t\t\tlastQuote = rune(0)\n\n\t\t// in a quoted item, include this character\n\t\tcase lastQuote != rune(0):\n\t\t\ts += string(c)\n\n\t\t// starting a quoted item, set lastQuote\n\t\tcase unicode.In(c, unicode.Quotation_Mark):\n\t\t\tisSpace = false\n\t\t\tlastQuote = c\n\n\t\t// a space, append the string to the list\n\t\t// if it was not already added (previous char was a space)\n\t\t// and reset string s\n\t\tcase unicode.IsSpace(c):\n\t\t\tif isSpace {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tisSpace = true\n\t\t\tm = append(m, s)\n\t\t\ts = \"\"\n\t\t// add the char to the string\n\t\tdefault:\n\t\t\tisSpace = false\n\t\t\ts += string(c)\n\t\t}\n\t}\n\n\tif lastQuote != rune(0) {\n\t\treturn nil, fmt.Errorf(\"quotes not closed\")\n\t}\n\n\treturn m, 
nil\n}\n\nfunc findSuggestions(co cmdPrompt, doc goprompt.Document) []goprompt.Suggest {\n\tcommand := co.RootCmd\n\targs := strings.Fields(doc.CurrentLine())\n\tif found, _, err := command.Find(args); err == nil {\n\t\tcommand = found\n\t}\n\n\tsuggestions := make([]goprompt.Suggest, 0, 32)\n\n\t// check flag annotation for the dynamic suggestion\n\tannotation := \"\"\n\targnum := len(args)\n\twordBefore := doc.GetWordBeforeCursor()\n\tif wordBefore == \"\" {\n\t\tif argnum >= 1 {\n\t\t\tannotation = command.Annotations[args[argnum-1]]\n\t\t}\n\t} else {\n\t\tif argnum >= 2 {\n\t\t\tannotation = command.Annotations[args[argnum-2]]\n\t\t}\n\t}\n\tif annotation != \"\" {\n\t\treturn append(suggestions, findDynamicSuggestions(annotation, doc)...)\n\t}\n\t// add sub commands suggestions if they exist\n\tif command.HasAvailableSubCommands() {\n\t\tfor _, c := range command.Commands() {\n\t\t\tif !c.Hidden {\n\t\t\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: c.Name(), Description: c.Short})\n\t\t\t}\n\t\t}\n\t}\n\taddFlags := func(flag *pflag.Flag) {\n\t\tif flag.Hidden {\n\t\t\treturn\n\t\t}\n\t\tsuggestions = append(suggestions, goprompt.Suggest{Text: \"--\" + flag.Name, Description: flag.Usage})\n\t}\n\t// load local flags\n\tcommand.LocalFlags().VisitAll(addFlags)\n\tif gApp.Config.LocalFlags.PromptSuggestAllFlags {\n\t\t// load inherited flags\n\t\tcommand.InheritedFlags().VisitAll(addFlags)\n\t}\n\n\treturn goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true)\n}\n"
  },
  {
    "path": "pkg/cmd/prompt_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cmd\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n)\n\ntype testItem struct {\n\tin  string\n\tout []string\n}\n\nvar promptArgsTestSet = map[string]testItem{\n\t\"no_args\": {\n\t\tin:  ``,\n\t\tout: []string{\"\"},\n\t},\n\t\"one_arg\": {\n\t\tin:  `arg`,\n\t\tout: []string{\"arg\"},\n\t},\n\t\"multiple_args\": {\n\t\tin:  `arg1 arg2 --flag1 val1`,\n\t\tout: []string{\"arg1\", \"arg2\", \"--flag1\", \"val1\"},\n\t},\n\t\"single_quoted_args\": {\n\t\tin:  `arg1 arg2 --flag1 'val 1'`,\n\t\tout: []string{\"arg1\", \"arg2\", \"--flag1\", \"val 1\"},\n\t},\n\t\"double_quoted_args\": {\n\t\tin:  `arg1 arg2 --flag1 \"val 1\"`,\n\t\tout: []string{\"arg1\", \"arg2\", \"--flag1\", \"val 1\"},\n\t},\n\t\"quoted_args_with_multiple_spaces\": {\n\t\tin:  `arg1 arg2 --flag1 \"val 1\" --flag2 \"val  \\t2\"`,\n\t\tout: []string{\"arg1\", \"arg2\", \"--flag1\", \"val 1\", \"--flag2\", `val  \\t2`},\n\t},\n\t\"quoted_args_with_spaces_between_items\": {\n\t\tin:  `      arg1 arg2       --flag1 'val 1'      --flag2 \"val 2\"             `,\n\t\tout: []string{\"arg1\", \"arg2\", \"--flag1\", \"val 1\", \"--flag2\", `val 2`},\n\t},\n}\n\nfunc TestGetInstancesTagsMatches(t *testing.T) {\n\tfor name, item := range promptArgsTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tres, err := parsePromptArgs(item.in)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tt.Logf(\"exp value: %#v\", item.out)\n\t\t\tt.Logf(\"got value: %#v\", res)\n\t\t\tif 
!cmp.Equal(item.out, res) {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/cmd/proxy/proxy.go",
    "content": "package proxy\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\n// New creates the proxy command.\nfunc New(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"proxy\",\n\t\tShort:   \"run a gNMI server that proxies gNMI requests towards known targets\",\n\t\tPreRunE: gApp.ProxyPreRunE,\n\t\tRunE:    gApp.ProxyRunE,\n\t\tPostRun: func(cmd *cobra.Command, args []string) {\n\t\t\tgApp.CleanupPlugins()\n\t\t},\n\t\tSilenceUsage: true,\n\t}\n\treturn cmd\n}\n"
  },
  {
    "path": "pkg/cmd/root.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cmd\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"os\"\n\t\"os/signal\"\n\t\"syscall\"\n\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/capabilities\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/collector\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/diff\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/generate\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/get\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/getset\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/listener\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/path\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/processor\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/proxy\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/set\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/subscribe\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/tree\"\n\t\"github.com/openconfig/gnmic/pkg/cmd/version\"\n)\n\nvar encodings = [][2]string{\n\t{\"json\", \"JSON encoded string (RFC7159)\"},\n\t{\"bytes\", \"byte sequence whose semantics is opaque to the protocol\"},\n\t{\"proto\", \"serialised protobuf message using protobuf.Any\"},\n\t{\"ascii\", \"ASCII encoded string representing text formatted according to a target-defined convention\"},\n\t{\"json_ietf\", \"JSON_IETF encoded string (RFC7951)\"},\n}\nvar formats = [][2]string{\n\t{\"json\", \"similar to protojson but with xpath style paths and decoded timestamps\"},\n\t{\"protojson\", \"protocol buffer messages in JSON format\"},\n\t{\"prototext\", \"protocol buffer messages in 
textproto format\"},\n\t{\"event\", \"protocol buffer messages as a timestamped list of tags and values\"},\n\t{\"proto\", \"protocol buffer messages in binary wire format\"},\n}\n\nvar gApp = app.New()\n\nfunc newRootCmd() *cobra.Command {\n\tgApp.RootCmd = &cobra.Command{\n\t\tUse:   \"gnmic\",\n\t\tShort: \"run gnmi rpcs from the terminal (https://gnmic.openconfig.net)\",\n\t\tAnnotations: map[string]string{\n\t\t\t\"--encoding\": \"ENCODING\",\n\t\t\t\"--config\":   \"FILE\",\n\t\t\t\"--format\":   \"FORMAT\",\n\t\t\t\"--address\":  \"TARGET\",\n\t\t},\n\t\tPersistentPreRunE: gApp.PreRunE,\n\t}\n\tgApp.InitGlobalFlags()\n\tgApp.RootCmd.AddCommand(newCompletionCmd(gApp))\n\tgApp.RootCmd.AddCommand(newPromptCmd())\n\n\t// Subcommands\n\tgApp.RootCmd.AddCommand(capabilities.New(gApp))\n\tgApp.RootCmd.AddCommand(get.New(gApp))\n\tgApp.RootCmd.AddCommand(getset.New(gApp))\n\tgApp.RootCmd.AddCommand(listener.New(gApp))\n\tgApp.RootCmd.AddCommand(path.New(gApp))\n\tgApp.RootCmd.AddCommand(diff.New(gApp))\n\tgApp.RootCmd.AddCommand(generate.New(gApp))\n\tgApp.RootCmd.AddCommand(set.New(gApp))\n\tgApp.RootCmd.AddCommand(subscribe.New(gApp))\n\tgApp.RootCmd.AddCommand(version.New(gApp))\n\tgApp.RootCmd.AddCommand(proxy.New(gApp))\n\tgApp.RootCmd.AddCommand(processor.New(gApp))\n\tgApp.RootCmd.AddCommand(collector.New(gApp))\n\tgApp.RootCmd.AddCommand(tree.New(gApp))\n\treturn gApp.RootCmd\n}\n\n// Execute adds all child commands to the root command and sets flags appropriately.\n// This is called by main.main(). 
It only needs to happen once to the rootCmd.\nfunc Execute() {\n\tsetupCloseHandler(gApp.Cfn)\n\tif err := newRootCmd().Execute(); err != nil {\n\t\t//fmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tif gApp.PromptMode {\n\t\tExecutePrompt()\n\t}\n}\n\nfunc init() {\n\tcobra.OnInitialize(initConfig)\n}\n\n// initConfig reads in config file and ENV variables if set.\nfunc initConfig() {\n\terr := gApp.Config.Load(gApp.Context())\n\tif err == nil {\n\t\treturn\n\t}\n\tif _, ok := err.(*fs.PathError); !ok {\n\t\tfmt.Fprintf(os.Stderr, \"failed loading config file: %v\\n\", err)\n\t}\n}\n\nfunc setupCloseHandler(cancelFn context.CancelFunc) {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGINT)\n\tgo func() {\n\t\tsig := <-c\n\t\tfmt.Printf(\"\\nreceived signal '%s'. terminating...\\n\", sig.String())\n\t\tgApp.CleanupPlugins()\n\t\tcancelFn()\n\t\tos.Exit(0)\n\t}()\n}\n"
  },
  {
    "path": "pkg/cmd/set/set.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage set\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\n// New creates the set command tree.\nfunc New(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:   \"set\",\n\t\tShort: \"run gnmi set on targets\",\n\t\tAnnotations: map[string]string{\n\t\t\t\"--delete\":       \"XPATH\",\n\t\t\t\"--prefix\":       \"PREFIX\",\n\t\t\t\"--replace\":      \"XPATH\",\n\t\t\t\"--replace-file\": \"FILE\",\n\t\t\t\"--replace-path\": \"XPATH\",\n\t\t\t\"--update\":       \"XPATH\",\n\t\t\t\"--update-file\":  \"FILE\",\n\t\t\t\"--update-path\":  \"XPATH\",\n\t\t},\n\t\tPreRunE:      gApp.SetPreRunE,\n\t\tRunE:         gApp.SetRunE,\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitSetFlags(cmd)\n\treturn cmd\n}\n"
  },
  {
    "path": "pkg/cmd/subscribe/subscribe.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage subscribe\n\nimport (\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/openconfig/gnmic/pkg/app\"\n)\n\nvar (\n\t// Modes is the list of supported subscription modes.\n\tModes = [][2]string{\n\t\t{\"once\", \"a single request/response channel. The target creates the relevant update messages, transmits them, and subsequently closes the RPC\"},\n\t\t{\"stream\", \"long-lived subscriptions which continue to transmit updates relating to the set of paths that are covered within the subscription indefinitely\"},\n\t\t{\"poll\", \"on-demand retrieval of data items via long-lived RPCs\"},\n\t}\n\n\t// StreamModes is the list of supported streaming modes.\n\tStreamModes = [][2]string{\n\t\t{\"target-defined\", \"the target MUST determine the best type of subscription to be created on a per-leaf basis\"},\n\t\t{\"sample\", \"the value of the data item(s) MUST be sent once per sample interval to the client\"},\n\t\t{\"on-change\", \"data updates are only sent when the value of the data item changes\"},\n\t}\n)\n\n// New create the subscribe command tree.\nfunc New(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"subscribe\",\n\t\tAliases: []string{\"sub\"},\n\t\tShort:   \"subscribe to gnmi updates on targets\",\n\t\tAnnotations: map[string]string{\n\t\t\t\"--path\":        \"XPATH\",\n\t\t\t\"--prefix\":      \"PREFIX\",\n\t\t\t\"--model\":       \"MODEL\",\n\t\t\t\"--mode\":        \"SUBSC_MODE\",\n\t\t\t\"--stream-mode\": \"STREAM_MODE\",\n\t\t\t\"--name\":        
\"SUBSCRIPTION\",\n\t\t\t\"--output\":      \"OUTPUT\",\n\t\t},\n\t\tPreRunE: gApp.SubscribePreRunE,\n\t\tRunE:    gApp.SubscribeRunE,\n\t\tPostRun: func(cmd *cobra.Command, args []string) {\n\t\t\tgApp.CleanupPlugins()\n\t\t},\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitSubscribeFlags(cmd)\n\treturn cmd\n}\n"
  },
  {
    "path": "pkg/cmd/tree/tree.go",
    "content": "package tree\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\n// New creates the tree command.\nfunc New(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:   \"tree\",\n\t\tShort: \"print the commands tree\",\n\t\tRunE:  gApp.RunETree,\n\t\tPostRun: func(cmd *cobra.Command, args []string) {\n\t\t\tgApp.CleanupPlugins()\n\t\t},\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitTreeFlags(cmd)\n\treturn cmd\n}\n"
  },
  {
    "path": "pkg/cmd/version/version.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage version\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n)\n\n// New creates the version command tree.\nfunc New(gApp *app.App) *cobra.Command {\n\tversionCmd := &cobra.Command{\n\t\tUse:   \"version\",\n\t\tShort: \"show gnmic version\",\n\t\tPreRun: func(cmd *cobra.Command, _ []string) {\n\t\t\tgApp.Config.SetLocalFlagsFromFile(cmd)\n\t\t},\n\t\tRun: gApp.VersionRun,\n\t}\n\tversionCmd.AddCommand(newVersionUpgradeCmd(gApp))\n\n\treturn versionCmd\n}\n"
  },
  {
    "path": "pkg/cmd/version/versionUpgrade.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage version\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/openconfig/gnmic/pkg/app\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n)\n\n// newVersionUpgradeCmd creates the version upgrade command tree.\nfunc newVersionUpgradeCmd(gApp *app.App) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:     \"upgrade\",\n\t\tAliases: []string{\"up\"},\n\t\tShort:   \"upgrade gnmic to latest available version\",\n\t\tPreRun: func(cmd *cobra.Command, _ []string) {\n\t\t\tgApp.Config.SetLocalFlagsFromFile(cmd)\n\t\t},\n\t\tRunE: gApp.VersionUpgradeRun,\n\t}\n\tinitVersionUpgradeFlags(cmd, gApp)\n\treturn cmd\n}\n\nfunc initVersionUpgradeFlags(cmd *cobra.Command, gApp *app.App) {\n\tcmd.Flags().Bool(\"use-pkg\", false, \"upgrade using package\")\n\tcmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {\n\t\tgApp.Config.FileConfig.BindPFlag(fmt.Sprintf(\"%s-%s\", cmd.Name(), flag.Name), flag)\n\t})\n}\n"
  },
  {
    "path": "pkg/collector/api/const/const.go",
    "content": "package apiconst\n\nconst (\n\tAPIv1                 = \"/api/v1\"\n\tConfigAPIv1URL        = APIv1 + \"/config\"\n\tTargetsConfigAPIv1URL = ConfigAPIv1URL + \"/targets\"\n\n\tTargetsAPIv1URL       = APIv1 + \"/targets\"\n\tSubscriptionsAPIv1URL = APIv1 + \"/subscriptions\"\n\tOutputsAPIv1URL       = APIv1 + \"/outputs\"\n\tInputsAPIv1URL        = APIv1 + \"/inputs\"\n\n\tAssignmentsAPIv1URL = APIv1 + \"/assignments\"\n\tProcessorsAPIv1URL  = APIv1 + \"/processors\"\n)\n"
  },
  {
    "path": "pkg/collector/api/server/apiserver.go",
    "content": "package apiserver\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"net\"\n\t\"net/http\"\n\t\"sync\"\n\n\t\"github.com/gorilla/mux\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/collector/env\"\n\tcluster_manager \"github.com/openconfig/gnmic/pkg/collector/managers/cluster\"\n\tinputs_manager \"github.com/openconfig/gnmic/pkg/collector/managers/inputs\"\n\toutputs_manager \"github.com/openconfig/gnmic/pkg/collector/managers/outputs\"\n\ttargets_manager \"github.com/openconfig/gnmic/pkg/collector/managers/targets\"\n\tcollstore \"github.com/openconfig/gnmic/pkg/collector/store\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n\t\"github.com/openconfig/gnmic/pkg/logging\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\ntype Server struct {\n\trouter *mux.Router\n\tstore  *collstore.Store\n\n\tlocker         lockers.Locker\n\ttargetsManager *targets_manager.TargetsManager\n\toutputsManager *outputs_manager.OutputsManager\n\tinputsManager  *inputs_manager.InputsManager\n\tclusterManager *cluster_manager.ClusterManager\n\tsrv            *http.Server\n\tlogger         *slog.Logger\n\treg            *prometheus.Registry\n\n\tapplyLock *sync.Mutex\n}\n\nfunc NewServer(\n\tstore *collstore.Store,\n\ttargetManager *targets_manager.TargetsManager,\n\toutputsManager *outputs_manager.OutputsManager,\n\tinputsManager *inputs_manager.InputsManager,\n\tclusterManager *cluster_manager.ClusterManager,\n\treg *prometheus.Registry,\n) *Server {\n\ts := &Server{\n\t\trouter:         mux.NewRouter(),\n\t\tstore:          store,\n\t\ttargetsManager: targetManager,\n\t\toutputsManager: outputsManager,\n\t\tinputsManager:  inputsManager,\n\t\tclusterManager: clusterManager,\n\t\treg:            reg,\n\t\tapplyLock:      new(sync.Mutex),\n\t}\n\ts.routes()\n\ts.registerMetrics()\n\treturn s\n}\n\nfunc (s *Server) Start(locker lockers.Locker, wg 
*sync.WaitGroup) error {\n\ts.locker = locker\n\ts.logger = logging.NewLogger(s.store.Config, \"component\", \"api-server\")\n\ts.logger.Info(\"starting API server\")\n\n\tapiServer, ok, err := s.store.Config.Get(\"api-server\", \"api-server\")\n\tif err != nil {\n\t\ts.logger.Error(\"failed to get api-server config\", \"error\", err)\n\t\treturn err\n\t}\n\tif !ok {\n\t\treturn nil\n\t}\n\tif apiServer == nil {\n\t\ts.logger.Info(\"api-server config not found, skipping API server\")\n\t\treturn nil\n\t}\n\tvar apiCfg *config.APIServer\n\tvar listener net.Listener\n\tswitch apiCfgImpl := apiServer.(type) {\n\tcase *config.APIServer:\n\t\tif apiCfgImpl == nil {\n\t\t\ts.logger.Info(\"api-server config is nil, skipping API server\")\n\t\t\treturn nil\n\t\t}\n\t\tapiCfg = apiCfgImpl\n\t\tenv.ExpandAPIEnv(apiCfg)\n\t\t// create listener\n\t\tlistener, err = createListener(apiCfg)\n\t\tif err != nil {\n\t\t\ts.logger.Error(\"failed to create listener\", \"error\", err)\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\ts.logger.Error(\"invalid api-server config\", \"config\", apiServer)\n\t\treturn fmt.Errorf(\"invalid api-server config: %v\", apiServer)\n\t}\n\ts.srv = &http.Server{\n\t\tAddr:    apiCfg.Address,\n\t\tHandler: s.router,\n\t\t// TODO: add timeouts\n\t\t// ReadTimeout:  apiCfg.Timeout / 2,\n\t\t// WriteTimeout: apiCfg.Timeout / 2,\n\t\t// IdleTimeout:  apiCfg.Timeout / 2,\n\t}\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\terr := s.srv.Serve(listener)\n\t\tif err != nil { // TODO: ignore shutdown errors\n\t\t\ts.logger.Error(\"failed to serve API server\", \"error\", err)\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc (s *Server) Stop() {\n\ts.logger.Info(\"stopping API server\")\n\terr := s.srv.Shutdown(context.Background()) // TODO: change context ?\n\tif err != nil {\n\t\ts.logger.Error(\"failed to shutdown API server\", \"error\", err)\n\t}\n}\n\ntype APIErrors struct {\n\tErrors []string `json:\"errors,omitempty\"`\n}\n\nfunc createListener(apiCfg 
*config.APIServer) (net.Listener, error) {\n\tif apiCfg.TLS != nil {\n\t\ttlsCfg, err := utils.NewTLSConfig(\n\t\t\tapiCfg.TLS.CaFile,\n\t\t\tapiCfg.TLS.CertFile,\n\t\t\tapiCfg.TLS.KeyFile,\n\t\t\tapiCfg.TLS.ClientAuth,\n\t\t\tapiCfg.TLS.SkipVerify,\n\t\t\tfalse, // genSelfSigned\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn tls.Listen(\"tcp\", apiCfg.Address, tlsCfg)\n\t}\n\treturn net.Listen(\"tcp\", apiCfg.Address)\n}\n"
  },
  {
    "path": "pkg/collector/api/server/apply.go",
    "content": "package apiserver\n\nimport (\n\t\"compress/gzip\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"io\"\n\t\"net/http\"\n\n\t\"github.com/mitchellh/mapstructure\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n)\n\n// Apply request is a request to apply the configuration to the collector.\n// Any object that is not provided in the request is deleted.\ntype ConfigApplyRequest struct {\n\tTargets             map[string]*types.TargetConfig       `json:\"targets\"`\n\tSubscriptions       map[string]*types.SubscriptionConfig `json:\"subscriptions\"`\n\tOutputs             map[string]map[string]any            `json:\"outputs\"`\n\tInputs              map[string]map[string]any            `json:\"inputs\"`\n\tProcessors          map[string]map[string]any            `json:\"processors\"`\n\tTunnelTargetMatches map[string]*config.TunnelTargetMatch `json:\"tunnel-target-matches\"`\n}\n\nfunc validateApplyRequest(req *ConfigApplyRequest) error {\n\tif len(req.Targets) == 0 && len(req.Subscriptions) == 0 &&\n\t\tlen(req.Outputs) == 0 && len(req.Inputs) == 0 &&\n\t\tlen(req.Processors) == 0 &&\n\t\tlen(req.TunnelTargetMatches) == 0 {\n\t\treturn nil // valid reset request\n\t}\n\tif len(req.Targets) > 0 && len(req.Subscriptions) == 0 {\n\t\treturn errors.New(\"if targets are provided, at least one subscription is required\")\n\t}\n\tif len(req.TunnelTargetMatches) > 0 && len(req.Subscriptions) == 0 {\n\t\treturn errors.New(\"if tunnel-target-matches are provided, at least one subscription is required\")\n\t}\n\tif len(req.Inputs) > 0 && len(req.Outputs) == 0 {\n\t\treturn errors.New(\"if inputs are provided, at least one output is required\")\n\t}\n\t// TODO: validate each config\n\t// TODO: validate references\n\treturn nil\n}\n\nfunc (s *Server) handleConfigApply(w http.ResponseWriter, r *http.Request) {\n\ts.applyLock.Lock()\n\tdefer s.applyLock.Unlock()\n\n\tvar reader io.Reader = r.Body\n\t// if content is gzip, 
decompress it\n\tif r.Header.Get(\"Content-Encoding\") == \"gzip\" {\n\t\tgz, err := gzip.NewReader(r.Body)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"gzip error: \" + err.Error()}})\n\t\t\treturn\n\t\t}\n\t\tdefer gz.Close()\n\t\treader = gz\n\t}\n\treq, err := decodeRequest(reader)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"decode error: \" + err.Error()}})\n\t\treturn\n\t}\n\n\terr = validateApplyRequest(req)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"validate error: \" + err.Error()}})\n\t\treturn\n\t}\n\t// delete subscriptions\n\texistingSubscriptions, err := s.store.Config.Keys(\"subscriptions\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"get subscriptions error: \" + err.Error()}})\n\t\treturn\n\t}\n\tfor _, name := range existingSubscriptions {\n\t\tif _, ok := req.Subscriptions[name]; !ok {\n\t\t\t_, _, err := s.store.Config.Delete(\"subscriptions\", name)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t// delete targets (skip tunnel-created targets which have TunnelTargetType set)\n\texistingTargets, err := s.store.Config.List(\"targets\", func(_ string, val any) bool {\n\t\t// only include non-tunnel targets (TunnelTargetType == \"\")\n\t\tif tc, ok := val.(*types.TargetConfig); ok {\n\t\t\treturn tc.TunnelTargetType == \"\"\n\t\t}\n\t\treturn true\n\t})\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"get targets error: \" + err.Error()}})\n\t\treturn\n\t}\n\tfor name := range existingTargets 
{\n\t\tif _, ok := req.Targets[name]; !ok {\n\t\t\t_, _, err := s.store.Config.Delete(\"targets\", name)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"delete target error: \" + err.Error()}})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t// delete inputs\n\texistingInputs, err := s.store.Config.Keys(\"inputs\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"get inputs error: \" + err.Error()}})\n\t\treturn\n\t}\n\tfor _, name := range existingInputs {\n\t\tif _, ok := req.Inputs[name]; !ok {\n\t\t\t_, _, err := s.store.Config.Delete(\"inputs\", name)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"delete input error: \" + err.Error()}})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t// delete outputs\n\texistingOutputs, err := s.store.Config.Keys(\"outputs\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"get outputs error: \" + err.Error()}})\n\t\treturn\n\t}\n\tfor _, name := range existingOutputs {\n\t\tif _, ok := req.Outputs[name]; !ok {\n\t\t\t_, _, err := s.store.Config.Delete(\"outputs\", name)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"delete output error: \" + err.Error()}})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t// delete processors\n\texistingProcessors, err := s.store.Config.Keys(\"processors\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"get processors error: \" + err.Error()}})\n\t\treturn\n\t}\n\tfor _, name := range existingProcessors {\n\t\tif _, ok := req.Processors[name]; !ok {\n\t\t\t_, _, err := 
s.store.Config.Delete(\"processors\", name)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"delete processor error: \" + err.Error()}})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t// delete tunnel-target-matches\n\texistingTunnelTargetMatches, err := s.store.Config.Keys(\"tunnel-target-matches\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"get tunnel-target-matches error: \" + err.Error()}})\n\t\treturn\n\t}\n\tfor _, name := range existingTunnelTargetMatches {\n\t\tif _, ok := req.TunnelTargetMatches[name]; !ok {\n\t\t\t_, _, err := s.store.Config.Delete(\"tunnel-target-matches\", name)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"delete tunnel-target-match error: \" + err.Error()}})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t//\n\t// apply subscriptions\n\tfor name, cfg := range req.Subscriptions {\n\t\t_, err = s.store.Config.Set(\"subscriptions\", name, cfg)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"set subscription error: \" + err.Error()}})\n\t\t\treturn\n\t\t}\n\t}\n\t// apply processors\n\tfor name, cfg := range req.Processors {\n\t\t_, err = s.store.Config.Set(\"processors\", name, cfg)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"set processor error: \" + err.Error()}})\n\t\t\treturn\n\t\t}\n\t}\n\t// apply outputs\n\tfor name, cfg := range req.Outputs {\n\t\t_, err = s.store.Config.Set(\"outputs\", name, cfg)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"set output error: \" + 
err.Error()}})\n\t\t\treturn\n\t\t}\n\t}\n\t// apply targets\n\tfor name, cfg := range req.Targets {\n\t\t_, err = s.store.Config.Set(\"targets\", name, cfg)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"set target error: \" + err.Error()}})\n\t\t\treturn\n\t\t}\n\t}\n\t// apply inputs\n\tfor name, cfg := range req.Inputs {\n\t\t_, err = s.store.Config.Set(\"inputs\", name, cfg)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"set input error: \" + err.Error()}})\n\t\t\treturn\n\t\t}\n\t}\n\t// apply tunnel-target-matches\n\tfor name, cfg := range req.TunnelTargetMatches {\n\t\t_, err = s.store.Config.Set(\"tunnel-target-matches\", name, cfg)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"set tunnel-target-match error: \" + err.Error()}})\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc decodeRequest(reader io.Reader) (*ConfigApplyRequest, error) {\n\tdec := json.NewDecoder(reader)\n\treqMap := make(map[string]any)\n\terr := dec.Decode(&reqMap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq, err := decodeRequestMap(reqMap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}\n\nfunc decodeRequestMap(reqMap map[string]any) (*ConfigApplyRequest, error) {\n\treq := new(ConfigApplyRequest)\n\tmdec, err := mapstructure.NewDecoder(\n\t\t&mapstructure.DecoderConfig{\n\t\t\tDecodeHook: mapstructure.StringToTimeDurationHookFunc(),\n\t\t\tResult:     req,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = mdec.Decode(reqMap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}\n"
  },
  {
    "path": "pkg/collector/api/server/assignment.go",
    "content": "package apiserver\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"sort\"\n\n\t\"github.com/gorilla/mux\"\n)\n\ntype assignmentConfig struct {\n\tAssignments   []*assignement `json:\"assignments\"`\n\tUnassignments []string       `json:\"unassignments,omitempty\"`\n}\n\nfunc (a *assignmentConfig) validate() error {\n\tif len(a.Assignments) == 0 && len(a.Unassignments) == 0 {\n\t\treturn fmt.Errorf(\"assignments or unassignments is required\")\n\t}\n\tif len(a.Assignments) > 0 {\n\t\tfor _, assignment := range a.Assignments {\n\t\t\tif assignment.Target == \"\" {\n\t\t\t\treturn fmt.Errorf(\"target is required\")\n\t\t\t}\n\t\t\tif assignment.Member == \"\" {\n\t\t\t\treturn fmt.Errorf(\"member is required\")\n\t\t\t}\n\t\t}\n\t}\n\tif len(a.Unassignments) > 0 {\n\t\tfor _, unassignment := range a.Unassignments {\n\t\t\tif unassignment == \"\" {\n\t\t\t\treturn fmt.Errorf(\"unassignment is required\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\ntype assignement struct {\n\tTarget string `json:\"target,omitempty\"`\n\tMember string `json:\"member,omitempty\"`\n\t// Epoch  int64  `json:\"epoch,omitempty\"`\n}\n\n// create an assignment by sending a POST request to the assignments endpoint\n// sample body:\n//\n//\t{\n//\t\t\"assignments\": [{\"target\": \"target1\", \"member\": \"member1\", \"epoch\": 1}, {\"target\": \"target2\", \"member\": \"member2\", \"epoch\": 2}]\t// list of target names\n//\t}\n//\n// sample curl command:\n// curl --request POST -H \"Content-Type: application/json\" \\\n// -d '{\"assignments\": [{\"target\": \"target1\", \"member\": \"member1\", \"epoch\": 1}, {\"target\": \"target2\", \"member\": \"member2\", \"epoch\": 2}]}' \\\n// http://localhost:8080/api/v1/assignments\nfunc (s *Server) handleAssignmentPost(w http.ResponseWriter, r *http.Request) {\n\tbody, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: 
[]string{err.Error()}})\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\tcfg := new(assignmentConfig)\n\terr = json.Unmarshal(body, &cfg)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif cfg == nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"invalid assignment config\"}})\n\t\treturn\n\t}\n\n\terr = cfg.validate()\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tfor _, assignment := range cfg.Assignments {\n\t\t_, err := s.store.Config.Set(\"assignments\", assignment.Target, assignment)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor _, unassignment := range cfg.Unassignments {\n\t\t_, _, err = s.store.Config.Delete(\"assignments\", unassignment)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}\n\n// delete an assignment by sending a DELETE request to the assignments endpoint\n// sample curl command:\n// curl --request DELETE http://localhost:8080/api/v1/assignments/target1\nfunc (s *Server) handleAssignmentDelete(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tok, _, err := s.store.Config.Delete(\"assignments\", id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"assignment not 
found\"}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\ntype assignmentResponse struct {\n\tMember  string   `json:\"member\"`\n\tTargets []string `json:\"targets\"`\n}\n\nfunc (s *Server) handleAssignmentGet(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif id == \"\" {\n\t\tassignments, err := s.store.Config.List(\"assignments\")\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\tar := &assignmentResponse{\n\t\t\tTargets: make([]string, 0, len(assignments)),\n\t\t}\n\t\tfor k, v := range assignments {\n\t\t\tif ar.Member == \"\" {\n\t\t\t\tvm, ok := v.(*assignement)\n\t\t\t\tif ok {\n\t\t\t\t\tar.Member = vm.Member\n\t\t\t\t}\n\t\t\t}\n\t\t\tar.Targets = append(ar.Targets, k)\n\t\t}\n\t\tsort.Strings(ar.Targets)\n\t\terr = json.NewEncoder(w).Encode(ar)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\tassignment, ok, err := s.store.Config.Get(\"assignments\", id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"assignment not found\"}})\n\t\treturn\n\t}\n\terr = json.NewEncoder(w).Encode(assignment)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n"
  },
  {
    "path": "pkg/collector/api/server/cluster.go",
    "content": "package apiserver\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/gorilla/mux\"\n\tcluster_manager \"github.com/openconfig/gnmic/pkg/collector/managers/cluster\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n)\n\ntype clusteringResponse struct {\n\tClusterName           string          `json:\"name,omitempty\"`\n\tNumberOfLockedTargets int             `json:\"number-of-locked-targets\"`\n\tLeader                string          `json:\"leader,omitempty\"`\n\tMembers               []clusterMember `json:\"members,omitempty\"`\n}\n\ntype clusterMember struct {\n\tName                  string   `json:\"name,omitempty\"`\n\tAPIEndpoint           string   `json:\"api-endpoint,omitempty\"`\n\tIsLeader              bool     `json:\"is-leader,omitempty\"`\n\tNumberOfLockedTargets int      `json:\"number-of-locked-nodes\"`\n\tLockedTargets         []string `json:\"locked-targets,omitempty\"`\n}\n\nfunc (s *Server) requireClustering(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif s.locker == nil {\n\t\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"clustering is not enabled\"}})\n\t\t\treturn\n\t\t}\n\t\tnext.ServeHTTP(w, r)\n\t})\n}\n\nfunc (s *Server) handleClusteringGet(w http.ResponseWriter, r *http.Request) {\n\t// clusteringResponse\n\tclusteringCfg, ok, err := s.store.Config.Get(\"clustering\", \"clustering\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"clustering config not found\"}})\n\t\treturn\n\t}\n\tclustering, ok := clusteringCfg.(*config.Clustering)\n\tif !ok 
{\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"clustering config is not a config.Clustering\"}})\n\t\treturn\n\t}\n\tif clustering == nil {\n\t\treturn\n\t}\n\tcr := &clusteringResponse{\n\t\tClusterName:           clustering.ClusterName,\n\t\tNumberOfLockedTargets: 0,\n\t\tLeader:                \"\",\n\t\tMembers:               make([]clusterMember, 0),\n\t}\n\tcr.Leader, err = s.clusterManager.GetLeaderName(r.Context())\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tservices, err := s.locker.GetServices(r.Context(), fmt.Sprintf(\"%s-gnmic-api\", clustering.ClusterName), nil)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tinstanceNodes, err := s.clusterManager.GetInstanceToTargetsMapping(r.Context())\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfor _, v := range instanceNodes {\n\t\tcr.NumberOfLockedTargets += len(v)\n\t}\n\n\tcr.Members = make([]clusterMember, len(services))\n\tfor i, srv := range services {\n\t\tscheme := cluster_manager.GetAPIScheme(&cluster_manager.Member{Labels: srv.Tags})\n\t\tcr.Members[i].APIEndpoint = fmt.Sprintf(\"%s://%s\", scheme, srv.Address)\n\t\tcr.Members[i].Name = strings.TrimSuffix(srv.ID, \"-api\")\n\t\tcr.Members[i].IsLeader = cr.Leader == cr.Members[i].Name\n\t\tcr.Members[i].NumberOfLockedTargets = len(instanceNodes[cr.Members[i].Name])\n\t\tcr.Members[i].LockedTargets = instanceNodes[cr.Members[i].Name]\n\t}\n\n\terr = json.NewEncoder(w).Encode(cr)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n\nfunc (s *Server) handleClusterRebalance(w http.ResponseWriter, r 
*http.Request) {\n\tisLeader, err := s.clusterManager.IsLeader(r.Context())\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !isLeader {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"not leader\"}})\n\t\treturn\n\t}\n\terr = s.clusterManager.RebalanceTargetsV2()\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\ts.logger.Info(\"rebalance targets completed\")\n\tw.WriteHeader(http.StatusAccepted)\n}\n\nfunc (s *Server) handleClusteringLeaderGet(w http.ResponseWriter, r *http.Request) {\n\tclusteringCfg, ok, err := s.store.Config.Get(\"clustering\", \"clustering\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"clustering config not found\"}})\n\t\treturn\n\t}\n\tclustering, ok := clusteringCfg.(*config.Clustering)\n\tif !ok {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"clustering config is not a config.Clustering\"}})\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithCancel(r.Context())\n\tdefer cancel()\n\t// get leader\n\tleader, err := s.clusterManager.GetLeaderName(ctx)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tservices, err := s.locker.GetServices(ctx, fmt.Sprintf(\"%s-gnmic-api\", clustering.ClusterName), nil)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: 
[]string{err.Error()}})\n\t\treturn\n\t}\n\n\tinstanceNodes, err := s.clusterManager.GetInstanceToTargetsMapping(ctx)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tmembers := make([]clusterMember, 1)\n\tfor _, s := range services {\n\t\tif strings.TrimSuffix(s.ID, \"-api\") != leader {\n\t\t\tcontinue\n\t\t}\n\t\tscheme := cluster_manager.GetAPIScheme(&cluster_manager.Member{Labels: s.Tags})\n\t\t// add the leader as a member then break from loop\n\t\tmembers[0].APIEndpoint = fmt.Sprintf(\"%s://%s\", scheme, s.Address)\n\t\tmembers[0].Name = strings.TrimSuffix(s.ID, \"-api\")\n\t\tmembers[0].IsLeader = true\n\t\tmembers[0].NumberOfLockedTargets = len(instanceNodes[members[0].Name])\n\t\tmembers[0].LockedTargets = instanceNodes[members[0].Name]\n\t\tbreak\n\t}\n\tb, err := json.Marshal(members)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tw.Write(b)\n}\n\nfunc (s *Server) handleClusteringLeaderDelete(w http.ResponseWriter, r *http.Request) {\n\tleader, err := s.clusterManager.IsLeader(r.Context())\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !leader {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"not leader\"}})\n\t\treturn\n\t}\n\terr = s.clusterManager.WithdrawLeader(r.Context(), 30*time.Second)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n\nfunc (s *Server) handleClusteringMembersGet(w http.ResponseWriter, r *http.Request) {\n\t// clusteringResponse\n\tclusteringCfg, ok, err := s.store.Config.Get(\"clustering\", 
\"clustering\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"clustering config not found\"}})\n\t\treturn\n\t}\n\tclustering, ok := clusteringCfg.(*config.Clustering)\n\tif !ok {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"clustering config is not a config.Clustering\"}})\n\t\treturn\n\t}\n\t// get leader\n\tleader, err := s.clusterManager.GetLeaderName(r.Context())\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tservices, err := s.locker.GetServices(r.Context(), fmt.Sprintf(\"%s-gnmic-api\", clustering.ClusterName), nil)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tinstanceNodes, err := s.clusterManager.GetInstanceToTargetsMapping(r.Context())\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tmembers := make([]clusterMember, len(services))\n\tfor i, s := range services {\n\t\tscheme := cluster_manager.GetAPIScheme(&cluster_manager.Member{Labels: s.Tags})\n\t\tmembers[i].APIEndpoint = fmt.Sprintf(\"%s://%s\", scheme, s.Address)\n\t\tmembers[i].Name = strings.TrimSuffix(s.ID, \"-api\")\n\t\tmembers[i].IsLeader = leader == members[i].Name\n\t\tmembers[i].NumberOfLockedTargets = len(instanceNodes[members[i].Name])\n\t\tmembers[i].LockedTargets = instanceNodes[members[i].Name]\n\t}\n\tb, err := json.Marshal(members)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = 
json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\t_, err = w.Write(b)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n\nfunc (s *Server) handleClusteringDrainInstance(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif id == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"member id is required\"}})\n\t\treturn\n\t}\n\tleader, err := s.clusterManager.IsLeader(r.Context())\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !leader {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"not leader\"}})\n\t\treturn\n\t}\n\terr = s.clusterManager.DrainMember(r.Context(), id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\ntype moveRequest struct {\n\tTarget            string `json:\"target,omitempty\"`\n\tDestinationMember string `json:\"member,omitempty\"`\n}\n\nfunc (s *Server) handleClusterMove(w http.ResponseWriter, r *http.Request) {\n\t// read body\n\tbody, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\tvar moveRequest moveRequest\n\n\terr = json.Unmarshal(body, &moveRequest)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif moveRequest.Target == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = 
json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"target is required\"}})\n\t\treturn\n\t}\n\tif moveRequest.DestinationMember == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"member is required\"}})\n\t\treturn\n\t}\n\t// TODO: implement move target\n\t// err = s.clusterManager.MoveTarget(r.Context(), moveRequest.Target, moveRequest.DestinationMember)\n\t// if err != nil {\n\t// \tw.WriteHeader(http.StatusInternalServerError)\n\t// \t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t// \treturn\n\t// }\n\t// w.WriteHeader(http.StatusOK)\n}\n\n// //\nfunc (s *Server) handleConfig(w http.ResponseWriter, r *http.Request) {\n\tres, err := s.store.Config.GetAll()\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tconfigs := make(map[string]any)\n\tfor k, v := range res {\n\t\tconfigs[k] = v\n\t}\n\tsanitizedRes := sanitizeConfig(configs)\n\terr = json.NewEncoder(w).Encode(sanitizedRes)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n\nfunc sanitizeConfig(res map[string]any) map[string]any {\n\tkeys := []string{\n\t\t\"api-server\",\n\t\t\"gnmi-server\",\n\t\t\"loader\",\n\t\t\"clustering\",\n\t\t\"global-flags\",\n\t\t\"tunnel-server\",\n\t}\n\n\tfor _, key := range keys {\n\t\tval, ok := res[key]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tswitch v := val.(type) {\n\t\tcase map[string]any:\n\t\t\tres[key] = v[key]\n\t\tdefault:\n\t\t}\n\t}\n\n\treturn res\n}\n\nfunc (s *Server) handleHealthzGet(w http.ResponseWriter, r *http.Request) {\n\tres := map[string]string{\"status\": \"healthy\"}\n\tb, err := json.Marshal(res)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: 
[]string{err.Error()}})\n\t\treturn\n\t}\n\t_, err = w.Write(b)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t}\n}\n\nfunc (s *Server) handleAdminShutdown(w http.ResponseWriter, r *http.Request) {\n\t// Not implemented yet\n\tw.WriteHeader(http.StatusNotImplemented)\n\t_ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{\"shutdown not implemented\"}})\n}\n"
  },
  {
    "path": "pkg/collector/api/server/inputs.go",
    "content": "package apiserver\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"slices\"\n\n\t\"github.com/gorilla/mux\"\n\t\"github.com/openconfig/gnmic/pkg/inputs\"\n)\n\nfunc (s *Server) handleConfigInputsGet(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif id == \"\" {\n\t\tinputs, err := s.store.Config.List(\"inputs\")\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\terr = json.NewEncoder(w).Encode(inputs)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tinput, ok, err := s.store.Config.Get(\"inputs\", id)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\tif !ok {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"input not found\"}})\n\t\t\treturn\n\t\t}\n\t\terr = json.NewEncoder(w).Encode(input)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (s *Server) handleConfigInputsPost(w http.ResponseWriter, r *http.Request) {\n\tbody, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\tcfg := map[string]any{}\n\tfmt.Println(\"body\", string(body))\n\terr = json.Unmarshal(body, &cfg)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Println(\"err\", err)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif 
cfg == nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"invalid input config\"}})\n\t\treturn\n\t}\n\tinputType, ok := cfg[\"type\"].(string)\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"input type is required\"}})\n\t\treturn\n\t}\n\tif !slices.Contains(inputs.InputTypes, inputType) {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"unknown input type: %q\", inputType)}})\n\t\treturn\n\t}\n\tinputName, ok := cfg[\"name\"].(string)\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"input name is required\"}})\n\t\treturn\n\t}\n\tif inputName == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"input name is required\"}})\n\t\treturn\n\t}\n\tinitializer := inputs.Inputs[inputType]\n\tif initializer == nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"unknown input type: %q\", inputType)}})\n\t\treturn\n\t}\n\timpl := initializer()\n\terr = impl.Validate(cfg)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\t// validate event processors exist\n\tevps, ok := cfg[\"event-processors\"].([]string)\n\tif ok {\n\t\tfor _, ep := range evps {\n\t\t\t_, ok, err := s.store.Config.Get(\"processors\", ep)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !ok {\n\t\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"event processor %s not found\", ep)}})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t// 
validate outputs exist\n\touts, ok := cfg[\"outputs\"].([]string)\n\tif ok {\n\t\tfor _, out := range outs {\n\t\t\t_, ok, err := s.store.Config.Get(\"outputs\", out)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !ok {\n\t\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"output %s not found\", out)}})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t_, err = s.store.Config.Set(\"inputs\", inputName, cfg)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc (s *Server) handleConfigInputsProcessorsPatch(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusNotImplemented)\n\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"not implemented\"}})\n}\n\nfunc (s *Server) handleConfigInputsOutputsPatch(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusNotImplemented)\n\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"not implemented\"}})\n}\n\nfunc (s *Server) handleConfigInputsDelete(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tok, _, err := s.store.Config.Delete(\"inputs\", id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"input not found\"}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n"
  },
  {
    "path": "pkg/collector/api/server/metrics.go",
    "content": "package apiserver\n\nimport \"github.com/prometheus/client_golang/prometheus/collectors\"\n\nfunc (s *Server) registerMetrics() {\n\ts.reg.MustRegister(collectors.NewGoCollector())\n\ts.reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))\n}\n"
  },
  {
    "path": "pkg/collector/api/server/outputs.go",
    "content": "package apiserver\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\n\t\"github.com/gorilla/mux\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n)\n\n// get all outputs\n// curl command:\n// curl http://localhost:8080/api/v1/outputs\nfunc (s *Server) handleConfigOutputsGet(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif id == \"\" {\n\t\toutputs, err := s.store.Config.List(\"outputs\")\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\terr = json.NewEncoder(w).Encode(outputs)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t} else {\n\t\toutput, ok, err := s.store.Config.Get(\"outputs\", id)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\tif !ok {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"output not found\"}})\n\t\t\treturn\n\t\t}\n\t\terr = json.NewEncoder(w).Encode(output)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// sample body:\n//\n//\t{\n//\t\t\"name\": \"output1\",\n//\t\t\"type\": \"file\",\n//\t\t\"filename\": \"output.txt\"\n//\t}\n//\n// curl command:\n// curl --request POST -H \"Content-Type: application/json\" \\\n// -d '{\"name\": \"output1\", \"type\": \"file\", \"filename\": \"output.txt\"}' \\\n// http://localhost:8080/api/v1/outputs\nfunc (s *Server) handleConfigOutputsPost(w http.ResponseWriter, r *http.Request) {\n\tbody, err := io.ReadAll(r.Body)\n\tif err != nil 
{\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\tcfg := map[string]any{}\n\terr = json.Unmarshal(body, &cfg)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif cfg == nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"invalid output config\"}})\n\t\treturn\n\t}\n\toutputName, ok := cfg[\"name\"].(string)\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"output name is required\"}})\n\t\treturn\n\t}\n\tif outputName == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"output name is required\"}})\n\t\treturn\n\t}\n\tinitializer := outputs.Outputs[cfg[\"type\"].(string)]\n\tif initializer == nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"unknown output type\"}})\n\t\treturn\n\t}\n\timpl := initializer()\n\terr = impl.Validate(cfg)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tevps, ok := cfg[\"event-processors\"].([]string)\n\tif ok {\n\t\tfor _, ep := range evps {\n\t\t\t_, ok, err := s.store.Config.Get(\"processors\", ep)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !ok {\n\t\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"event processor %s not found\", ep)}})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t_, err = s.store.Config.Set(\"outputs\", outputName, cfg)\n\tif err != nil 
{\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc (s *Server) handleConfigOutputsProcessorsPatch(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusNotImplemented)\n\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"not implemented\"}})\n}\n\nfunc (s *Server) handleConfigOutputsDelete(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\terr := s.outputsManager.DeleteOutput(id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n"
  },
  {
    "path": "pkg/collector/api/server/processors.go",
    "content": "package apiserver\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\n\t\"github.com/gorilla/mux\"\n)\n\ntype ProcessorConfigResponse struct {\n\tName   string `json:\"name\"`\n\tType   string `json:\"type\"`\n\tConfig any    `json:\"config\"`\n}\n\ntype ProcessorConfigRequest struct {\n\tName   string `json:\"name\"`\n\tType   string `json:\"type\"`\n\tConfig any    `json:\"config\"`\n}\n\nfunc (s *Server) handleConfigProcessorsGet(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif id != \"\" {\n\t\tprocessor, ok, err := s.store.Config.Get(\"processors\", id)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\tif !ok {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"processor not found\"}})\n\t\t\treturn\n\t\t}\n\t\tprocessorConfig := ProcessorConfigResponse{\n\t\t\tName: id,\n\t\t}\n\t\tfor k, v := range processor.(map[string]any) {\n\t\t\tswitch v.(type) {\n\t\t\tcase map[string]any:\n\t\t\t\tprocessorConfig.Type = k\n\t\t\t\tprocessorConfig.Config = v\n\t\t\tdefault:\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"unknown processor type: %T\", v)}})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\terr = json.NewEncoder(w).Encode(processorConfig)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\tprocessors, err := s.store.Config.List(\"processors\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tprocessorConfigs := make([]ProcessorConfigResponse, 0, len(processors))\n\tfor name, processor 
:= range processors {\n\t\tswitch processor := processor.(type) {\n\t\tcase map[string]any:\n\t\t\tprocessorConfig := ProcessorConfigResponse{\n\t\t\t\tName: name,\n\t\t\t}\n\t\t\tfor k, v := range processor {\n\t\t\t\tswitch v.(type) {\n\t\t\t\tcase map[string]any:\n\t\t\t\t\tprocessorConfig.Type = k\n\t\t\t\t\tprocessorConfig.Config = v\n\t\t\t\tdefault:\n\t\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"unknown processor type: %T\", v)}})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tprocessorConfigs = append(processorConfigs, processorConfig)\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"unknown processor type: %T\", processor)}})\n\t\t\treturn\n\t\t}\n\t}\n\terr = json.NewEncoder(w).Encode(processorConfigs)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n\nfunc (s *Server) handleConfigProcessorsPost(w http.ResponseWriter, r *http.Request) {\n\tbody, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\tcfg := new(ProcessorConfigRequest)\n\terr = json.Unmarshal(body, &cfg)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif cfg == nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"invalid processor config\"}})\n\t\treturn\n\t}\n\tif cfg.Name == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"processor name is required\"}})\n\t\treturn\n\t}\n\tif cfg.Type == \"\" 
{\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"processor type is required\"}})\n\t\treturn\n\t}\n\tstoreCfg := map[string]any{\n\t\tcfg.Type: cfg.Config,\n\t}\n\t_, err = s.store.Config.Set(\"processors\", cfg.Name, storeCfg)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc (s *Server) handleConfigProcessorsDelete(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif s.outputsManager.ProcessorInUse(id) {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"processor is in use by outputs\"}})\n\t\treturn\n\t}\n\tif s.inputsManager.ProcessorInUse(id) {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"processor is in use by inputs\"}})\n\t\treturn\n\t}\n\t_, _, err := s.store.Config.Delete(\"processors\", id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n"
  },
  {
    "path": "pkg/collector/api/server/routes.go",
    "content": "package apiserver\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/gorilla/mux\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n)\n\nfunc (s *Server) routes() {\n\ts.router.Handle(\"/metrics\", promhttp.HandlerFor(s.reg, promhttp.HandlerOpts{}))\n\n\tapiV1 := s.router.PathPrefix(\"/api/v1\").Subrouter()\n\ts.clusterRoutes(apiV1)\n\ts.configRoutes(apiV1)\n\ts.targetRoutes(apiV1)\n\ts.subscriptionRoutes(apiV1)\n\ts.healthRoutes(apiV1)\n\ts.adminRoutes(apiV1)\n\ts.outputsRoutes(apiV1)\n\ts.inputsRoutes(apiV1)\n\ts.processorsRoutes(apiV1)\n\ts.assignmentRoutes(apiV1)\n\ts.sseRoutes(apiV1)\n}\n\nfunc (s *Server) healthRoutes(r *mux.Router) {\n\tr.HandleFunc(\"/healthz\", s.handleHealthzGet).Methods(http.MethodGet)\n}\n\nfunc (s *Server) adminRoutes(r *mux.Router) {\n\tr.HandleFunc(\"/admin/shutdown\", s.handleAdminShutdown).Methods(http.MethodPost)\n}\n\nfunc (s *Server) clusterRoutes(r *mux.Router) {\n\tcluster := r.PathPrefix(\"/cluster\").Subrouter()\n\tcluster.Use(s.requireClustering)\n\tcluster.HandleFunc(\"\", s.handleClusteringGet).Methods(http.MethodGet)\n\tcluster.HandleFunc(\"/rebalance\", s.handleClusterRebalance).Methods(http.MethodPost)\n\tcluster.HandleFunc(\"/leader\", s.handleClusteringLeaderGet).Methods(http.MethodGet)\n\tcluster.HandleFunc(\"/leader\", s.handleClusteringLeaderDelete).Methods(http.MethodDelete)\n\tcluster.HandleFunc(\"/members\", s.handleClusteringMembersGet).Methods(http.MethodGet)\n\tcluster.HandleFunc(\"/members/{id}/drain\", s.handleClusteringDrainInstance).Methods(http.MethodPost)\n\tcluster.HandleFunc(\"/move\", s.handleClusterMove).Methods(http.MethodPost)\n}\n\nfunc (s *Server) configRoutes(r *mux.Router) {\n\tr.HandleFunc(\"/config\", s.handleConfig).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/apply\", s.handleConfigApply).Methods(http.MethodPost)\n\tr.HandleFunc(\"/config/targets\", s.handleConfigTargetsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/targets/{id}\", 
s.handleConfigTargetsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/targets\", s.handleConfigTargetsPost).Methods(http.MethodPost)\n\tr.HandleFunc(\"/config/targets/{id}\", s.handleConfigTargetsDelete).Methods(http.MethodDelete)\n\tr.HandleFunc(\"/config/targets/{id}/subscriptions\", s.handleConfigTargetsSubscriptionsPatch).Methods(http.MethodPatch)\n\tr.HandleFunc(\"/config/targets/{id}/outputs\", s.handleConfigTargetsOutputsPatch).Methods(http.MethodPatch)\n\tr.HandleFunc(\"/config/targets/{id}/state\", s.handleTargetsStatePost).Methods(http.MethodPost)\n\t//\n\tr.HandleFunc(\"/config/subscriptions\", s.handleConfigSubscriptionsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/subscriptions\", s.handleConfigSubscriptionsPost).Methods(http.MethodPost)\n\tr.HandleFunc(\"/config/subscriptions/{id}\", s.handleConfigSubscriptionsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/subscriptions/{id}\", s.handleConfigSubscriptionsDelete).Methods(http.MethodDelete)\n\t//\n\tr.HandleFunc(\"/config/outputs\", s.handleConfigOutputsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/outputs\", s.handleConfigOutputsPost).Methods(http.MethodPost)\n\tr.HandleFunc(\"/config/outputs/{id}\", s.handleConfigOutputsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/outputs/{id}/processors\", s.handleConfigOutputsProcessorsPatch).Methods(http.MethodPatch)\n\tr.HandleFunc(\"/config/outputs/{id}\", s.handleConfigOutputsDelete).Methods(http.MethodDelete)\n\t//\n\tr.HandleFunc(\"/config/inputs\", s.handleConfigInputsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/inputs\", s.handleConfigInputsPost).Methods(http.MethodPost)\n\tr.HandleFunc(\"/config/inputs/{id}\", s.handleConfigInputsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/inputs/{id}/processors\", s.handleConfigInputsProcessorsPatch).Methods(http.MethodPatch)\n\tr.HandleFunc(\"/config/inputs/{id}/outputs\", 
s.handleConfigInputsOutputsPatch).Methods(http.MethodPatch)\n\tr.HandleFunc(\"/config/inputs/{id}\", s.handleConfigInputsDelete).Methods(http.MethodDelete)\n\t//\n\tr.HandleFunc(\"/config/processors\", s.handleConfigProcessorsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/processors\", s.handleConfigProcessorsPost).Methods(http.MethodPost)\n\tr.HandleFunc(\"/config/processors/{id}\", s.handleConfigProcessorsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/processors/{id}\", s.handleConfigProcessorsDelete).Methods(http.MethodDelete)\n\t//\n\tr.HandleFunc(\"/config/tunnel-target-matches\", s.handleConfigTunnelTargetMatchesGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/tunnel-target-matches\", s.handleConfigTunnelTargetMatchesPost).Methods(http.MethodPost)\n\tr.HandleFunc(\"/config/tunnel-target-matches/{id}\", s.handleConfigTunnelTargetMatchesGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/config/tunnel-target-matches/{id}\", s.handleConfigTunnelTargetMatchesDelete).Methods(http.MethodDelete)\n}\n\nfunc (s *Server) targetRoutes(r *mux.Router) {\n\tr.HandleFunc(\"/targets\", s.handleTargetsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/targets/{id}\", s.handleTargetsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/targets/{id}/state/{state}\", s.handleTargetsStatePost).Methods(http.MethodPost)\n}\n\nfunc (s *Server) subscriptionRoutes(r *mux.Router) {\n\tr.HandleFunc(\"/subscriptions\", s.handleSubscriptionsGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/subscriptions/{id}\", s.handleSubscriptionsGet).Methods(http.MethodGet)\n}\n\nfunc (s *Server) outputsRoutes(r *mux.Router) {\n\t// r.HandleFunc(\"/outputs\", c.handleOutputsGet).Methods(http.MethodGet)\n\t// r.HandleFunc(\"/outputs\", c.handleOutputsPost).Methods(http.MethodPost)\n\t// r.HandleFunc(\"/outputs/{id}\", c.handleOutputsGet).Methods(http.MethodGet)\n\t// r.HandleFunc(\"/outputs/{id}\", c.handleOutputsPatch).Methods(http.MethodPatch)\n\t// r.HandleFunc(\"/outputs/{id}\", 
c.handleOutputsDelete).Methods(http.MethodDelete)\n}\n\nfunc (s *Server) inputsRoutes(r *mux.Router) {\n\t// r.HandleFunc(\"/inputs\", c.handleInputsGet).Methods(http.MethodGet)\n\t// r.HandleFunc(\"/inputs\", c.handleInputsPost).Methods(http.MethodPost)\n\t// r.HandleFunc(\"/inputs/{id}\", c.handleInputsGet).Methods(http.MethodGet)\n\t// r.HandleFunc(\"/inputs/{id}\", c.handleInputsDelete).Methods(http.MethodDelete)\n}\n\nfunc (s *Server) processorsRoutes(r *mux.Router) {\n\t// r.HandleFunc(\"/processors\", c.handleProcessorsGet).Methods(http.MethodGet)\n\t// r.HandleFunc(\"/processors\", c.handleProcessorsPost).Methods(http.MethodPost)\n\t// r.HandleFunc(\"/processors/{id}\", c.handleProcessorsGet).Methods(http.MethodGet)\n\t// r.HandleFunc(\"/processors/{id}\", c.handleProcessorsDelete).Methods(http.MethodDelete)\n}\n\nfunc (s *Server) assignmentRoutes(r *mux.Router) {\n\tr.HandleFunc(\"/assignments\", s.handleAssignmentGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/assignments\", s.handleAssignmentPost).Methods(http.MethodPost)\n\tr.HandleFunc(\"/assignments/{id}\", s.handleAssignmentGet).Methods(http.MethodGet)\n\tr.HandleFunc(\"/assignments/{id}\", s.handleAssignmentDelete).Methods(http.MethodDelete)\n}\n\nfunc (s *Server) sseRoutes(r *mux.Router) {\n\tr.HandleFunc(\"/sse/{kind}\", s.handleSSE).Methods(http.MethodGet)\n}\n"
  },
  {
    "path": "pkg/collector/api/server/sse.go",
    "content": "package apiserver\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"github.com/gorilla/mux\"\n\tcollstore \"github.com/openconfig/gnmic/pkg/collector/store\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\n// validSSEKinds are the store kinds that can be streamed via SSE.\nvar validSSEKinds = map[string]struct{}{\n\tcollstore.KindTargets:             {},\n\tcollstore.KindOutputs:             {},\n\tcollstore.KindInputs:              {},\n\tcollstore.KindSubscriptions:       {},\n\tcollstore.KindProcessors:          {},\n\tcollstore.KindAssignments:         {},\n\tcollstore.KindTunnelTargetMatches: {},\n}\n\n// sseEvent is the JSON payload sent for each SSE event.\ntype sseEvent struct {\n\tTimestamp time.Time `json:\"timestamp\"`  // when the event was emitted\n\tStore     string    `json:\"store\"`      // \"config\" or \"state\"\n\tKind      string    `json:\"kind\"`       // targets, outputs, inputs, subscriptions\n\tName      string    `json:\"name\"`       // entry name / key\n\tEventType string    `json:\"event-type\"` // create, update, delete\n\tObject    any       `json:\"object\"`     // the entry value\n}\n\n// handleSSE streams store changes for a given kind as Server-Sent Events.\n//\n// GET /api/v1/sse/{kind}?store=config|state|all\n//\n// Query parameter \"store\" selects which store(s) to watch:\n//   - \"config\" — config store only\n//   - \"state\"  — state store only\n//   - \"all\"    — both (default)\n//\n// The client receives an event stream where each event is a JSON-encoded\n// sseEvent. 
An initial snapshot of existing entries is sent first (as\n// \"create\" events), followed by live updates.\nfunc (s *Server) handleSSE(w http.ResponseWriter, r *http.Request) {\n\tkind := mux.Vars(r)[\"kind\"]\n\tif _, ok := validSSEKinds[kind]; !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{\n\t\t\tErrors: []string{fmt.Sprintf(\"invalid kind %q; expected one of: targets, outputs, inputs, subscriptions, processors, assignments, tunnel-target-matches\", kind)},\n\t\t})\n\t\treturn\n\t}\n\n\tstoreFilter := r.URL.Query().Get(\"store\")\n\tif storeFilter == \"\" {\n\t\tstoreFilter = \"all\"\n\t}\n\tif storeFilter != \"config\" && storeFilter != \"state\" && storeFilter != \"all\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{\n\t\t\tErrors: []string{fmt.Sprintf(\"invalid store %q; expected one of: config, state, all\", storeFilter)},\n\t\t})\n\t\treturn\n\t}\n\n\t// ensure the ResponseWriter supports flushing.\n\tflusher, ok := w.(http.Flusher)\n\tif !ok {\n\t\thttp.Error(w, \"streaming not supported\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// set up watches.\n\tvar configCh <-chan *store.Event[any]\n\tvar configCancel func()\n\tvar stateCh <-chan *store.Event[any]\n\tvar stateCancel func()\n\n\tif storeFilter == \"config\" || storeFilter == \"all\" {\n\t\tvar err error\n\t\tconfigCh, configCancel, err = s.store.Config.Watch(kind, store.WithInitialReplay[any]())\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\tdefer configCancel()\n\t}\n\tif storeFilter == \"state\" || storeFilter == \"all\" {\n\t\tvar err error\n\t\tstateCh, stateCancel, err = s.store.State.Watch(kind, store.WithInitialReplay[any]())\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\tdefer 
stateCancel()\n\t}\n\n\t// set SSE headers.\n\tw.Header().Set(\"Content-Type\", \"text/event-stream\")\n\tw.Header().Set(\"Cache-Control\", \"no-cache\")\n\tw.Header().Set(\"Connection\", \"keep-alive\")\n\tw.Header().Set(\"X-Accel-Buffering\", \"no\") // disable nginx buffering\n\tflusher.Flush()\n\n\tctx := r.Context()\n\tkeepalive := time.NewTicker(15 * time.Second)\n\tdefer keepalive.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-keepalive.C:\n\t\t\t// SSE comment line as keepalive to detect broken connections.\n\t\t\tfmt.Fprintf(w, \": keepalive\\n\\n\")\n\t\t\tflusher.Flush()\n\t\tcase ev, ok := <-configCh:\n\t\t\tif !ok {\n\t\t\t\tconfigCh = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ts.sendSSEEvent(w, flusher, \"config\", ev)\n\t\tcase ev, ok := <-stateCh:\n\t\t\tif !ok {\n\t\t\t\tstateCh = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ts.sendSSEEvent(w, flusher, \"state\", ev)\n\t\t}\n\t}\n}\n\nfunc (s *Server) sendSSEEvent(w http.ResponseWriter, flusher http.Flusher, storeName string, ev *store.Event[any]) {\n\tdata, err := json.Marshal(sseEvent{\n\t\tTimestamp: time.Now(),\n\t\tStore:     storeName,\n\t\tKind:      ev.Kind,\n\t\tName:      ev.Name,\n\t\tEventType: string(ev.EventType),\n\t\tObject:    ev.Object,\n\t})\n\tif err != nil {\n\t\ts.logger.Error(\"failed to marshal SSE event\", \"error\", err)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, \"event: %s\\ndata: %s\\n\\n\", ev.EventType, data)\n\tflusher.Flush()\n}\n"
  },
  {
    "path": "pkg/collector/api/server/subscriptions.go",
    "content": "package apiserver\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\n\t\"github.com/gorilla/mux\"\n\t\"github.com/mitchellh/mapstructure\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\ttargets_manager \"github.com/openconfig/gnmic/pkg/collector/managers/targets\"\n)\n\nfunc (s *Server) handleConfigSubscriptionsGet(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\n\tif id == \"\" {\n\t\t// Get all subscriptions\n\t\tsubscriptions, err := s.store.Config.List(\"subscriptions\")\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\terr = json.NewEncoder(w).Encode(subscriptions)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\n\t// Get single subscription by ID\n\tsub, ok, err := s.store.Config.Get(\"subscriptions\", id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"subscription not found\"}})\n\t\treturn\n\t}\n\terr = json.NewEncoder(w).Encode(sub)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n\n// sample body:\n//\n//\t{\n//\t\t\"name\": \"subscription1\",\n//\t\t\"prefix\": \"interfaces\",\n//\t\t\"set-target\": true,\n//\t\t\"paths\": [\"interfaces/interface/state\"],\n//\t\t\"mode\": \"STREAM\",\n//\t\t\"stream-mode\": \"TARGET_DEFINED\",\n//\t\t\"encoding\": \"JSON\",\n//\t\t\"sample-interval\": 1000\n//\t}\n//\n// sample curl command:\n// curl --request POST -H \"Content-Type: 
application/json\" \\\n// -d '{\"name\": \"subscription1\", \"prefix\": \"interfaces\", \"set-target\": true, \"paths\": [\"interfaces/interface/state\"], \"mode\": \"STREAM\", \"stream-mode\": \"TARGET_DEFINED\", \"encoding\": \"JSON\", \"sample-interval\": 1000}' \\\n// http://localhost:8080/api/v1/subscriptions\nfunc (s *Server) handleConfigSubscriptionsPost(w http.ResponseWriter, r *http.Request) {\n\tbody, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\tm := map[string]any{}\n\terr = json.Unmarshal(body, &m)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tsub := new(types.SubscriptionConfig)\n\t// handles time.Duration\n\tdecoder, err := mapstructure.NewDecoder(\n\t\t&mapstructure.DecoderConfig{\n\t\t\tDecodeHook: mapstructure.StringToTimeDurationHookFunc(),\n\t\t\tResult:     sub,\n\t\t})\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\terr = decoder.Decode(m)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\t_, err = s.store.Config.Set(\"subscriptions\", sub.Name, sub)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc (s *Server) handleConfigSubscriptionsDelete(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tok, _, err := s.store.Config.Delete(\"subscriptions\", id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: 
[]string{err.Error()}})\n\t\treturn\n\t}\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"subscription not found\"}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\n// SubscriptionResponse represents a subscription with its targets and states\ntype SubscriptionResponse struct {\n\tName    string                      `json:\"name\"`\n\tConfig  *types.SubscriptionConfig   `json:\"config\"`\n\tTargets map[string]*TargetStateInfo `json:\"targets\"`\n}\n\n// TargetStateInfo represents target information for a subscription\ntype TargetStateInfo struct {\n\tName  string `json:\"name\"`\n\tState string `json:\"state\"`\n}\n\n// handleSubscriptionsGet returns runtime subscription information with target states\nfunc (s *Server) handleSubscriptionsGet(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\n\tif id == \"\" {\n\t\tsubscriptionsMap := make(map[string]*SubscriptionResponse)\n\t\t// build current subscriptions map\n\t\t_, err := s.store.Config.List(\"subscriptions\", func(name string, sub any) bool {\n\t\t\tswitch sub := sub.(type) {\n\t\t\tcase *types.SubscriptionConfig:\n\t\t\t\tsubscriptionsMap[sub.Name] = &SubscriptionResponse{\n\t\t\t\t\tName:    sub.Name,\n\t\t\t\t\tConfig:  sub,\n\t\t\t\t\tTargets: make(map[string]*TargetStateInfo),\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t})\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\t// Collect all subscriptions from targets\n\t\ts.targetsManager.ForEach(func(mt *targets_manager.ManagedTarget) {\n\t\t\tsubStates := mt.T.SubscribeClientStates()\n\t\t\tfor name, active := range subStates {\n\t\t\t\tif subscriptionsMap[name] == nil {\n\t\t\t\t\tsubscriptionsMap[name] = &SubscriptionResponse{\n\t\t\t\t\t\tName:    name,\n\t\t\t\t\t\tTargets: 
make(map[string]*TargetStateInfo),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tstate := \"stopped\"\n\t\t\t\tif active {\n\t\t\t\t\tstate = \"running\"\n\t\t\t\t}\n\t\t\t\tsubscriptionsMap[name].Targets[mt.Name] = &TargetStateInfo{\n\t\t\t\t\tName:  mt.Name,\n\t\t\t\t\tState: state,\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\t// Return all subscriptions\n\t\tresponse := make([]*SubscriptionResponse, 0)\n\t\tfor _, sub := range subscriptionsMap {\n\t\t\tresponse = append(response, sub)\n\t\t}\n\t\terr = json.NewEncoder(w).Encode(response)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\tsub, ok, err := s.store.Config.Get(\"subscriptions\", id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"subscription not found\"}})\n\t\treturn\n\t}\n\tswitch sub := sub.(type) {\n\tcase *types.SubscriptionConfig:\n\t\tresponse := &SubscriptionResponse{\n\t\t\tName:    sub.Name,\n\t\t\tConfig:  sub,\n\t\t\tTargets: make(map[string]*TargetStateInfo),\n\t\t}\n\t\ts.targetsManager.ForEach(func(mt *targets_manager.ManagedTarget) {\n\t\t\tsubStates := mt.T.SubscribeClientStates()\n\t\t\tactive, exists := subStates[id]\n\t\t\tif !exists {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstate := \"stopped\"\n\t\t\tif active {\n\t\t\t\tstate = \"running\"\n\t\t\t}\n\t\t\tresponse.Targets[mt.Name] = &TargetStateInfo{\n\t\t\t\tName:  mt.Name,\n\t\t\t\tState: state,\n\t\t\t}\n\t\t})\n\t\terr = json.NewEncoder(w).Encode(response)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t}\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"unknown subscription type: 
%T\", sub)}})\n\t\treturn\n\t}\n}\n"
  },
  {
    "path": "pkg/collector/api/server/targets.go",
    "content": "package apiserver\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\n\t\"github.com/gorilla/mux\"\n\t\"github.com/mitchellh/mapstructure\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\ttargets_manager \"github.com/openconfig/gnmic/pkg/collector/managers/targets\"\n\tcollstore \"github.com/openconfig/gnmic/pkg/collector/store\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nfunc (s *Server) handleConfigTargetsGet(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif id == \"\" {\n\t\ttargets, err := s.store.Config.List(\"targets\")\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\terr = json.NewEncoder(w).Encode(targets)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\ttc, ok, err := s.store.Config.Get(\"targets\", id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"target %s not found\", id)}})\n\t\treturn\n\t}\n\terr = json.NewEncoder(w).Encode(tc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n\n// sample body:\n//\n//\t{\n//\t\t\"name\": \"target1\",\n//\t\t\"address\": \"127.0.0.1:57400\",\n//\t\t\"username\": \"admin\",\n//\t\t\"password\": \"admin\"\n//\t}\n//\n// sample curl command:\n// curl --request POST -H \"Content-Type: application/json\" \\\n// -d '{\"name\": \"target1\", \"address\": \"127.0.0.1:57400\", \"username\": \"admin\", \"password\": 
\"admin\", \"insecure\": true}' \\\n// http://localhost:8080/api/v1/config/targets\nfunc (s *Server) handleConfigTargetsPost(w http.ResponseWriter, r *http.Request) {\n\tbody, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\tm := map[string]any{}\n\terr = json.Unmarshal(body, &m)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\ttc := new(types.TargetConfig)\n\t// handles time.Duration\n\tdecoder, err := mapstructure.NewDecoder(\n\t\t&mapstructure.DecoderConfig{\n\t\t\tDecodeHook: mapstructure.StringToTimeDurationHookFunc(),\n\t\t\tResult:     tc,\n\t\t})\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\terr = decoder.Decode(m)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif tc.Name == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"target name is required\"}})\n\t\treturn\n\t}\n\tif tc.Address == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"target address is required\"}})\n\t\treturn\n\t}\n\t// validate subscriptions\n\tfor _, sub := range tc.Subscriptions {\n\t\t_, ok, err := s.store.Config.Get(\"subscriptions\", sub)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\tif !ok {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"subscription %s not found\", 
sub)}})\n\t\t\treturn\n\t\t}\n\t}\n\t// validate outputs\n\tfor _, out := range tc.Outputs {\n\t\t_, ok, err := s.store.Config.Get(\"outputs\", out)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\tif !ok {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"output %s not found\", out)}})\n\t\t\treturn\n\t\t}\n\t}\n\n\t_, err = s.store.Config.Set(\"targets\", tc.Name, tc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}\n\n// update target subscriptions by sending a PATCH request to the target id\n// sample body (a JSON array of subscription names):\n//\n//\t[\"sub1\", \"sub2\"]\n//\n// sample curl command:\n// curl --request PATCH -H \"Content-Type: application/json\" \\\n// -d '[\"sub1\", \"sub2\"]' \\\n// http://localhost:8080/api/v1/config/targets/target1/subscriptions\nfunc (s *Server) handleConfigTargetsSubscriptionsPatch(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tbody, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\n\tdefer r.Body.Close()\n\tsubs := []string{}\n\tif len(body) > 0 {\n\t\terr = json.Unmarshal(body, &subs)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t}\n\t// ensure subscriptions exist\n\tfor _, sub := range subs {\n\t\t_, ok, err := s.store.Config.Get(\"subscriptions\", sub)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: 
[]string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\tif !ok {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"subscription %s not found\", sub)}})\n\t\t\treturn\n\t\t}\n\t}\n\t_, err = s.store.Config.SetFn(\"targets\", id,\n\t\tfunc(v any) (any, error) {\n\t\t\ttc, ok := v.(*types.TargetConfig)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"malformed target config\")\n\t\t\t}\n\t\t\ttc.Subscriptions = subs\n\t\t\treturn tc, nil\n\t\t})\n\tif err != nil {\n\t\tif errors.Is(err, store.ErrKeyNotFound) {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"target %s not found\", id)}})\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\n// update target outputs by sending a PATCH request to the target id\n// sample body (a JSON array of output names):\n//\n//\t[\"output1\", \"output2\"]\n//\n// sample curl command:\n// curl --request PATCH -H \"Content-Type: application/json\" \\\n// -d '[\"output1\", \"output2\"]' \\\n// http://localhost:8080/api/v1/config/targets/target1/outputs\nfunc (s *Server) handleConfigTargetsOutputsPatch(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tbody, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\touts := []string{}\n\tif len(body) > 0 {\n\t\terr = json.Unmarshal(body, &outs)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t}\n\t// ensure outputs exist\n\tfor _, out := range outs {\n\t\t_, ok, err := s.store.Config.Get(\"outputs\", out)\n\t\tif err 
!= nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\tif !ok {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"output %s not found\", out)}})\n\t\t\treturn\n\t\t}\n\t}\n\t_, err = s.store.Config.SetFn(\"targets\", id,\n\t\tfunc(v any) (any, error) {\n\t\t\ttc, ok := v.(*types.TargetConfig)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"malformed target config\")\n\t\t\t}\n\t\t\ttc.Outputs = outs\n\t\t\treturn tc, nil\n\t\t})\n\tif err != nil {\n\t\tif errors.Is(err, store.ErrKeyNotFound) {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"target %s not found\", id)}})\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc (s *Server) handleConfigTargetsDelete(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\t_, _, err := s.store.Config.Delete(\"targets\", id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\ntype TargetResponse struct {\n\tName   string                 `json:\"name\"`\n\tConfig *types.TargetConfig    `json:\"config\"`\n\tState  *collstore.TargetState `json:\"state,omitempty\"`\n}\n\nfunc (s *Server) handleTargetsGet(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tresponse := make([]*TargetResponse, 0)\n\tif id == \"\" {\n\t\ts.targetsManager.ForEach(func(mt *targets_manager.ManagedTarget) {\n\t\t\tts := s.targetsManager.GetTargetState(mt.Name)\n\t\t\tresponse = append(response, targetResponseFromState(mt.Name, mt.T.Config, 
ts))\n\t\t})\n\t\terr := json.NewEncoder(w).Encode(response)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\tmt := s.targetsManager.Lookup(id)\n\tif mt == nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"target not found\"}})\n\t\treturn\n\t}\n\tts := s.targetsManager.GetTargetState(id)\n\tresponse = append(response, targetResponseFromState(mt.Name, mt.T.Config, ts))\n\terr := json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n\n// targetResponseFromState builds a TargetResponse from a TargetState.\nfunc targetResponseFromState(name string, cfg *types.TargetConfig, ts *collstore.TargetState) *TargetResponse {\n\treturn &TargetResponse{\n\t\tName:   name,\n\t\tConfig: cfg,\n\t\tState:  ts,\n\t}\n}\n\n// change target state to running/stopped by sending a POST request to the target id\nfunc (s *Server) handleTargetsStatePost(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif id == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"target id is required\"}})\n\t\treturn\n\t}\n\tstate := vars[\"state\"]\n\tif state == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"target state is required\"}})\n\t\treturn\n\t}\n\tmt := s.targetsManager.Lookup(id)\n\tif mt == nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{\"target not found\"}})\n\t\treturn\n\t}\n\tok := s.targetsManager.SetIntendedState(id, state)\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: 
[]string{\"target state not changed\"}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n"
  },
  {
    "path": "pkg/collector/api/server/tunnel_target_match.go",
    "content": "package apiserver\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"github.com/gorilla/mux\"\n\t\"github.com/mitchellh/mapstructure\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n)\n\nconst (\n\ttunnelTargetMatchesPath = \"tunnel-target-matches\"\n)\n\nfunc (s *Server) handleConfigTunnelTargetMatchesGet(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tif id == \"\" {\n\t\ttargets, err := s.store.Config.List(tunnelTargetMatchesPath)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\terr = json.NewEncoder(w).Encode(targets)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\ttc, ok, err := s.store.Config.Get(tunnelTargetMatchesPath, id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"target %s not found\", id)}})\n\t\treturn\n\t}\n\terr = json.NewEncoder(w).Encode(tc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n}\n\nfunc (s *Server) handleConfigTunnelTargetMatchesPost(w http.ResponseWriter, r *http.Request) {\n\tdec := json.NewDecoder(r.Body)\n\tdefer r.Body.Close()\n\tvar m map[string]any\n\tif err := dec.Decode(&m); err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\ttc := new(config.TunnelTargetMatch)\n\tif err := mapstructure.Decode(m, tc); err != nil 
{\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\t_, err := s.store.Config.Set(tunnelTargetMatchesPath, tc.ID, tc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc (s *Server) handleConfigTunnelTargetMatchesDelete(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\tok, _, err := s.store.Config.Delete(tunnelTargetMatchesPath, id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}})\n\t\treturn\n\t}\n\tif !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf(\"target %s not found\", id)}})\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n"
  },
  {
    "path": "pkg/collector/collector.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage collector\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"log/slog\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/grafana/pyroscope-go\"\n\t\"github.com/openconfig/gnmic/pkg/cache\"\n\tapiserver \"github.com/openconfig/gnmic/pkg/collector/api/server\"\n\tcluster_manager \"github.com/openconfig/gnmic/pkg/collector/managers/cluster\"\n\tinputs_manager \"github.com/openconfig/gnmic/pkg/collector/managers/inputs\"\n\toutputs_manager \"github.com/openconfig/gnmic/pkg/collector/managers/outputs\"\n\ttargets_manager \"github.com/openconfig/gnmic/pkg/collector/managers/targets\"\n\tcollstore \"github.com/openconfig/gnmic/pkg/collector/store\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n\t\"github.com/openconfig/gnmic/pkg/logging\"\n\t\"github.com/openconfig/gnmic/pkg/pipeline\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tdefaultPipelineBufferSize = 1_000_000\n\tinitLockerRetryTimer      = 1 * time.Second\n)\n\ntype Collector struct {\n\tctx   context.Context\n\tstore *collstore.Store\n\n\tapiServer *apiserver.Server\n\tcache     cache.Cache\n\n\tlocker         lockers.Locker\n\tclusterManager *cluster_manager.ClusterManager\n\ttargetsManager *targets_manager.TargetsManager\n\toutputsManager *outputs_manager.OutputsManager\n\tinputsManager  *inputs_manager.InputsManager\n\tpipeline       chan *pipeline.Msg\n\twg             
*sync.WaitGroup\n\n\tlogger   *slog.Logger\n\treg      *prometheus.Registry\n\tprofiler *pyroscope.Profiler\n}\n\nfunc New(ctx context.Context, configStore store.Store[any]) *Collector {\n\ts := collstore.NewStore(configStore)\n\tpipeline := make(chan *pipeline.Msg, defaultPipelineBufferSize)\n\treg := prometheus.NewRegistry()\n\n\tclusterManager := cluster_manager.NewClusterManager(s)\n\ttargetsManager := targets_manager.NewTargetsManager(ctx, s, pipeline, reg)\n\toutputsManager := outputs_manager.NewOutputsManager(ctx, s, pipeline, reg)\n\tinputsManager := inputs_manager.NewInputsManager(ctx, s, pipeline)\n\tapiServer := apiserver.NewServer(\n\t\ts,\n\t\ttargetsManager, outputsManager,\n\t\tinputsManager, clusterManager,\n\t\treg,\n\t)\n\tc := &Collector{\n\t\tctx:            ctx,\n\t\tstore:          s,\n\t\tapiServer:      apiServer,\n\t\tclusterManager: clusterManager,\n\t\ttargetsManager: targetsManager,\n\t\toutputsManager: outputsManager,\n\t\tinputsManager:  inputsManager,\n\t\tpipeline:       pipeline,\n\t\twg:             new(sync.WaitGroup),\n\t\treg:            reg,\n\t}\n\treturn c\n}\n\nfunc (c *Collector) Start() error {\n\tc.logger = logging.NewLogger(c.store.Config, \"component\", \"collector\")\n\tvar err error\n\tc.logger.Info(\"starting collector\")\n\t// build locker\n\tfor {\n\t\terr = c.getLocker()\n\t\tif err != nil {\n\t\t\tc.logger.Error(\"failed to get locker\", \"error\", err)\n\t\t\ttime.Sleep(initLockerRetryTimer)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tif c.locker != nil {\n\t\t// start cluster manager\n\t\terr = c.clusterManager.Start(c.ctx, c.locker, c.wg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// create cache\n\tc.initCache()\n\t// start managers\n\terr = c.targetsManager.Start(c.locker, c.wg)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = c.outputsManager.Start(c.cache, c.wg)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = c.inputsManager.Start(c.wg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// start API 
server\n\terr = c.apiServer.Start(c.locker, c.wg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// wait for context done\n\t<-c.ctx.Done()\n\t// wait for all components to finish\n\tc.wg.Wait()\n\treturn nil\n}\n\nfunc (c *Collector) Stop() {\n\tc.logger.Info(\"stopping collector\")\n\tc.apiServer.Stop()\n\tc.clusterManager.Stop()\n\tc.targetsManager.Stop()\n\tc.outputsManager.Stop()\n\tc.inputsManager.Stop()\n}\n\nfunc (c *Collector) getLocker() error {\n\tclusteringMap, ok, err := c.store.Config.Get(\"clustering\", \"clustering\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !ok {\n\t\treturn nil\n\t}\n\tclustering, ok := clusteringMap.(*config.Clustering)\n\tif !ok {\n\t\treturn errors.New(\"malformed clustering config\")\n\t}\n\tif clustering == nil {\n\t\treturn nil\n\t}\n\tif lockerType, ok := clustering.Locker[\"type\"]; ok {\n\t\tc.logger.Info(\"starting locker\", \"type\", lockerType)\n\t\tif initializer, ok := lockers.Lockers[lockerType.(string)]; ok {\n\t\t\tlock := initializer()\n\t\t\terr := lock.Init(c.ctx, clustering.Locker, lockers.WithLogger(log.New(os.Stdout, \"\", log.LstdFlags)))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tc.locker = lock\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"unknown locker type %q\", lockerType)\n\t}\n\treturn errors.New(\"missing locker type field\")\n}\n\nfunc (c *Collector) CollectorPreRunE(cmd *cobra.Command, args []string) error {\n\tif len(args) > 0 {\n\t\treturn fmt.Errorf(\"unknown command %q\", args[0])\n\t}\n\tpyroscopeServerAddress := cmd.Flag(\"pyroscope-server-address\").Value.String()\n\tpyroscopeApplicationName := cmd.Flag(\"pyroscope-application-name\").Value.String()\n\tif pyroscopeServerAddress == \"\" {\n\t\treturn nil\n\t}\n\tvar err error\n\tc.profiler, err = pyroscope.Start(\n\t\tpyroscope.Config{\n\t\t\tApplicationName: pyroscopeApplicationName,\n\t\t\tServerAddress:   pyroscopeServerAddress,\n\t\t\tProfileTypes: 
[]pyroscope.ProfileType{\n\t\t\t\tpyroscope.ProfileInuseObjects,\n\t\t\t\tpyroscope.ProfileAllocObjects,\n\t\t\t\tpyroscope.ProfileInuseSpace,\n\t\t\t\tpyroscope.ProfileAllocSpace,\n\t\t\t\tpyroscope.ProfileGoroutines,\n\t\t\t\tpyroscope.ProfileMutexCount,\n\t\t\t\tpyroscope.ProfileMutexDuration,\n\t\t\t\tpyroscope.ProfileBlockCount,\n\t\t\t\tpyroscope.ProfileBlockDuration,\n\t\t\t},\n\t\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Collector) CollectorRunE(cmd *cobra.Command, _ []string) error {\n\tif c.profiler != nil {\n\t\tdefer c.profiler.Stop()\n\t}\n\treturn c.Start()\n}\n\n// InitCollectorFlags is used to init or reset the collector command flags for gnmic-prompt mode\nfunc (c *Collector) InitCollectorFlags(cmd *cobra.Command) {\n\tcmd.ResetFlags()\n\tcmd.Flags().String(\"pyroscope-server-address\", \"\", \"Pyroscope server address\")\n\tcmd.Flags().String(\"pyroscope-application-name\", \"gnmic-collector\", \"Pyroscope application name\")\n}\n\nfunc (c *Collector) initCache() error {\n\tcfg, ok, err := c.store.Config.Get(\"gnmi-server\", \"gnmi-server\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !ok {\n\t\treturn nil\n\t}\n\tif cfg == nil {\n\t\treturn nil\n\t}\n\tswitch cfg := cfg.(type) {\n\tcase *config.GNMIServer:\n\t\tif cfg == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif cfg.Cache == nil {\n\t\t\treturn nil\n\t\t}\n\t\tc.cache, err = cache.New(cfg.Cache, cache.WithLogger(log.New(os.Stdout, \"\", log.LstdFlags)))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/collector/env/env.go",
    "content": "package env\n\nimport (\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/openconfig/gnmic/pkg/config\"\n)\n\nfunc ExpandClusterEnv(clusteringConfig *config.Clustering) {\n\tclusteringConfig.ClusterName = os.ExpandEnv(clusteringConfig.ClusterName)\n\tclusteringConfig.InstanceName = os.ExpandEnv(clusteringConfig.InstanceName)\n\tclusteringConfig.ServiceAddress = os.ExpandEnv(clusteringConfig.ServiceAddress)\n\tfor i := range clusteringConfig.Tags {\n\t\tclusteringConfig.Tags[i] = os.ExpandEnv(clusteringConfig.Tags[i])\n\t}\n\tif clusteringConfig.TLS != nil {\n\t\tclusteringConfig.TLS.CaFile = os.ExpandEnv(clusteringConfig.TLS.CaFile)\n\t\tclusteringConfig.TLS.CertFile = os.ExpandEnv(clusteringConfig.TLS.CertFile)\n\t\tclusteringConfig.TLS.KeyFile = os.ExpandEnv(clusteringConfig.TLS.KeyFile)\n\t}\n\tif clusteringConfig.Locker != nil {\n\t\texpandLockerEnv(clusteringConfig.Locker)\n\t}\n}\n\nfunc ExpandAPIEnv(apiConfig *config.APIServer) {\n\tapiConfig.Address = os.ExpandEnv(apiConfig.Address)\n\tif apiConfig.TLS != nil {\n\t\tapiConfig.TLS.CaFile = os.ExpandEnv(apiConfig.TLS.CaFile)\n\t\tapiConfig.TLS.CertFile = os.ExpandEnv(apiConfig.TLS.CertFile)\n\t\tapiConfig.TLS.KeyFile = os.ExpandEnv(apiConfig.TLS.KeyFile)\n\t\tapiConfig.TLS.ClientAuth = os.ExpandEnv(apiConfig.TLS.ClientAuth)\n\t}\n\tapiConfig.EnableMetrics = os.ExpandEnv(strings.ToLower(strconv.FormatBool(apiConfig.EnableMetrics))) == \"true\"\n\tapiConfig.EnableProfiling = os.ExpandEnv(strings.ToLower(strconv.FormatBool(apiConfig.EnableProfiling))) == \"true\"\n\tapiConfig.Debug = os.ExpandEnv(strings.ToLower(strconv.FormatBool(apiConfig.Debug))) == \"true\"\n\tapiConfig.HealthzDisableLogging = os.ExpandEnv(strings.ToLower(strconv.FormatBool(apiConfig.HealthzDisableLogging))) == \"true\"\n}\n\nfunc expandLockerEnv(locker map[string]any) {\n\texpandMapEnv(locker)\n}\n\nfunc expandMapEnv(m map[string]any) {\n\tfor f := range m {\n\t\tswitch v := m[f].(type) {\n\t\tcase 
string:\n\t\t\tm[f] = os.ExpandEnv(v)\n\t\tcase map[string]any:\n\t\t\texpandMapEnv(v)\n\t\t\tm[f] = v\n\t\tcase []any:\n\t\t\tfor i, item := range v {\n\t\t\t\tswitch item := item.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\tv[i] = os.ExpandEnv(item)\n\t\t\t\tcase map[string]any:\n\t\t\t\t\texpandMapEnv(item)\n\t\t\t\tcase []any:\n\t\t\t\t\texpandSliceEnv(item)\n\t\t\t\t}\n\t\t\t}\n\t\t\tm[f] = v\n\t\t}\n\t}\n}\n\nfunc expandSliceEnv(s []any) {\n\tfor i, item := range s {\n\t\tswitch item := item.(type) {\n\t\tcase string:\n\t\t\ts[i] = os.ExpandEnv(item)\n\t\tcase map[string]any:\n\t\t\texpandMapEnv(item)\n\t\tcase []any:\n\t\t\texpandSliceEnv(item)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/collector/env/env_test.go",
    "content": "package env_test\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/collector/env\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n)\n\nfunc TestExpandClusterEnv(t *testing.T) {\n\t// Set env vars for expansion tests; restore after.\n\tconst testCluster = \"test-cluster-name\"\n\tconst testInstance = \"test-instance-01\"\n\tconst testAddr = \"0.0.0.0:7890\"\n\tconst testTag = \"region:us-east\"\n\tconst testCa = \"/etc/ssl/ca.pem\"\n\tconst testCert = \"/etc/ssl/cert.pem\"\n\tconst testKey = \"/etc/ssl/key.pem\"\n\tos.Setenv(\"GNMIC_CLUSTER\", testCluster)\n\tos.Setenv(\"GNMIC_INSTANCE\", testInstance)\n\tos.Setenv(\"GNMIC_ADDR\", testAddr)\n\tos.Setenv(\"GNMIC_TAG\", testTag)\n\tos.Setenv(\"GNMIC_CA\", testCa)\n\tos.Setenv(\"GNMIC_CERT\", testCert)\n\tos.Setenv(\"GNMIC_KEY\", testKey)\n\tdefer func() {\n\t\tos.Unsetenv(\"GNMIC_CLUSTER\")\n\t\tos.Unsetenv(\"GNMIC_INSTANCE\")\n\t\tos.Unsetenv(\"GNMIC_ADDR\")\n\t\tos.Unsetenv(\"GNMIC_TAG\")\n\t\tos.Unsetenv(\"GNMIC_CA\")\n\t\tos.Unsetenv(\"GNMIC_CERT\")\n\t\tos.Unsetenv(\"GNMIC_KEY\")\n\t}()\n\n\ttests := []struct {\n\t\tname             string\n\t\tclusteringConfig *config.Clustering\n\t\tvalidate         func(t *testing.T, c *config.Clustering)\n\t}{\n\t\t{\n\t\t\tname:             \"empty_config\",\n\t\t\tclusteringConfig: &config.Clustering{},\n\t\t\tvalidate: func(t *testing.T, c *config.Clustering) {\n\t\t\t\tif c.ClusterName != \"\" || c.InstanceName != \"\" || c.ServiceAddress != \"\" {\n\t\t\t\t\tt.Errorf(\"empty config should remain empty\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"literal_strings_unchanged\",\n\t\t\tclusteringConfig: &config.Clustering{\n\t\t\t\tClusterName:    \"my-cluster\",\n\t\t\t\tInstanceName:   \"instance-1\",\n\t\t\t\tServiceAddress: \":7890\",\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, c *config.Clustering) {\n\t\t\t\tif c.ClusterName != \"my-cluster\" || c.InstanceName != 
\"instance-1\" || c.ServiceAddress != \":7890\" {\n\t\t\t\t\tt.Errorf(\"literal strings should be unchanged\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"cluster_fields_expanded\",\n\t\t\tclusteringConfig: &config.Clustering{\n\t\t\t\tClusterName:    \"$GNMIC_CLUSTER\",\n\t\t\t\tInstanceName:   \"$GNMIC_INSTANCE\",\n\t\t\t\tServiceAddress: \"$GNMIC_ADDR\",\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, c *config.Clustering) {\n\t\t\t\tif c.ClusterName != testCluster || c.InstanceName != testInstance || c.ServiceAddress != testAddr {\n\t\t\t\t\tt.Errorf(\"got cluster=%q instance=%q addr=%q\", c.ClusterName, c.InstanceName, c.ServiceAddress)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tags_expanded\",\n\t\t\tclusteringConfig: &config.Clustering{\n\t\t\t\tTags: []string{\"$GNMIC_TAG\", \"literal\", \"${GNMIC_TAG}\"},\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, c *config.Clustering) {\n\t\t\t\tif len(c.Tags) != 3 {\n\t\t\t\t\tt.Fatalf(\"len(Tags)=%d\", len(c.Tags))\n\t\t\t\t}\n\t\t\t\tif c.Tags[0] != testTag || c.Tags[1] != \"literal\" || c.Tags[2] != testTag {\n\t\t\t\t\tt.Errorf(\"tags: got %q\", c.Tags)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tls_nil_no_panic\",\n\t\t\tclusteringConfig: &config.Clustering{\n\t\t\t\tClusterName: \"c1\",\n\t\t\t\tTLS:         nil,\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, c *config.Clustering) {\n\t\t\t\tif c.TLS != nil {\n\t\t\t\t\tt.Error(\"TLS should still be nil\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tls_paths_expanded\",\n\t\t\tclusteringConfig: &config.Clustering{\n\t\t\t\tClusterName: \"c1\",\n\t\t\t\tTLS: &types.TLSConfig{\n\t\t\t\t\tCaFile:   \"$GNMIC_CA\",\n\t\t\t\t\tCertFile: \"$GNMIC_CERT\",\n\t\t\t\t\tKeyFile:  \"$GNMIC_KEY\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, c *config.Clustering) {\n\t\t\t\tif c.TLS == nil {\n\t\t\t\t\tt.Fatal(\"TLS should be set\")\n\t\t\t\t}\n\t\t\t\tif c.TLS.CaFile != testCa || c.TLS.CertFile != testCert || c.TLS.KeyFile != testKey 
{\n\t\t\t\t\tt.Errorf(\"TLS paths: ca=%q cert=%q key=%q\", c.TLS.CaFile, c.TLS.CertFile, c.TLS.KeyFile)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"locker_nil_no_panic\",\n\t\t\tclusteringConfig: &config.Clustering{\n\t\t\t\tClusterName: \"c1\",\n\t\t\t\tLocker:      nil,\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, c *config.Clustering) {\n\t\t\t\tif c.Locker != nil {\n\t\t\t\t\tt.Error(\"Locker should still be nil\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"locker_string_values_expanded\",\n\t\t\tclusteringConfig: &config.Clustering{\n\t\t\t\tClusterName: \"c1\",\n\t\t\t\tLocker: map[string]any{\n\t\t\t\t\t\"type\":    \"consul\",\n\t\t\t\t\t\"address\": \"$GNMIC_ADDR\",\n\t\t\t\t\t\"key\":     \"literal\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, c *config.Clustering) {\n\t\t\t\tif c.Locker == nil {\n\t\t\t\t\tt.Fatal(\"Locker should be set\")\n\t\t\t\t}\n\t\t\t\tif c.Locker[\"address\"] != testAddr || c.Locker[\"key\"] != \"literal\" {\n\t\t\t\t\tt.Errorf(\"locker: got %v\", c.Locker)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"locker_nested_map_expanded\",\n\t\t\tclusteringConfig: &config.Clustering{\n\t\t\t\tClusterName: \"c1\",\n\t\t\t\tLocker: map[string]any{\n\t\t\t\t\t\"type\": \"consul\",\n\t\t\t\t\t\"opts\": map[string]any{\n\t\t\t\t\t\t\"host\": \"$GNMIC_CLUSTER\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, c *config.Clustering) {\n\t\t\t\topts, _ := c.Locker[\"opts\"].(map[string]any)\n\t\t\t\tif opts == nil || opts[\"host\"] != testCluster {\n\t\t\t\t\tt.Errorf(\"nested locker opts: got %v\", c.Locker[\"opts\"])\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tenv.ExpandClusterEnv(tt.clusteringConfig)\n\t\t\tif tt.validate != nil && tt.clusteringConfig != nil {\n\t\t\t\ttt.validate(t, tt.clusteringConfig)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestExpandAPIEnv(t *testing.T) {\n\tconst testAddr = \"127.0.0.1:7890\"\n\tconst 
testCa = \"/api/ca.pem\"\n\tconst testCert = \"/api/cert.pem\"\n\tconst testKey = \"/api/key.pem\"\n\tconst testClientAuth = \"require\"\n\tos.Setenv(\"API_ADDR\", testAddr)\n\tos.Setenv(\"API_CA\", testCa)\n\tos.Setenv(\"API_CERT\", testCert)\n\tos.Setenv(\"API_KEY\", testKey)\n\tos.Setenv(\"API_CLIENT_AUTH\", testClientAuth)\n\tdefer func() {\n\t\tos.Unsetenv(\"API_ADDR\")\n\t\tos.Unsetenv(\"API_CA\")\n\t\tos.Unsetenv(\"API_CERT\")\n\t\tos.Unsetenv(\"API_KEY\")\n\t\tos.Unsetenv(\"API_CLIENT_AUTH\")\n\t}()\n\n\ttests := []struct {\n\t\tname      string\n\t\tapiConfig *config.APIServer\n\t\tvalidate  func(t *testing.T, a *config.APIServer)\n\t}{\n\t\t{\n\t\t\tname:      \"empty_config\",\n\t\t\tapiConfig: &config.APIServer{},\n\t\t\tvalidate: func(t *testing.T, a *config.APIServer) {\n\t\t\t\tif a.Address != \"\" {\n\t\t\t\t\tt.Errorf(\"Address should be empty, got %q\", a.Address)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"address_expanded\",\n\t\t\tapiConfig: &config.APIServer{\n\t\t\t\tAddress: \"$API_ADDR\",\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, a *config.APIServer) {\n\t\t\t\tif a.Address != testAddr {\n\t\t\t\t\tt.Errorf(\"Address: got %q\", a.Address)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"literal_address_unchanged\",\n\t\t\tapiConfig: &config.APIServer{\n\t\t\t\tAddress: \":7890\",\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, a *config.APIServer) {\n\t\t\t\tif a.Address != \":7890\" {\n\t\t\t\t\tt.Errorf(\"Address: got %q\", a.Address)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tls_nil_no_panic\",\n\t\t\tapiConfig: &config.APIServer{\n\t\t\t\tAddress: \":7890\",\n\t\t\t\tTLS:     nil,\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, a *config.APIServer) {\n\t\t\t\tif a.TLS != nil {\n\t\t\t\t\tt.Error(\"TLS should still be nil\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"tls_paths_and_client_auth_expanded\",\n\t\t\tapiConfig: &config.APIServer{\n\t\t\t\tAddress: \":7890\",\n\t\t\t\tTLS: 
&types.TLSConfig{\n\t\t\t\t\tCaFile:     \"$API_CA\",\n\t\t\t\t\tCertFile:   \"$API_CERT\",\n\t\t\t\t\tKeyFile:    \"$API_KEY\",\n\t\t\t\t\tClientAuth: \"$API_CLIENT_AUTH\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, a *config.APIServer) {\n\t\t\t\tif a.TLS == nil {\n\t\t\t\t\tt.Fatal(\"TLS should be set\")\n\t\t\t\t}\n\t\t\t\tif a.TLS.CaFile != testCa || a.TLS.CertFile != testCert || a.TLS.KeyFile != testKey || a.TLS.ClientAuth != testClientAuth {\n\t\t\t\t\tt.Errorf(\"TLS: ca=%q cert=%q key=%q clientAuth=%q\", a.TLS.CaFile, a.TLS.CertFile, a.TLS.KeyFile, a.TLS.ClientAuth)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"bool_flags_unchanged_true\",\n\t\t\tapiConfig: &config.APIServer{\n\t\t\t\tEnableMetrics:         true,\n\t\t\t\tEnableProfiling:       true,\n\t\t\t\tDebug:                 true,\n\t\t\t\tHealthzDisableLogging: true,\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, a *config.APIServer) {\n\t\t\t\tif !a.EnableMetrics || !a.EnableProfiling || !a.Debug || !a.HealthzDisableLogging {\n\t\t\t\t\tt.Errorf(\"bools true: metrics=%v profiling=%v debug=%v healthz=%v\",\n\t\t\t\t\t\ta.EnableMetrics, a.EnableProfiling, a.Debug, a.HealthzDisableLogging)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"bool_flags_unchanged_false\",\n\t\t\tapiConfig: &config.APIServer{\n\t\t\t\tEnableMetrics:         false,\n\t\t\t\tEnableProfiling:       false,\n\t\t\t\tDebug:                 false,\n\t\t\t\tHealthzDisableLogging: false,\n\t\t\t},\n\t\t\tvalidate: func(t *testing.T, a *config.APIServer) {\n\t\t\t\tif a.EnableMetrics || a.EnableProfiling || a.Debug || a.HealthzDisableLogging {\n\t\t\t\t\tt.Errorf(\"bools false: metrics=%v profiling=%v debug=%v healthz=%v\",\n\t\t\t\t\t\ta.EnableMetrics, a.EnableProfiling, a.Debug, a.HealthzDisableLogging)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tenv.ExpandAPIEnv(tt.apiConfig)\n\t\t\tif tt.validate != nil && tt.apiConfig != nil 
{\n\t\t\t\ttt.validate(t, tt.apiConfig)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/collector/managers/cluster/assigner.go",
    "content": "package cluster_manager\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log/slog\"\n\t\"net/http\"\n\t\"time\"\n\n\tapiconst \"github.com/openconfig/gnmic/pkg/collector/api/const\"\n\tcollstore \"github.com/openconfig/gnmic/pkg/collector/store\"\n)\n\ntype Assignment struct {\n\tTarget string `json:\"target,omitempty\"`\n\tMember string `json:\"member,omitempty\"`\n}\n\ntype assignmentConfig struct {\n\tAssignments   []*Assignment `json:\"assignments\"`\n\tUnassignments []string      `json:\"unassignments,omitempty\"`\n}\n\ntype Assigner interface {\n\tAssign(ctx context.Context, targetToMember map[string]*Member) error\n\tUnassign(ctx context.Context, member *Member, target ...string) error\n}\n\nconst (\n\thttpScheme    = \"http\"\n\thttpsScheme   = \"https\"\n\tprotocolLabel = \"__protocol\"\n)\n\ntype restAssigner struct {\n\tclient *http.Client\n\tstore  *collstore.Store\n\tlogger *slog.Logger\n}\n\nfunc NewAssigner(store *collstore.Store) Assigner {\n\treturn &restAssigner{\n\t\tstore:  store,\n\t\tlogger: slog.With(\"component\", \"assignment-pusher\"),\n\t\tclient: &http.Client{\n\t\t\tTimeout: 10 * time.Second,\n\t\t},\n\t}\n}\n\nfunc (p *restAssigner) Assign(ctx context.Context, targetToMember map[string]*Member) error {\n\t// TODO: group by address\n\tfor targetName, member := range targetToMember {\n\t\tif member == nil || member.Address == \"\" {\n\t\t\tp.logger.Warn(\"member is nil or address is empty\", \"target\", targetName, \"member\", member)\n\t\t\tcontinue\n\t\t}\n\t\tscheme := GetAPIScheme(member)\n\t\taddress := scheme + \"://\" + member.Address + apiconst.AssignmentsAPIv1URL\n\t\terr := p.assignOne(ctx, address, []*Assignment{\n\t\t\t{\n\t\t\t\tTarget: targetName,\n\t\t\t\tMember: member.ID,\n\t\t\t\t// Epoch: epoch\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (p *restAssigner) assignOne(ctx context.Context, address string, assignmentSet 
[]*Assignment) error {\n\tb, err := json.Marshal(&assignmentConfig{Assignments: assignmentSet})\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, address, bytes.NewReader(b))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tresp, err := p.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode/100 != 2 {\n\t\tbody, _ := io.ReadAll(resp.Body)\n\t\tp.logger.Error(\"failed to assign\", \"address\", address, \"assignmentSet\", assignmentSet, \"status\", resp.Status, \"body\", string(body))\n\t\treturn fmt.Errorf(\"assign: %s\", resp.Status)\n\t}\n\treturn nil\n}\n\nfunc (p *restAssigner) Unassign(ctx context.Context, member *Member, target ...string) error {\n\tif member == nil || member.Address == \"\" {\n\t\treturn fmt.Errorf(\"member is nil or address is empty\")\n\t}\n\tscheme := GetAPIScheme(member)\n\taddress := scheme + \"://\" + member.Address + apiconst.AssignmentsAPIv1URL\n\tbody, err := json.Marshal(&assignmentConfig{Unassignments: target})\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, address, bytes.NewReader(body))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tresp, err := p.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbody, err = io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode/100 != 2 {\n\t\treturn fmt.Errorf(\"unassign: %s: %s\", resp.Status, string(body))\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/collector/managers/cluster/cluster_manager.go",
    "content": "package cluster_manager\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"maps\"\n\t\"net\"\n\t\"net/http\"\n\t\"path\"\n\t\"sort\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"golang.org/x/sync/semaphore\"\n\n\tapiconst \"github.com/openconfig/gnmic/pkg/collector/api/const\"\n\t\"github.com/openconfig/gnmic/pkg/collector/env\"\n\tcollstore \"github.com/openconfig/gnmic/pkg/collector/store\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n\t\"github.com/openconfig/gnmic/pkg/logging\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tprotocolTagName          = \"__protocol\"\n\tretryRegistrationBackoff = 2 * time.Second\n)\n\ntype ClusterManager struct {\n\tstore            *collstore.Store\n\tclusteringConfig *config.Clustering\n\tapiConfig        *config.APIServer\n\n\tlocker lockers.Locker\n\n\telection           Election\n\trecampaignCooldown atomic.Int64\n\n\tmembership       Membership\n\tassigner         Assigner\n\tlockCheckLimiter chan struct{}\n\n\t// semaphore to limit the number of concurrent rebalancing operations (to 1)\n\trebalancingSem *semaphore.Weighted\n\n\tapiClient *http.Client\n\n\tmm      *sync.RWMutex\n\tmembers map[string]*Member\n\n\tlogger *slog.Logger\n\twg     *sync.WaitGroup\n\tcfn    context.CancelFunc\n}\n\nfunc NewClusterManager(store *collstore.Store) *ClusterManager {\n\treturn &ClusterManager{\n\t\tstore:            store,\n\t\tmm:               new(sync.RWMutex),\n\t\tmembers:          make(map[string]*Member),\n\t\tlocker:           nil,\n\t\tlockCheckLimiter: make(chan struct{}, 64), // TODO: make this configurable\n\t\trebalancingSem:   semaphore.NewWeighted(1),\n\t\tapiClient:        &http.Client{Timeout: 10 * time.Second}, // TODO:\n\t}\n}\n\nfunc (c *ClusterManager) Start(ctx context.Context, locker lockers.Locker, wg *sync.WaitGroup) error {\n\tc.locker = locker\n\tc.logger = logging.NewLogger(c.store.Config, 
\"component\", \"cluster-manager\")\n\tctx, cfn := context.WithCancel(ctx)\n\tc.cfn = cfn\n\tc.wg = wg\n\t//get clustring config from store\n\tclusteringConfig, ok, err := c.store.Config.Get(\"clustering\", \"clustering\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !ok {\n\t\treturn nil\n\t}\n\tclustering, ok := clusteringConfig.(*config.Clustering)\n\tif !ok {\n\t\treturn nil\n\t}\n\tif clustering == nil {\n\t\treturn nil\n\t}\n\tc.clusteringConfig = clustering\n\tenv.ExpandClusterEnv(c.clusteringConfig)\n\n\tapiConfig, ok, err := c.store.Config.Get(\"api-server\", \"api-server\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !ok {\n\t\treturn nil\n\t}\n\tapi, ok := apiConfig.(*config.APIServer)\n\tif !ok {\n\t\treturn errors.New(\"missing api-server config when clustring is enabled\")\n\t}\n\tif api == nil {\n\t\treturn errors.New(\"missing api-server config when clustring is enabled\")\n\t}\n\tc.apiConfig = api\n\tenv.ExpandAPIEnv(c.apiConfig)\n\n\tc.logger.Info(\"starting cluster manager\")\n\n\tc.election, err = NewElection(c.locker, clustering, c.logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.membership = NewMembership(c.locker, clustering, c.logger)\n\tc.assigner = NewAssigner(c.store)\n\n\t// start registration to register the api service\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor {\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := c.startRegistration(ctx); err != nil {\n\t\t\t\tc.logger.Error(\"registration failed\", \"err\", err)\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase <-time.After(retryRegistrationBackoff):\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\t// startRegistration should block until ctx.Done(); when it returns, exit.\n\t\t\treturn\n\t\t}\n\t}()\n\n\twg.Add(1)\n\t// run election campaign to grab the leader lock\n\t// when grabbed, start leader duties\n\tgo func() {\n\t\tdefer wg.Done()\n\t\terr := c.runCampaign(ctx)\n\t\tif err != nil 
{\n\t\t\tc.logger.Error(\"runCampaign exited with error\", \"error\", err)\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (c *ClusterManager) Stop() error {\n\tif c.cfn != nil {\n\t\tc.cfn()\n\t}\n\treturn nil\n}\n\nfunc (c *ClusterManager) runCampaign(ctx context.Context) error {\n\tbackoff := time.Second\n\n\tfor {\n\t\tif ctx.Err() != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Cooldown after an API triggered withdraw\n\t\tif wait := c.recampaignCooldown.Load(); wait > 0 {\n\t\t\tc.logger.Info(\"waiting for cooldown\", \"cooldown\", time.Duration(wait))\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn nil\n\t\t\tcase <-time.After(time.Duration(wait)):\n\t\t\t}\n\t\t\t// reset\n\t\t\tc.recampaignCooldown.Store(0)\n\t\t}\n\n\t\tterm, err := c.election.Campaign(ctx)\n\t\tif err != nil {\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tc.logger.Error(\"failed to campaign\", \"error\", err)\n\t\t\ttime.Sleep(backoff)\n\t\t\tcontinue\n\t\t}\n\n\t\tc.logger.Info(\"became leader\", \"term\", term, \"node\", c.clusteringConfig.InstanceName, \"cluster\", c.clusteringConfig.ClusterName)\n\t\t// Leader session context\n\t\tleaderCtx, leaderCancel := context.WithCancel(ctx)\n\t\tcancelLeader := func() {\n\t\t\tleaderCancel()\n\t\t\t// TODO: any extra cleanups?\n\t\t}\n\n\t\t// Start leader duties\n\t\tgo func() {\n\t\t\tif err := c.runLeader(leaderCtx); err != nil && leaderCtx.Err() == nil {\n\t\t\t\tc.logger.Error(\"runLeader exited with error\", \"err\", err)\n\t\t\t}\n\t\t}()\n\n\t\t// this blocks until leadership is lost or we’re shutting down.\n\t\tlost := c.election.Observe(ctx)\n\t\tif lost != nil {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tcancelLeader()\n\t\t\t\treturn nil\n\t\t\tcase <-lost:\n\t\t\t\tc.logger.Warn(\"leadership lost\", \"term\", term)\n\t\t\t\tcancelLeader()\n\t\t\t}\n\t\t} else {\n\t\t\t// Shouldn't happen\n\t\t\tc.logger.Warn(\"Observe returned nil channel; cancelling 
leader\")\n\t\t\tcancelLeader()\n\t\t}\n\n\t\ttime.Sleep(backoff) // small backoff before campaigning again\n\t}\n}\n\nfunc (c *ClusterManager) startRegistration(ctx context.Context) error {\n\tc.logger.Info(\"starting registration\", \"address\", c.apiConfig.Address)\n\taddr, port, _ := net.SplitHostPort(c.apiConfig.Address)\n\tp, _ := strconv.Atoi(port)\n\n\ttags := make([]string, 0, 2+len(c.clusteringConfig.Tags))\n\ttags = append(tags, fmt.Sprintf(\"cluster-name=%s\", c.clusteringConfig.ClusterName))\n\ttags = append(tags, fmt.Sprintf(\"instance-name=%s\", c.clusteringConfig.InstanceName))\n\tif c.apiConfig.TLS != nil {\n\t\ttags = append(tags, protocolTagName+\"=https\")\n\t} else {\n\t\ttags = append(tags, protocolTagName+\"=http\")\n\t}\n\ttags = append(tags, c.clusteringConfig.Tags...)\n\n\taddress := c.clusteringConfig.ServiceAddress\n\tif address == \"\" {\n\t\taddress = addr\n\t}\n\tderegister, err := c.membership.Register(ctx, c.clusteringConfig.ClusterName,\n\t\t&Registration{\n\t\t\tID:      c.clusteringConfig.InstanceName,\n\t\t\tAddress: address,\n\t\t\tPort:    p,\n\t\t\tLabels:  tags,\n\t\t})\n\tdefer deregister()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// runLeader executes as long as this node is the elected leader.\n// It continuously reconciles cluster state: assigns targets to nodes,\n// verifies ownership via locks, and updates assignments in the store.\nfunc (c *ClusterManager) runLeader(ctx context.Context) error {\n\tc.logger.Info(\"starting leader duties\")\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-time.After(c.clusteringConfig.LeaderWaitTimer):\n\t\tbreak\n\t}\n\n\t// watch membership (other nodes joining/leaving)\n\tmembersCh, cancelMembers, err := c.membership.Watch(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to start watching membership: %w\", err)\n\t}\n\tdefer cancelMembers()\n\n\t// watch targets\n\ttargetsCh, cancelTargets, err := c.store.Config.Watch(\"targets\") // no 
initial replay\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to watch targets: %w\", err)\n\t}\n\tdefer cancelTargets()\n\n\t// ticker for periodic reconciliation of target assignments\n\ttargetsWatchTicker := time.NewTicker(c.clusteringConfig.TargetsWatchTimer)\n\tdefer targetsWatchTicker.Stop()\n\n\t// intial membership sync\n\tmembers, err := c.membership.GetMembers(ctx)\n\tif err != nil {\n\t\tc.logger.Error(\"failed to get members\", \"error\", err) //log error but continue\n\t} else {\n\t\t// initial reconcile\n\t\tc.mm.Lock()\n\t\tc.members = members\n\t\tc.mm.Unlock()\n\t\tif err := c.reconcileAssignments(ctx, members); err != nil {\n\t\t\tc.logger.Error(\"reconcile assignments failed\", \"error\", err)\n\t\t}\n\t}\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tc.logger.Info(\"stopping leader duties\")\n\t\t\treturn nil\n\t\tcase members, ok := <-membersCh:\n\t\t\tif !ok {\n\t\t\t\tc.logger.Warn(\"membership watcher closed\") // happens only when explicitly closed\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tc.logger.Info(\"membership update\", \"members\", members)\n\t\t\tc.mm.Lock()\n\t\t\tc.members = members\n\t\t\tc.mm.Unlock()\n\t\tcase targets, ok := <-targetsCh:\n\t\t\tif !ok {\n\t\t\t\tc.logger.Warn(\"targets watcher closed\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tc.logger.Info(\"targets update\", \"targets\", targets)\n\t\t\tswitch targets.EventType {\n\t\t\tcase store.EventTypeCreate:\n\t\t\t\terr := c.handleTargetCreate(ctx, targets.Name)\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.logger.Error(\"failed to handle target create\", \"target\", targets.Name, \"error\", err)\n\t\t\t\t}\n\t\t\t// case store.EventTypeUpdate:\n\t\t\t// \tc.handleTarget(ctx, targets.Name)\n\t\t\tcase store.EventTypeDelete:\n\t\t\t\terr := c.handleTargetDelete(ctx, targets.Name)\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.logger.Error(\"failed to handle target delete\", \"target\", targets.Name, \"error\", err)\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-targetsWatchTicker.C:\n\t\t\t// periodic 
reconciliation of target assignments\n\t\t\tmembers := c.snapshotMembers()\n\t\t\tif len(members) == 0 {\n\t\t\t\tc.logger.Warn(\"no members, skipping reconciliation\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := c.reconcileAssignments(ctx, members); err != nil {\n\t\t\t\tc.logger.Error(\"reconcile assignments failed\", \"error\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *ClusterManager) WithdrawLeader(ctx context.Context, cooldown time.Duration) error {\n\tc.recampaignCooldown.Store(cooldown.Nanoseconds())\n\treturn c.election.Withdraw()\n}\n\nfunc (c *ClusterManager) IsLeader(ctx context.Context) (bool, error) {\n\tleader, err := c.GetLeaderName(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn leader == c.clusteringConfig.InstanceName, nil\n}\n\nfunc (c *ClusterManager) snapshotMembers() map[string]*Member {\n\tc.mm.RLock()\n\tdefer c.mm.RUnlock()\n\tmembers := make(map[string]*Member, len(c.members))\n\tmaps.Copy(members, c.members)\n\treturn members\n}\n\nfunc (c *ClusterManager) reconcileAssignments(ctx context.Context, members map[string]*Member) error {\n\t// 1. List all known targets\n\ttargets, err := c.store.Config.Keys(\"targets\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(targets) == 0 {\n\t\tc.logger.Info(\"no targets, skipping reconciliation\")\n\t\treturn nil\n\t}\n\t// 2. get current assignments from locker\n\t// target -> holder\n\tcurrentAssignments, err := c.getAssignments(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.logger.Debug(\"current assignments\", \"assignments\", currentAssignments)\n\n\tmembersLoad, err := c.getMembersLoad(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.logger.Debug(\"reconcile assignments members with load\", \"members\", members)\n\tquotas := calculateMembersQuota(members, int64(len(targets)))\n\tc.logger.Debug(\"reconcile assignments quotas\", \"quotas\", quotas)\n\n\t// 3. 
Decide assignments\n\tassignments := make(map[string]*Member) // targetName -> member\n\n\tfor _, tName := range targets {\n\t\tc.logger.Info(\"reconciling target\", \"target\", tName)\n\t\tcurrentHolder, ok := currentAssignments[tName]\n\t\tif ok {\n\t\t\tif m, ok := members[currentHolder.ID]; ok {\n\t\t\t\tc.logger.Info(\"target already assigned to member\", \"target\", tName, \"member\", m.ID)\n\t\t\t\tassignments[tName] = m\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tc.logger.Warn(\"target lock holder not found\", \"target\", tName, \"holder\", currentHolder)\n\t\t\t}\n\t\t}\n\t\tassigned := pickAssignee(tName, members, quotas, membersLoad)\n\t\tif assigned == nil {\n\t\t\tc.logger.Warn(\"no assignee found for target\", \"target\", tName)\n\t\t\tcontinue\n\t\t}\n\t\tc.logger.Info(\"assigning target\", \"target\", tName, \"assignee\", assigned.ID)\n\t\tassignments[tName] = assigned\n\t}\n\n\t// 4. Publish assignments with assigner\n\terr = c.assigner.Assign(ctx, assignments)\n\tif err != nil {\n\t\tc.logger.Error(\"failed to push assignments\", \"count\", len(assignments), \"error\", err)\n\t}\n\n\t// 5. Optionally verify active locks on assigned targets\n\tfor tName, member := range assignments {\n\t\tc.asyncVerifyLock(ctx, tName, member.ID, time.Now().Add(5*time.Second))\n\t}\n\n\treturn nil\n}\n\nfunc (c *ClusterManager) handleTargetCreate(ctx context.Context, target string, deniedMembers ...string) error {\n\t// 1. 
get current members with Load populated\n\tcurrentMembers, err := c.GetMembers(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.logger.Debug(\"current members\", \"currentMembers\", currentMembers)\n\n\tfor _, m := range deniedMembers {\n\t\tdelete(currentMembers, m)\n\t}\n\treturn c.assignTarget(ctx, target, currentMembers)\n}\n\n// assignTarget assigns a target to the least loaded member.\n// This is used when a new target is created or when a member is drained from its targets.\nfunc (c *ClusterManager) assignTarget(ctx context.Context, target string, currentMembers map[string]*Member) error {\n\t// 2. find least loaded member\n\tleastLoadedMember := c.getLeastLoadedMember(currentMembers)\n\tif leastLoadedMember == nil {\n\t\treturn fmt.Errorf(\"no least loaded member found\")\n\t}\n\t// 3. assign target to member\n\terr := c.assigner.Assign(ctx, map[string]*Member{target: leastLoadedMember})\n\tif err != nil {\n\t\tc.logger.Error(\"failed to push assignment\", \"target\", target, \"error\", err)\n\t\treturn err\n\t}\n\tleastLoadedMember.Load++\n\tc.asyncVerifyLock(context.Background(), target, leastLoadedMember.ID, time.Now().Add(5*time.Second))\n\treturn nil\n}\n\nfunc (c *ClusterManager) getAssignments(ctx context.Context) (map[string]*Member, error) {\n\tcurrentAssignments, err := c.locker.List(ctx, targetsLockPrefix(c.clusteringConfig.ClusterName))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres := make(map[string]*Member)\n\tc.mm.RLock()\n\tdefer c.mm.RUnlock()\n\n\tfor tName, memberName := range currentAssignments {\n\t\t// normalize targetName\n\t\ttargetName := path.Base(tName)\n\t\tif m, ok := c.members[memberName]; ok {\n\t\t\tres[targetName] = m\n\t\t} else {\n\t\t\t// TODO: unknwon member ?\n\t\t\tc.logger.Warn(\"found unknown member in current assignments\", \"member\", memberName)\n\t\t}\n\t}\n\n\treturn res, nil\n}\n\nfunc (c *ClusterManager) getAssignment(ctx context.Context, target string) *Member {\n\tmember, ok := c.targetLockHolder(ctx, 
target)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\treturn member\n}\n\n// GetMembers returns all members in the cluster.\n// Populates the Load field with the number of locked targets.\nfunc (c *ClusterManager) GetMembers(ctx context.Context) (map[string]*Member, error) {\n\tcurrentMembers := c.snapshotMembers()\n\tif len(currentMembers) == 0 {\n\t\treturn nil, fmt.Errorf(\"no members found\")\n\t}\n\treturn c.populateMemberLoad(ctx, currentMembers)\n\n}\n\nfunc (c *ClusterManager) populateMemberLoad(ctx context.Context, members map[string]*Member) (map[string]*Member, error) {\n\tcurrentAssignments, err := c.locker.List(ctx, targetsLockPrefix(c.clusteringConfig.ClusterName))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres := make(map[string]*Member)\n\t// seed res with current members\n\tfor _, m := range members {\n\t\tres[m.ID] = &Member{\n\t\t\tID:      m.ID,\n\t\t\tAddress: m.Address,\n\t\t\tLabels:  m.Labels,\n\t\t\tLoad:    0,\n\t\t\tTargets: nil,\n\t\t}\n\t}\n\tfor tName, memberName := range currentAssignments {\n\t\t// normalize targetName\n\t\ttargetName := path.Base(tName)\n\t\tif m, ok := members[memberName]; ok {\n\t\t\tif am, ok := res[memberName]; ok {\n\t\t\t\tam.Load++\n\t\t\t\tam.Targets = append(am.Targets, targetName)\n\t\t\t} else {\n\t\t\t\tam := &Member{\n\t\t\t\t\tID:      m.ID,\n\t\t\t\t\tAddress: m.Address,\n\t\t\t\t\tLabels:  m.Labels,\n\t\t\t\t\tLoad:    1,\n\t\t\t\t\tTargets: []string{targetName},\n\t\t\t\t}\n\t\t\t\tres[memberName] = am\n\t\t\t}\n\t\t} else {\n\t\t\tc.logger.Warn(\"found unknown member in current assignments\", \"member\", memberName)\n\t\t}\n\t}\n\treturn res, nil\n}\n\nfunc (c *ClusterManager) getMembersLoad(ctx context.Context) (map[string]int64, error) {\n\tcurrentAssignments, err := c.locker.List(ctx, targetsLockPrefix(c.clusteringConfig.ClusterName))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres := make(map[string]int64)\n\n\tfor _, memberName := range currentAssignments {\n\t\t_, ok := 
res[memberName]\n\t\tif !ok {\n\t\t\tres[memberName] = 0\n\t\t}\n\t\tres[memberName]++\n\t}\n\treturn res, nil\n}\n\nfunc (c *ClusterManager) getLeastLoadedMember(assignments map[string]*Member) *Member {\n\tvar leastLoadedMember *Member\n\tfor _, member := range assignments {\n\t\tif leastLoadedMember == nil {\n\t\t\tleastLoadedMember = member\n\t\t\tcontinue\n\t\t}\n\t\tif member.Load < leastLoadedMember.Load {\n\t\t\tleastLoadedMember = member\n\t\t}\n\t}\n\treturn leastLoadedMember\n}\n\nfunc (c *ClusterManager) getMostLoadedMember(members map[string]*Member) *Member {\n\tvar mostLoadedMember *Member\n\tfor _, member := range members {\n\t\tif mostLoadedMember == nil {\n\t\t\tmostLoadedMember = member\n\t\t}\n\t\tif member.Load > mostLoadedMember.Load {\n\t\t\tmostLoadedMember = member\n\t\t}\n\t}\n\treturn mostLoadedMember\n}\n\nfunc (c *ClusterManager) handleTargetDelete(ctx context.Context, target string) error {\n\t// find target assignment\n\tassignedTo := c.getAssignment(ctx, target)\n\tif assignedTo == nil {\n\t\treturn fmt.Errorf(\"target is not assigned to any member\")\n\t}\n\terr := c.assigner.Unassign(ctx, assignedTo, target)\n\tif err != nil {\n\t\tc.logger.Error(\"failed to unassign target\", \"target\", target, \"error\", err)\n\t\treturn err\n\t}\n\t// delete from other instances\n\tc.mm.RLock()\n\tmembers := make(map[string]*Member, len(c.members))\n\tmaps.Copy(members, c.members)\n\tc.mm.RUnlock()\n\t// delete self from members since the initial trigger for this function is the target delete event\n\tdelete(members, assignedTo.ID)\n\terr = c.deleteTargetFromMembers(ctx, target, members)\n\tif err != nil {\n\t\tc.logger.Error(\"failed to delete target from members\", \"target\", target, \"error\", err)\n\t\treturn err\n\t}\n\t// TODO: verify ?\n\treturn nil\n}\n\nfunc (c *ClusterManager) deleteTargetFromMembers(ctx context.Context, target string, members map[string]*Member) error {\n\tfor _, member := range members {\n\t\taddress := 
getMemberAddress(member)\n\t\turl := address + apiconst.TargetsConfigAPIv1URL + \"/\" + target\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil)\n\t\tif err != nil {\n\t\t\tc.logger.Error(\"failed to create request\", \"error\", err)\n\t\t\treturn err\n\t\t}\n\t\tresp, err := c.apiClient.Do(req)\n\t\tif err != nil {\n\t\t\tc.logger.Error(\"failed to delete target\", \"error\", err)\n\t\t\treturn err\n\t\t}\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode/100 != 2 {\n\t\t\tc.logger.Error(\"failed to delete target\", \"error\", resp.Status)\n\t\t\treturn fmt.Errorf(\"failed to delete target: %s\", resp.Status)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *ClusterManager) targetLockHolder(ctx context.Context, target string) (*Member, bool) {\n\tholder, ok := holder(ctx, c.locker, targetLockKey(target, c.clusteringConfig.ClusterName))\n\tif !ok || holder == \"\" {\n\t\treturn nil, false\n\t}\n\tc.mm.RLock()\n\tdefer c.mm.RUnlock()\n\tmember, ok := c.members[holder]\n\tif ok {\n\t\treturn member, true\n\t}\n\treturn nil, false\n}\n\nfunc (c *ClusterManager) GetInstanceToTargetsMapping(ctx context.Context) (map[string][]string, error) {\n\tlocks, err := c.locker.List(ctx, targetsLockPrefix(c.clusteringConfig.ClusterName))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trs := make(map[string][]string)\n\tfor k, v := range locks {\n\t\tif _, ok := rs[v]; !ok {\n\t\t\trs[v] = make([]string, 0)\n\t\t}\n\t\trs[v] = append(rs[v], path.Base(k))\n\t}\n\tfor _, ls := range rs {\n\t\tsort.Strings(ls)\n\t}\n\treturn rs, nil\n}\n\nfunc (c *ClusterManager) GetLeaderName(ctx context.Context) (string, error) {\n\tleaderKey := fmt.Sprintf(\"gnmic/%s/leader\", c.clusteringConfig.ClusterName)\n\tleader, err := c.locker.List(ctx, leaderKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(leader) == 0 {\n\t\treturn \"\", nil\n\t}\n\treturn leader[leaderKey], nil\n}\n\nfunc (c *ClusterManager) DrainMember(ctx context.Context, toBeDrained string) error 
{\n\tmembers, err := c.GetMembers(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.logger.Info(\"members\", \"members\", members)\n\tmemberToDrain, ok := members[toBeDrained]\n\tif !ok {\n\t\treturn fmt.Errorf(\"member to drain not found\")\n\t}\n\n\tif memberToDrain == nil {\n\t\treturn fmt.Errorf(\"member to drain not found\")\n\t}\n\tif len(memberToDrain.Targets) == 0 {\n\t\tc.logger.Info(\"member has no targets\", \"member\", toBeDrained)\n\t\treturn nil\n\t}\n\tc.logger.Info(\"draining member\", \"member\", toBeDrained)\n\tc.logger.Info(\"unassigning targets\", \"targets\", memberToDrain.Targets)\n\terr = c.assigner.Unassign(ctx, memberToDrain, memberToDrain.Targets...)\n\tif err != nil {\n\t\tc.logger.Error(\"failed to unassign targets\", \"member\", toBeDrained, \"error\", err)\n\t\treturn err\n\t}\n\tc.logger.Info(\"unassigned targets\", \"targets\", memberToDrain.Targets)\n\tc.logger.Info(\"deleting member\", \"member\", toBeDrained)\n\tdelete(members, toBeDrained)\n\tfor _, t := range memberToDrain.Targets {\n\t\tc.assignTarget(ctx, t, members)\n\t}\n\treturn nil\n}\n\nfunc (c *ClusterManager) asyncVerifyLock(ctx context.Context, target, expectHolder string, deadline time.Time) {\n\tselect {\n\tcase c.lockCheckLimiter <- struct{}{}: // acquire semaphore\n\tcase <-ctx.Done():\n\t\treturn\n\t}\n\tgo func() {\n\t\tdefer func() {\n\t\t\t<-c.lockCheckLimiter // release semaphore\n\t\t}()\n\t\tkey := targetLockKey(target, c.clusteringConfig.ClusterName)\n\t\tfor {\n\t\t\tif ctx.Err() != nil || time.Now().After(deadline) {\n\t\t\t\tc.logger.Info(\"lock not observed before deadline\",\n\t\t\t\t\t\"target\", target, \"expect\", expectHolder)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tholder, ok := holder(ctx, c.locker, key)\n\t\t\tif ok && holder == expectHolder {\n\t\t\t\tc.logger.Info(\"lock observed\",\n\t\t\t\t\t\"target\", target, \"holder\", holder)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-time.After(200 * 
time.Millisecond):\n\t\t\t}\n\t\t}\n\t}()\n}\n"
  },
  {
    "path": "pkg/collector/managers/cluster/election.go",
    "content": "package cluster_manager\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n)\n\nconst (\n\t// campaignPeriod        = 1 * time.Second\n\trecampaignBackoff     = 200 * time.Millisecond\n\trecampaignJitterRatio = 0.2\n)\n\ntype Election interface {\n\t// Blocks until this node becomes leader (i.e., acquires the leader lock) or ctx is done.\n\t// Returns a monotonically increasing term for observability/metrics.\n\tCampaign(ctx context.Context) (term int64, err error)\n\t// Closes when leadership is lost (or returns nil if you don't need it).\n\tObserve(ctx context.Context) <-chan struct{} // closes/receives when leadership is lost (optional: return nil if N/A)\n\t// Withdraw withdraws from the leader position\n\tWithdraw() error\n}\n\ntype election struct {\n\tnodeID      string\n\tclusterName string\n\tRenewEvery  time.Duration // renew every (e.g., 1/2 of TTL)\n\tlocker      lockers.Locker\n\tlogger      *slog.Logger\n\t//\n\t// internals\n\tterm            atomic.Int64\n\theld            atomic.Bool\n\tloseOnce        sync.Once\n\tloseCh          chan struct{}\n\tcancelKeepAlive context.CancelFunc\n\n\t// backend-specific release fn for the held lock\n\treleaseFn func() error\n\tmu        sync.Mutex\n}\n\nfunc NewElection(locker lockers.Locker, clustering *config.Clustering, logger *slog.Logger) (Election, error) {\n\tvar renewEvery time.Duration\n\tsTTL, ok := clustering.Locker[\"session-ttl\"]\n\tif ok {\n\t\tswitch st := sTTL.(type) {\n\t\tcase string:\n\t\t\tvar err error\n\t\t\trenewEvery, err = time.ParseDuration(st)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif renewEvery <= 0 {\n\t\t\t\treturn nil, errors.New(\"session-ttl must be greater than 0\")\n\t\t\t}\n\t\t\trenewEvery = renewEvery / 2\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"session-ttl must be a 
string\")\n\t\t}\n\t} else {\n\t\trenewEvery = 5 * time.Second\n\t}\n\n\treturn &election{\n\t\tlocker:      locker,\n\t\tnodeID:      clustering.InstanceName,\n\t\tclusterName: clustering.ClusterName,\n\t\tRenewEvery:  renewEvery,\n\t\tlogger:      logger,\n\t}, nil\n}\n\nfunc (e *election) Campaign(ctx context.Context) (term int64, err error) {\n\te.logger.Info(\"campaigning for leader\", \"node\", e.nodeID, \"cluster\", e.clusterName)\n\t// reinitialize loseCh for this term\n\te.mu.Lock()\n\te.loseOnce = sync.Once{}\n\te.loseCh = make(chan struct{})\n\te.mu.Unlock()\n\tkey := e.leaderKey()\n\t// try lock\n\t// keep trying until ctx canceled\n\tticker := time.NewTimer(0) // fire immediately first time\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tif !ticker.Stop() {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t\te.logger.Info(\"trying to acquire leader lock\", \"node\", e.nodeID, \"cluster\", e.clusterName, \"term\", term)\n\t\t// Try to acquire the leader lock\n\t\tok, release, err := tryAcquire(ctx, e.locker, key, []byte(e.nodeID))\n\t\tif err != nil {\n\t\t\te.logger.Error(\"failed to acquire leader lock\", \"node\", e.nodeID, \"cluster\", e.clusterName, \"term\", term, \"error\", err)\n\t\t\t// locker error... 
backoff a bit\n\t\t\tdelay := jittered(recampaignBackoff)\n\t\t\tticker.Reset(delay)\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn 0, ctx.Err()\n\t\t\tcase <-ticker.C:\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif ok {\n\t\t\te.logger.Info(\"acquired leader lock\", \"node\", e.nodeID, \"cluster\", e.clusterName, \"term\", term)\n\t\t\t// I'm the captain now!\n\t\t\te.mu.Lock()\n\t\t\te.releaseFn = release\n\t\t\te.mu.Unlock()\n\n\t\t\te.held.Store(true)\n\t\t\tterm := e.term.Add(1)\n\n\t\t\t// start keepalive loop bound to this leadership session\n\t\t\tkeepCtx, cancel := context.WithCancel(ctx)\n\t\t\te.cancelKeepAlive = cancel\n\t\t\tgo e.keepalive(keepCtx, key)\n\n\t\t\treturn term, nil\n\t\t}\n\t\te.logger.Info(\"not acquired leader lock\", \"node\", e.nodeID, \"cluster\", e.clusterName, \"term\", term)\n\t\t// lock not acquired, add a jitter and retry\n\t\tdelay := jittered(recampaignBackoff)\n\t\tticker.Reset(delay)\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn 0, ctx.Err()\n\t\tcase <-ticker.C:\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\n// Observe closes when this node loses leadership.\n// (Safe to call multiple times, same channel is returned.)\nfunc (e *election) Observe(ctx context.Context) <-chan struct{} {\n\te.mu.Lock()\n\tch := e.loseCh\n\te.mu.Unlock()\n\treturn ch\n}\n\nfunc (e *election) Withdraw() error {\n\tif !e.held.Load() {\n\t\treturn nil\n\t}\n\n\te.mu.Lock()\n\trelease := e.releaseFn\n\te.releaseFn = nil\n\tcancel := e.cancelKeepAlive\n\te.cancelKeepAlive = nil\n\te.mu.Unlock()\n\tif cancel != nil {\n\t\tcancel()\n\t}\n\tif release != nil {\n\t\t_ = release() // ignore error\n\t}\n\n\t// signal loss\n\te.loseOnce.Do(func() {\n\t\te.held.Store(false)\n\t\tif e.loseCh != nil {\n\t\t\tclose(e.loseCh)\n\t\t}\n\t})\n\n\te.logger.Info(\"leadership withdrawn\", \"term\", e.term.Load(), \"node\", e.nodeID, \"cluster\", e.clusterName)\n\n\treturn nil\n}\n\nfunc (e *election) leaderKey() string {\n\treturn fmt.Sprintf(\"gnmic/%s/leader\", 
e.clusterName)\n}\n\n// keepalive periodically renews the lock and detects loss.\n// On failure (or if the holder changes), it signals loss and cleans up.\nfunc (e *election) keepalive(ctx context.Context, key string) {\n\tt := time.NewTicker(e.RenewEvery)\n\tdefer t.Stop()\n\te.logger.Info(\"starting keepalive loop\", \"node\", e.nodeID, \"cluster\", e.clusterName, \"term\", e.term.Load())\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-t.C:\n\t\t\te.logger.Info(\"renewing leader lock\", \"node\", e.nodeID, \"cluster\", e.clusterName, \"term\", e.term.Load())\n\t\t\t// Renew our lease,if that fails or another node took over, we lost leadership.\n\t\t\tif err := renew(ctx, e.locker, key, []byte(e.nodeID)); err != nil {\n\t\t\t\te.signalLoss()\n\t\t\t\treturn\n\t\t\t}\n\t\t\te.logger.Info(\"renewed leader lock\", \"node\", e.nodeID, \"cluster\", e.clusterName, \"term\", e.term.Load())\n\t\t\tif h, ok := holder(ctx, e.locker, key); ok && h != e.nodeID {\n\t\t\t\t// someone else is now the holder → we lost\n\t\t\t\te.signalLoss()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (e *election) signalLoss() {\n\t// release our lock (best effort)\n\te.mu.Lock()\n\trelease := e.releaseFn\n\te.releaseFn = nil\n\te.mu.Unlock()\n\tif release != nil {\n\t\t_ = release() // ignore error, we already lost\n\t}\n\n\t// stop renew loop\n\tif e.cancelKeepAlive != nil {\n\t\te.cancelKeepAlive()\n\t}\n\n\t// signal once\n\te.loseOnce.Do(func() {\n\t\te.held.Store(false)\n\t\tclose(e.loseCh)\n\t})\n\te.logger.Warn(\"lost leadership\", \"term\", e.term.Load(), \"node\", e.nodeID, \"cluster\", e.clusterName)\n}\n\n// tryAcquire tries to acquire key with value=holder and TTL.\n// Returns (true, releaseFn, nil) if acquired, (false, nil, nil) if not acquired, or (false, nil, err) on backend error.\nfunc tryAcquire(ctx context.Context, lk lockers.Locker, key string, holder []byte) (bool, func() error, error) {\n\t// Lock() attempts to acquire the lock, it returns 
(true,nil) if successful,\n\t// (false,nil) if already locked, or (false,err) if backend error.\n\tok, err := lk.Lock(ctx, key, holder)\n\tif err != nil {\n\t\treturn false, nil, err\n\t}\n\tif !ok {\n\t\t// someone else already holds the lock\n\t\treturn false, nil, nil\n\t}\n\n\t// Start a keepalive session for this lock.\n\tkaCtx, cancel := context.WithCancel(context.Background())\n\tdoneCh, errCh := lk.KeepLock(kaCtx, key)\n\n\t// Release function closes keepalive and unlocks.\n\trelease := func() error {\n\t\tcancel()\n\t\t// drain both channels to avoid goroutine leaks\n\t\tselect {\n\t\tcase <-doneCh:\n\t\tdefault:\n\t\t}\n\t\tselect {\n\t\tcase <-errCh:\n\t\tdefault:\n\t\t}\n\t\treturn lk.Unlock(context.Background(), key)\n\t}\n\n\t// Background watcher: if KeepLock fails (err or done), cancel leadership early.\n\tgo func() {\n\t\tselect {\n\t\tcase <-doneCh:\n\t\t\t// Lock lost gracefully (KeepLock closed)\n\t\t\tcancel()\n\t\tcase err := <-errCh:\n\t\t\t// Renewal failed or backend issue\n\t\t\t_ = err\n\t\t\tcancel()\n\t\tcase <-kaCtx.Done():\n\t\t}\n\t}()\n\n\treturn true, release, nil\n}\n\n// renew refreshes the TTL for a lock we hold.\nfunc renew(ctx context.Context, lk lockers.Locker, key string, holder []byte) error {\n\t// In this Locker API, TTL renewals are managed by KeepLock().\n\t// So \"renew\" doesn’t need to explicitly refresh, just check if lock is still held.\n\theld, err := lk.IsLocked(ctx, key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !held {\n\t\treturn fmt.Errorf(\"lock %q lost\", key)\n\t}\n\treturn nil\n}\n\n// holder returns current holder id (stringified from value) if locked.\nfunc holder(ctx context.Context, lk lockers.Locker, key string) (string, bool) {\n\tm, err := lk.List(ctx, key)\n\tif err != nil {\n\t\treturn \"\", false\n\t}\n\t// The Locker.List returns map[string]string{ lockName -> holderID }\n\tif len(m) == 0 {\n\t\treturn \"\", false\n\t}\n\tif v, ok := m[key]; ok {\n\t\treturn v, true\n\t}\n\treturn \"\", 
false\n}\n"
  },
  {
    "path": "pkg/collector/managers/cluster/membership.go",
    "content": "package cluster_manager\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n)\n\nconst (\n\tapiServiceName = \"gnmic-api\"\n)\n\ntype Membership interface {\n\tRegister(ctx context.Context, clusterName string, self *Registration) (func() error, error)\n\tGetMembers(ctx context.Context) (map[string]*Member, error)\n\tWatch(ctx context.Context) (<-chan map[string]*Member, func(), error)\n}\n\ntype Registration struct {\n\tID      string   // instance ID\n\tName    string   // service Name\n\tAddress string   // service Address\n\tPort    int      //service port\n\tLabels  []string // labels/tags list\n}\n\ntype Member struct {\n\tID      string   `json:\"id,omitempty\"`\n\tAddress string   `json:\"address,omitempty\"`\n\tLabels  []string `json:\"labels,omitempty\"`\n\tLoad    int64    `json:\"load,omitempty\"` // populated by the cluster manager based on lock count\n\tTargets []string `json:\"targets,omitempty\"`\n}\n\nfunc (m *Member) String() string {\n\tb, _ := json.Marshal(m)\n\treturn string(b)\n}\n\ntype membership struct {\n\tlocker lockers.Locker\n\tlogger *slog.Logger\n\t// clusterName string\n\tconfig *config.Clustering\n}\n\nfunc NewMembership(locker lockers.Locker, config *config.Clustering, logger *slog.Logger) Membership {\n\treturn &membership{locker: locker, logger: logger, config: config}\n}\n\nfunc (m *membership) GetMembers(ctx context.Context) (map[string]*Member, error) {\n\tmembers := make(map[string]*Member)\n\tsrvs, err := m.locker.GetServices(ctx, m.serviceName(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, srv := range srvs {\n\t\tmembers[srv.ID] = &Member{\n\t\t\tID:      srv.ID,\n\t\t\tAddress: srv.Address,\n\t\t\tLabels:  srv.Tags,\n\t\t}\n\t}\n\treturn members, nil\n}\n\nfunc (m *membership) Watch(ctx context.Context) (<-chan map[string]*Member, func(), error) {\n\tlockerCh := 
make(chan []*lockers.Service)\n\tctx, cancel := context.WithCancel(ctx)\n\tserviceName := m.serviceName()\n\tm.logger.Info(\"watching services\", \"serviceName\", serviceName)\n\tgo m.locker.WatchServices(ctx, serviceName, []string{\"cluster-name=\" + m.config.ClusterName}, lockerCh, m.config.ServicesWatchTimer)\n\n\tch := make(chan map[string]*Member)\n\n\tgo func() {\n\t\tdefer cancel()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase srvs, ok := <-lockerCh:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tmembers := make(map[string]*Member)\n\t\t\t\tfor _, srv := range srvs {\n\t\t\t\t\tmembers[srv.ID] = &Member{ID: srv.ID, Address: srv.Address, Labels: srv.Tags}\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase ch <- members:\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn ch, func() {\n\t\tcancel()\n\t\tclose(ch)\n\t}, nil\n}\n\nfunc (m *membership) Register(ctx context.Context, clusterName string, self *Registration) (func() error, error) {\n\tctx, cancel := context.WithCancel(ctx)\n\terr := m.locker.Register(ctx, &lockers.ServiceRegistration{\n\t\tID:      self.ID,\n\t\tName:    fmt.Sprintf(\"%s-%s\", clusterName, apiServiceName),\n\t\tAddress: self.Address,\n\t\tPort:    self.Port,\n\t\tTags:    self.Labels,\n\t\tTTL:     5 * time.Second, // TODO: make this configurable\n\t})\n\treturn func() error {\n\t\tcancel()\n\t\treturn m.locker.Deregister(self.ID)\n\t}, err\n}\n\nfunc (m *membership) serviceName() string {\n\treturn fmt.Sprintf(\"%s-%s\", m.config.ClusterName, apiServiceName)\n}\n"
  },
  {
    "path": "pkg/collector/managers/cluster/placement.go",
    "content": "package cluster_manager\n\nimport (\n\t\"hash/fnv\"\n)\n\n// pickAssignee selects the best Member to assign a target to, based on current quota and load.\n// It chooses the member with the most available quota (quota - load), and uses tieBreak for deterministic selection when tied.\n// Updates the membersLoad map to reflect the new assignment and returns the selected Member.\nfunc pickAssignee(\n\ttargetName string,\n\tmembers map[string]*Member,\n\tquotas map[string]int64,\n\tmembersLoad map[string]int64,\n) *Member {\n\tif len(members) == 0 {\n\t\treturn nil\n\t}\n\tvar pick *Member\n\thighestFreeQuota := int64(-1 << 62)\n\tfor _, m := range members {\n\t\ts := quotas[m.ID] - membersLoad[m.ID]\n\t\tif s > highestFreeQuota || (s == highestFreeQuota && tieBreak(targetName, m.ID, pick.ID)) {\n\t\t\tpick = m\n\t\t\thighestFreeQuota = s\n\t\t}\n\t}\n\tmembersLoad[pick.ID]++\n\treturn pick\n}\n\n// tieBreak deterministically chooses between two members for assignment by hashing the combination of targetName and memberID using FNV-1a.\n// If the hashes are equal, it resorts to lexicographical comparison of the member IDs.\nfunc tieBreak(targetName, memberA, memberB string) bool {\n\n\tif memberB == \"\" {\n\t\treturn true\n\t}\n\tha := fnv64(targetName + memberA)\n\thb := fnv64(targetName + memberB)\n\tif ha == hb {\n\t\treturn memberA < memberB\n\t}\n\treturn ha < hb\n}\n\n// fnv64 computes the FNV-1a 64-bit hash for a given string.\nfunc fnv64(s string) uint64 {\n\th := fnv.New64a()\n\t_, _ = h.Write([]byte(s))\n\treturn h.Sum64()\n}\n"
  },
  {
    "path": "pkg/collector/managers/cluster/rebalance.go",
    "content": "package cluster_manager\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"sort\"\n\t\"time\"\n)\n\nfunc (c *ClusterManager) RebalanceTargets(ctx context.Context) error {\n\tmembers, err := c.GetMembers(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(members) < 2 {\n\t\treturn fmt.Errorf(\"no members or only one member found\")\n\t}\n\tc.logger.Debug(\"members\", \"members\", members)\n\t// get most loaded and least loaded\n\tmostLoadedMember := c.getMostLoadedMember(members)\n\tif mostLoadedMember == nil {\n\t\treturn fmt.Errorf(\"count not determine most loaded member\")\n\t}\n\tleastLoadedMember := c.getLeastLoadedMember(members)\n\tif leastLoadedMember == nil {\n\t\treturn fmt.Errorf(\"count not determine least loaded member\")\n\t}\n\n\tc.logger.Debug(\"mostLoadedMember\", \"mostLoadedMember\", mostLoadedMember)\n\tc.logger.Debug(\"leastLoadedMember\", \"leastLoadedMember\", leastLoadedMember)\n\t// decide if rebalancing is needed\n\t// if not, return\n\tdiff := mostLoadedMember.Load - leastLoadedMember.Load\n\tif diff < 2 {\n\t\tc.logger.Info(\"rebalancing is not needed\",\n\t\t\t\"mostLoadedMember\", mostLoadedMember.ID,\n\t\t\t\"mostLoadedMemberLoad\", mostLoadedMember.Load,\n\t\t\t\"leastLoadedMember\", leastLoadedMember.ID,\n\t\t\t\"leastLoadedMemberLoad\", leastLoadedMember.Load,\n\t\t)\n\t\treturn nil\n\t}\n\tc.logger.Info(\"rebalancing is needed\",\n\t\t\"mostLoadedMember\", mostLoadedMember.ID,\n\t\t\"mostLoadedMemberLoad\", mostLoadedMember.Load,\n\t\t\"leastLoadedMember\", leastLoadedMember.ID,\n\t\t\"leastLoadedMemberLoad\", leastLoadedMember.Load,\n\t)\n\t// determine the set of targets to move\n\tmoveCount := diff / 2 // TODO: add cap\n\tmoveCount = max(moveCount, leastLoadedMember.Load)\n\tcandidates := append([]string{}, mostLoadedMember.Targets...)\n\trand.Shuffle(len(candidates), func(i, j int) {\n\t\tcandidates[i], candidates[j] = candidates[j], candidates[i]\n\t})\n\ttargetsToMove := 
candidates[:moveCount]\n\tassignments := make(map[string]*Member)\n\t// unassign the target from the most loaded member\n\tfor _, t := range targetsToMove {\n\t\tc.logger.Info(\"unassigning target\", \"target\", t, \"member\", mostLoadedMember.ID)\n\t\terr = c.assigner.Unassign(ctx, mostLoadedMember, t)\n\t\tif err != nil {\n\t\t\tc.logger.Error(\"failed to unassign target\", \"target\", t, \"member\", mostLoadedMember.ID, \"error\", err)\n\t\t\tcontinue\n\t\t}\n\t\tassignments[t] = leastLoadedMember\n\t}\n\tc.logger.Info(\"assignment set\", \"assignments\", assignments, \"member\", leastLoadedMember.ID)\n\terr = c.assigner.Assign(ctx, assignments)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, t := range targetsToMove {\n\t\tc.asyncVerifyLock(ctx, t, leastLoadedMember.ID, time.Now().Add(5*time.Second))\n\t}\n\treturn nil\n}\n\nfunc (c *ClusterManager) RebalanceTargetsV2() error {\n\tif ok := c.rebalancingSem.TryAcquire(1); !ok {\n\t\treturn fmt.Errorf(\"rebalancing already in progress\")\n\t}\n\tgo func() {\n\t\tdefer c.rebalancingSem.Release(1)\n\t\tctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) // TODO: configurable\n\t\tdefer cancel()\n\t\tmembers, err := c.GetMembers(ctx)\n\t\tif err != nil {\n\t\t\tc.logger.Error(\"failed to get members\", \"error\", err)\n\t\t}\n\t\trebalancePlan := rebalance(members)\n\t\tc.logger.Info(\"rebalance plan\", \"rebalancePlan\", rebalancePlan)\n\t\tif rebalancePlan == nil {\n\t\t\tc.logger.Info(\"cluster is already balanced\")\n\t\t\treturn\n\t\t}\n\n\t\tif len(rebalancePlan) == 0 {\n\t\t\tc.logger.Info(\"cluster is already balanced\")\n\t\t\treturn\n\t\t}\n\t\t// per member deltas\n\t\tremoveBySrc := map[string][]string{}\n\t\taddByDst := map[string][]string{}\n\t\t// keep target -> current member mapping\n\t\towner := make(map[string]string)\n\t\tfor id, m := range members {\n\t\t\tfor _, t := range m.Targets {\n\t\t\t\towner[t] = id\n\t\t\t}\n\t\t}\n\t\tfor t, dst := range rebalancePlan 
{\n\t\t\tif srcID, ok := owner[t]; ok && srcID != dst.ID {\n\t\t\t\tremoveBySrc[srcID] = append(removeBySrc[srcID], t)\n\t\t\t\taddByDst[dst.ID] = append(addByDst[dst.ID], t)\n\t\t\t}\n\t\t}\n\t\tc.logger.Info(\"removing targets\", \"removeBySrc\", removeBySrc)\n\t\tfor srcID, ts := range removeBySrc {\n\t\t\terr = c.assigner.Unassign(ctx, members[srcID], ts...)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Error(\"failed to unassign targets\", \"targets\", ts, \"member\", srcID, \"error\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tc.logger.Info(\"adding targets\", \"addByDst\", addByDst)\n\t\tfor dstID, ts := range addByDst {\n\t\t\tasg := make(map[string]*Member, len(ts))\n\t\t\tfor _, t := range ts {\n\t\t\t\tasg[t] = members[dstID]\n\t\t\t}\n\t\t\terr = c.assigner.Assign(ctx, asg)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Error(\"assign batch failed\", \"member\", dstID, \"err\", err)\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}\n\n// rebalance computes a one-shot plan: target -> newOwner (only moved ones).\n// It never proposes moving the same target twice, and tries to fill each receiver to its quota.\nfunc rebalance(members map[string]*Member) map[string]*Member {\n\t// calculate total load\n\ttotal := int64(0)\n\tfor _, m := range members {\n\t\ttotal += m.Load\n\t}\n\t// determine the quota for each member\n\tq := calculateMembersQuota(members, total)\n\tif len(q) == 0 {\n\t\treturn nil // already balanced\n\t}\n\n\tdonors := make([]*donor, 0)\n\treceivers := make([]*receiver, 0)\n\n\t// deterministic member order\n\tids := make([]string, 0, len(members))\n\tfor id := range members {\n\t\tids = append(ids, id)\n\t}\n\tsort.Strings(ids)\n\n\t// determine \"want\" and \"have\" for each member\n\tfor _, id := range ids {\n\t\tm := members[id]\n\t\twant := q[id]\n\t\thave := m.Load\n\t\tswitch {\n\t\tcase have > want:\n\t\t\t// copy targets & randomize to avoid bias; or choose oldest/cheapest to move\n\t\t\tpool := append([]string(nil), 
m.Targets...)\n\t\t\trand.Shuffle(len(pool), func(i, j int) { pool[i], pool[j] = pool[j], pool[i] })\n\t\t\tdonors = append(donors, &donor{\n\t\t\t\tid: id,\n\t\t\t\t// m:       m,\n\t\t\t\tsurplus: have - want,\n\t\t\t\tpool:    pool,\n\t\t\t})\n\t\tcase have < want:\n\t\t\treceivers = append(receivers, &receiver{\n\t\t\t\tid: id,\n\t\t\t\t// m: m,\n\t\t\t\tneed: want - have,\n\t\t\t})\n\t\t}\n\t}\n\n\tif len(donors) == 0 || len(receivers) == 0 {\n\t\treturn nil // already balanced\n\t}\n\n\tmoves := make(map[string]*Member)\n\t// determine the best targets to move from each donor\n\tfor _, d := range donors {\n\t\tfor d.surplus > 0 && len(receivers) > 0 {\n\t\t\tr := receivers[0]\n\t\t\tif r.need == 0 { // receiver needs no more\n\t\t\t\treceivers = receivers[1:]\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// find up to k targets eligible for r\n\t\t\tk := min(d.surplus, r.need)\n\t\t\ttaken := int64(0)\n\t\t\t// scan donor pool, pick targets, compact pool as we consume\n\t\t\tw := 0\n\t\t\tfor _, t := range d.pool {\n\t\t\t\tif taken < k {\n\t\t\t\t\tmoves[t] = members[r.id] // move target to receiver\n\t\t\t\t\ttaken++\n\t\t\t\t\t// skip copying this one (removed)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// keep in pool\n\t\t\t\td.pool[w] = t\n\t\t\t\tw++\n\t\t\t}\n\t\t\td.pool = d.pool[:w] // compact pool\n\t\t\td.surplus -= taken  // update surplus\n\t\t\tr.need -= taken     // update need\n\n\t\t\t// if this receiver still needs more, keep it at index 0, otherwise pop it\n\t\t\tif r.need == 0 {\n\t\t\t\treceivers = receivers[1:]\n\t\t\t} else {\n\t\t\t\t// rotate receiver to the back to give others a chance (optional)\n\t\t\t\treceivers = append(receivers[1:], r)\n\t\t\t}\n\n\t\t\t// If donor ran out of eligible candidates before satisfying k, break to next receiver\n\t\t\tif taken == 0 {\n\t\t\t\t// no eligible targets for this receiver, try with next receiver\n\t\t\t\treceivers = append(receivers[1:], r) // rotate\n\t\t\t\tif len(receivers) == 1 {\n\t\t\t\t\t// only 
one receiver left, but no eligible targets => donor is stuck\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn moves\n}\n\n// calculateMembersQuota calculates the quota for each member based on the total load\n// the quota is the average load per member\n// the remainder is distributed evenly among the members\nfunc calculateMembersQuota(members map[string]*Member, total int64) map[string]int64 {\n\tif total == 0 {\n\t\t// no load, no quota\n\t\tres := make(map[string]int64, len(members))\n\t\tfor id := range members {\n\t\t\tres[id] = 0\n\t\t}\n\t\treturn res\n\t}\n\tif len(members) == 0 {\n\t\treturn nil\n\t}\n\tn := int64(len(members))\n\tbase := total / n\n\trem := total % n\n\tids := make([]string, 0, n)\n\tfor id := range members {\n\t\tids = append(ids, id)\n\t}\n\tsort.Strings(ids)\n\tquota := make(map[string]int64)\n\tfor i, id := range ids {\n\t\tif i < int(rem) {\n\t\t\tquota[id] = base + 1\n\t\t} else {\n\t\t\tquota[id] = base\n\t\t}\n\t}\n\treturn quota\n}\n\ntype donor struct {\n\t// member id\n\tid string\n\t// surplus load\n\tsurplus int64\n\t// copy of targets\n\tpool []string\n}\n\ntype receiver struct {\n\t// member id\n\tid string\n\t// need load\n\tneed int64\n}\n"
  },
  {
    "path": "pkg/collector/managers/cluster/utils.go",
    "content": "package cluster_manager\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc targetsLockPrefix(clusterName string) string {\n\treturn fmt.Sprintf(\"gnmic/%s/targets\", clusterName)\n}\n\nfunc targetLockKey(target, clusterName string) string {\n\treturn fmt.Sprintf(\"gnmic/%s/targets/%s\", clusterName, target)\n}\n\nfunc GetAPIScheme(member *Member) string {\n\tif member == nil {\n\t\treturn httpScheme\n\t}\n\tfor _, lb := range member.Labels {\n\t\tparts := strings.SplitN(lb, \"=\", 2)\n\t\tif len(parts) == 2 && parts[0] == protocolLabel {\n\t\t\tif parts[1] == \"https\" {\n\t\t\t\treturn httpsScheme\n\t\t\t} else {\n\t\t\t\treturn httpScheme\n\t\t\t}\n\t\t}\n\t}\n\treturn httpScheme\n}\n\nfunc getMemberAddress(member *Member) string {\n\treturn GetAPIScheme(member) + \"://\" + member.Address\n}\n\nfunc jittered(d time.Duration) time.Duration {\n\tj := time.Duration(rand.Int63n(int64(float64(d) * recampaignJitterRatio)))\n\treturn d + j\n}\n"
  },
  {
    "path": "pkg/collector/managers/inputs/inputs_manager.go",
    "content": "package inputs_manager\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"log/slog\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\tcollstore \"github.com/openconfig/gnmic/pkg/collector/store\"\n\t\"github.com/openconfig/gnmic/pkg/inputs\"\n\t\"github.com/openconfig/gnmic/pkg/logging\"\n\t\"github.com/openconfig/gnmic/pkg/pipeline\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\ntype ManagedInput struct {\n\tsync.RWMutex\n\tName string\n\tImpl inputs.Input\n\tCfg  map[string]any\n}\n\ntype InputsManager struct {\n\tctx            context.Context\n\tstore          *collstore.Store\n\tinputFactories map[string]inputs.Initializer\n\tpipeline       chan *pipeline.Msg\n\tlogger         *slog.Logger\n\n\tmu              sync.RWMutex\n\tinputs          map[string]*ManagedInput\n\tprocessorsInUse map[string]map[string]struct{} // processor name -> input names\n}\n\nfunc NewInputsManager(ctx context.Context, store *collstore.Store, pipeline chan *pipeline.Msg) *InputsManager {\n\treturn &InputsManager{\n\t\tctx:             ctx,\n\t\tstore:           store,\n\t\tpipeline:        pipeline,\n\t\tinputFactories:  inputs.Inputs,\n\t\tinputs:          map[string]*ManagedInput{},\n\t\tprocessorsInUse: make(map[string]map[string]struct{}),\n\t}\n}\n\nfunc (mgr *InputsManager) Start(wg *sync.WaitGroup) error {\n\tmgr.logger = logging.NewLogger(mgr.store.Config, \"component\", \"inputs-manager\")\n\tmgr.logger.Info(\"starting inputs manager\")\n\tinputsCh, inputsCancel, err := mgr.store.Config.Watch(\"inputs\",\n\t\tstore.WithInitialReplay[any]())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// watch processors config changes (update only)\n\tprocsCh, processorsCancel, err := mgr.store.Config.Watch(\"processors\",\n\t\tstore.WithEventTypes[any](store.EventTypeUpdate))\n\tif err != nil {\n\t\treturn err\n\t}\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdefer inputsCancel()\n\t\tdefer processorsCancel()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase 
<-mgr.ctx.Done():\n\t\t\t\treturn\n\t\t\tcase ev, ok := <-inputsCh:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tmgr.logger.Info(\"got input event\", \"event\", ev)\n\t\t\t\tcfg, ok := ev.Object.(map[string]any)\n\t\t\t\tif !ok {\n\t\t\t\t\tmgr.logger.Error(\"invalid input config\", \"event\", ev)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tswitch ev.EventType {\n\t\t\t\tcase store.EventTypeCreate:\n\t\t\t\t\tmgr.createInput(ev.Name, cfg)\n\t\t\t\tcase store.EventTypeUpdate:\n\t\t\t\t\tmgr.updateInput(ev.Name, cfg)\n\t\t\t\tcase store.EventTypeDelete:\n\t\t\t\t\tmgr.DeleteInput(ev.Name)\n\t\t\t\t}\n\t\t\tcase ev, ok := <-procsCh:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcfg, ok := ev.Object.(map[string]any)\n\t\t\t\tif !ok {\n\t\t\t\t\tmgr.logger.Error(\"invalid processor config\", \"event\", ev)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tswitch ev.EventType {\n\t\t\t\tcase store.EventTypeUpdate:\n\t\t\t\t\tmgr.updateProcessor(ev.Name, cfg)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc (mgr *InputsManager) Stop() {\n\tmgr.mu.Lock()\n\tdefer mgr.mu.Unlock()\n\tfor _, mi := range mgr.inputs {\n\t\tmgr.setInputState(mi.Name, collstore.StateStopped, \"\")\n\t\terr := mi.Impl.Close()\n\t\tif err != nil {\n\t\t\tmgr.logger.Error(\"failed to stop input\", \"name\", mi.Name, \"error\", err)\n\t\t}\n\t}\n}\n\nfunc (mgr *InputsManager) createInput(name string, cfg map[string]any) {\n\ttyp, _ := cfg[\"type\"].(string)\n\tf := mgr.inputFactories[typ]\n\tif f == nil {\n\t\tmgr.setInputState(name, collstore.StateFailed, fmt.Sprintf(\"unknown input type: %s\", typ))\n\t\treturn\n\t}\n\timpl := f()\n\tif err := impl.Start(mgr.ctx, name, cfg,\n\t\tinputs.WithLogger(log.New(os.Stdout, \"\", log.LstdFlags)),\n\t\tinputs.WithConfigStore(mgr.store.Config),\n\t\tinputs.WithPipeline(mgr.pipeline),\n\t); err != nil {\n\t\tmgr.setInputState(name, collstore.StateFailed, err.Error())\n\t\treturn\n\t}\n\tprocs := extractProcessors(cfg)\n\tmi := 
&ManagedInput{Name: name, Impl: impl, Cfg: cfg}\n\tmgr.mu.Lock()\n\tmgr.trackProcessorsInUse(name, procs)\n\tmgr.inputs[name] = mi\n\tmgr.mu.Unlock()\n\tmgr.setInputState(name, collstore.StateRunning, \"\")\n}\n\nfunc (mgr *InputsManager) updateInput(name string, cfg map[string]any) {\n\tmgr.mu.Lock()\n\tdefer mgr.mu.Unlock()\n\tmi, ok := mgr.inputs[name]\n\tif !ok {\n\t\tmgr.createInput(name, cfg)\n\t\treturn\n\t}\n\tmgr.logger.Info(\"updating input\", \"name\", name, \"cfg\", cfg)\n\tmi.Lock()\n\tdefer mi.Unlock()\n\terr := mi.Impl.Update(cfg)\n\tif err != nil {\n\t\tmgr.logger.Error(\"failed to update input\", \"name\", name, \"error\", err)\n\t\treturn\n\t}\n\toldProcs := extractProcessors(mi.Cfg)\n\tnewProcs := extractProcessors(cfg)\n\tmgr.logger.Info(\"tracking input processors in use\", \"name\", name, \"oldProcs\", oldProcs, \"newProcs\", newProcs)\n\tmgr.untrackProcessorsInUse(name, oldProcs)\n\tmgr.trackProcessorsInUse(name, newProcs)\n\tmgr.logger.Info(\"updated input\", \"name\", name, \"cfg\", cfg)\n\tmi.Cfg = cfg\n\tmgr.inputs[name] = mi\n}\n\nfunc (mgr *InputsManager) DeleteInput(name string) error {\n\tmgr.mu.Lock()\n\tdefer mgr.mu.Unlock()\n\tmgr.logger.Info(\"finding input\", \"name\", name)\n\tif mi, ok := mgr.inputs[name]; ok {\n\t\tmgr.logger.Info(\"stopping input\", \"name\", name)\n\t\tmgr.setInputState(name, collstore.StateStopping, \"\")\n\t\terr := mi.Impl.Close()\n\t\tif err != nil {\n\t\t\tmgr.logger.Error(\"failed to close input\", \"name\", name, \"error\", err)\n\t\t\treturn fmt.Errorf(\"failed to close input: %w\", err)\n\t\t}\n\t\tprocs := extractProcessors(mi.Cfg)\n\t\tmgr.untrackProcessorsInUse(name, procs)\n\t\tmgr.setInputState(name, collstore.StateStopped, \"\")\n\t\tdelete(mgr.inputs, name)\n\t\tmgr.store.Config.Delete(\"inputs\", name)\n\t}\n\tmgr.store.State.Delete(collstore.KindInputs, name)\n\treturn nil\n}\n\nfunc extractProcessors(cfg map[string]any) []string {\n\tv, ok := cfg[\"event-processors\"]\n\tif !ok 
{\n\t\treturn nil\n\t}\n\tswitch v := v.(type) {\n\tcase []any:\n\t\tout := make([]string, 0, len(v))\n\t\tfor _, it := range v {\n\t\t\tif s, ok := it.(string); ok {\n\t\t\t\tout = append(out, s)\n\t\t\t}\n\t\t}\n\t\treturn out\n\tcase []string:\n\t\treturn v\n\t}\n\n\treturn nil\n}\n\nfunc (mgr *InputsManager) trackProcessorsInUse(in string, procs []string) {\n\tfor _, p := range procs {\n\t\tif mgr.processorsInUse[p] == nil {\n\t\t\tmgr.processorsInUse[p] = make(map[string]struct{})\n\t\t}\n\t\tmgr.processorsInUse[p][in] = struct{}{}\n\t}\n}\n\nfunc (mgr *InputsManager) untrackProcessorsInUse(in string, procs []string) {\n\tfor _, p := range procs {\n\t\tif users, ok := mgr.processorsInUse[p]; ok {\n\t\t\tdelete(users, in)\n\t\t\tif len(users) == 0 {\n\t\t\t\tdelete(mgr.processorsInUse, p)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (mgr *InputsManager) ProcessorInUse(name string) bool {\n\tmgr.mu.RLock()\n\tdefer mgr.mu.RUnlock()\n\tusers, ok := mgr.processorsInUse[name]\n\tif !ok {\n\t\treturn false\n\t}\n\treturn len(users) > 0\n}\n\nfunc (mgr *InputsManager) updateProcessor(name string, cfg map[string]any) {\n\tmgr.mu.Lock()\n\tdefer mgr.mu.Unlock()\n\tfor _, mi := range mgr.inputs {\n\t\terr := mi.updateProcessor(name, cfg)\n\t\tif err != nil {\n\t\t\tmgr.logger.Error(\"failed to update event processor for input\", \"processorName\", name, \"inputName\", mi.Name, \"error\", err)\n\t\t}\n\t}\n}\n\nfunc (mi *ManagedInput) updateProcessor(name string, cfg map[string]any) error {\n\tmi.Lock()\n\tdefer mi.Unlock()\n\treturn mi.Impl.UpdateProcessor(name, cfg)\n}\n\n// State store helpers\n\nfunc (mgr *InputsManager) setInputState(name, state, failedReason string) {\n\tis := &collstore.InputState{\n\t\tComponentState: collstore.ComponentState{\n\t\t\t// Name:          name,\n\t\t\tIntendedState: collstore.IntendedStateEnabled,\n\t\t\tState:         state,\n\t\t\tFailedReason:  failedReason,\n\t\t\tLastUpdated:   
time.Now(),\n\t\t},\n\t}\n\tmgr.store.State.Set(collstore.KindInputs, name, is)\n}\n\n// GetInputState returns the runtime state of an input from the state store.\nfunc (mgr *InputsManager) GetInputState(name string) *collstore.InputState {\n\tv, ok, err := mgr.store.State.Get(collstore.KindInputs, name)\n\tif err != nil || !ok {\n\t\treturn nil\n\t}\n\tis, ok := v.(*collstore.InputState)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn is\n}\n\n// ListInputStates returns all input states from the state store.\nfunc (mgr *InputsManager) ListInputStates() []*collstore.InputState {\n\tstates := make([]*collstore.InputState, 0)\n\tmgr.store.State.List(collstore.KindInputs, func(name string, v any) bool {\n\t\tif is, ok := v.(*collstore.InputState); ok {\n\t\t\tstates = append(states, is)\n\t\t}\n\t\treturn false\n\t})\n\treturn states\n}\n"
  },
  {
    "path": "pkg/collector/managers/outputs/outputs_manager.go",
    "content": "package outputs_manager\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"log/slog\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/cache\"\n\tcollstore \"github.com/openconfig/gnmic/pkg/collector/store\"\n\t\"github.com/openconfig/gnmic/pkg/logging\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\t\"github.com/openconfig/gnmic/pkg/pipeline\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/zestor-dev/zestor/store\"\n\t\"google.golang.org/protobuf/proto\"\n)\n\ntype ManagedOutput struct {\n\tsync.RWMutex\n\tName string\n\tImpl outputs.Output\n\tCfg  map[string]any\n}\n\n// OutputsManager runs outputs.\ntype OutputsManager struct {\n\tctx   context.Context\n\tstore *collstore.Store\n\n\tOutputsFactory map[string]outputs.Initializer\n\tin             <-chan *pipeline.Msg // pipe from targets and/or inputs\n\n\tmu              sync.RWMutex\n\toutputs         map[string]*ManagedOutput\n\tprocessorsInUse map[string]map[string]struct{} // processor name -> output names\n\n\tcache  cache.Cache\n\tlogger *slog.Logger\n\treg    *prometheus.Registry\n\tstats  *outputStats\n}\n\ntype outputStats struct {\n\tmsgCount    *prometheus.CounterVec\n\tmsgCountErr *prometheus.CounterVec\n}\n\nfunc NewOutputsManager(ctx context.Context, store *collstore.Store, pipe <-chan *pipeline.Msg, reg *prometheus.Registry) *OutputsManager {\n\treturn &OutputsManager{\n\t\tctx:             ctx,\n\t\tstore:           store,\n\t\tOutputsFactory:  outputs.Outputs,\n\t\tin:              pipe,\n\t\toutputs:         map[string]*ManagedOutput{},\n\t\tprocessorsInUse: make(map[string]map[string]struct{}),\n\t\tstats:           newOutputStats(),\n\t\treg:             reg,\n\t}\n}\n\nfunc (mgr *OutputsManager) Start(cache cache.Cache, wg *sync.WaitGroup) error {\n\tmgr.logger = logging.NewLogger(mgr.store.Config, \"component\", 
\"outputs-manager\")\n\tmgr.logger.Info(\"starting outputs manager\")\n\tmgr.cache = cache\n\t// register metrics\n\tmgr.registerMetrics()\n\t// watch outputs config changes\n\toutputCh, outputsCancel, err := mgr.store.Config.Watch(\"outputs\",\n\t\tstore.WithInitialReplay[any]())\n\tif err != nil {\n\t\treturn err\n\t}\n\t// watch processors config changes (update only)\n\tprocsCh, processorsCancel, err := mgr.store.Config.Watch(\"processors\",\n\t\tstore.WithEventTypes[any](store.EventTypeUpdate))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twg.Add(1)\n\t// forward incoming events to all running outputs\n\t// that are in the list of outputs to write to.\n\tgo mgr.writeLoop(wg)\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdefer outputsCancel()\n\t\tdefer processorsCancel()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-mgr.ctx.Done():\n\t\t\t\treturn\n\t\t\tcase ev, ok := <-outputCh:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tmgr.logger.Info(\"got output event\", \"event\", ev)\n\t\t\t\tcfg, ok := ev.Object.(map[string]any)\n\t\t\t\tif !ok {\n\t\t\t\t\tmgr.logger.Error(\"invalid output config\", \"event\", ev)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tswitch ev.EventType {\n\t\t\t\tcase store.EventTypeCreate:\n\t\t\t\t\tmgr.createOutput(ev.Name, cfg)\n\t\t\t\tcase store.EventTypeUpdate:\n\t\t\t\t\tmgr.updateOutput(ev.Name, cfg)\n\t\t\t\tcase store.EventTypeDelete:\n\t\t\t\t\tmgr.DeleteOutput(ev.Name)\n\t\t\t\t}\n\t\t\tcase ev, ok := <-procsCh:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tmgr.logger.Info(\"got processor event\", \"event\", ev)\n\t\t\t\tcfg, ok := ev.Object.(map[string]any)\n\t\t\t\tif !ok {\n\t\t\t\t\tmgr.logger.Error(\"invalid processor config\", \"event\", ev)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tswitch ev.EventType {\n\t\t\t\tcase store.EventTypeUpdate:\n\t\t\t\t\tmgr.updateProcessor(ev.Name, cfg)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (mgr *OutputsManager) writeLoop(wg *sync.WaitGroup) 
{\n\tdefer wg.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-mgr.ctx.Done():\n\t\t\treturn\n\t\tcase e, ok := <-mgr.in:\n\t\t\tif !ok {\n\t\t\t\tmgr.logger.Debug(\"pipeline channel closed\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmgr.logger.Debug(\"got pipeline message\", \"message\", e) // Debug\n\t\t\tgo mgr.write(e)\n\t\t\tif mgr.cache != nil {\n\t\t\t\tgo mgr.cache.Write(mgr.ctx, e.Meta[\"subscription-name\"], e.Msg)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (mgr *OutputsManager) write(e *pipeline.Msg) {\n\touts := mgr.getOutputsForTarget(e.Outputs)\n\toutsNames := make([]string, 0, len(outs))\n\tif mgr.logger.Enabled(mgr.ctx, slog.LevelDebug) {\n\t\tfor _, o := range outs {\n\t\t\toutsNames = append(outsNames, o.Name)\n\t\t}\n\t\tmgr.logger.Debug(\"writing msg to outputs\", \"outputs\", outsNames)\n\t}\n\tfor _, mo := range outs {\n\t\tmgr.stats.msgCount.WithLabelValues(mo.Name).Inc()\n\t\tif len(e.Events) > 0 { // from inputs\n\t\t\tfor _, ev := range e.Events {\n\t\t\t\tmo.Impl.WriteEvent(mgr.ctx, ev)\n\t\t\t}\n\t\t} else {\n\t\t\t// from targets or inputs\n\t\t\tmo.Impl.Write(mgr.ctx, e.Msg, e.Meta)\n\t\t}\n\t}\n}\n\nfunc (mgr *OutputsManager) updateProcessor(name string, cfg map[string]any) {\n\tmgr.mu.Lock()\n\tdefer mgr.mu.Unlock()\n\tfor _, mo := range mgr.outputs {\n\t\terr := mo.updateProcessor(name, cfg)\n\t\tif err != nil {\n\t\t\tmgr.logger.Error(\"failed to update event processor for output\", \"processorName\", name, \"outputName\", mo.Name, \"error\", err)\n\t\t}\n\t}\n}\n\nfunc (mo *ManagedOutput) updateProcessor(name string, cfg map[string]any) error {\n\tmo.Lock()\n\tdefer mo.Unlock()\n\treturn mo.Impl.UpdateProcessor(name, cfg)\n}\n\nfunc (mgr *OutputsManager) getOutputsForTarget(outputs map[string]struct{}) []*ManagedOutput {\n\tmgr.mu.RLock()\n\tdefer mgr.mu.RUnlock()\n\t// all outputs\n\tif len(outputs) == 0 {\n\t\touts := make([]*ManagedOutput, 0, len(mgr.outputs))\n\t\tfor _, mo := range mgr.outputs {\n\t\t\tif mgr.getOutputStateStr(mo.Name) == 
collstore.StateRunning {\n\t\t\t\touts = append(outs, mo)\n\t\t\t}\n\t\t}\n\t\treturn outs\n\t}\n\t// specific outputs per target\n\touts := make([]*ManagedOutput, 0, len(outputs))\n\tfor name, mo := range mgr.outputs {\n\t\tif _, ok := outputs[name]; !ok || mgr.getOutputStateStr(name) != collstore.StateRunning {\n\t\t\tcontinue\n\t\t}\n\t\touts = append(outs, mo)\n\t}\n\treturn outs\n}\n\nfunc (mgr *OutputsManager) createOutput(name string, cfg map[string]any) {\n\ttyp, _ := cfg[\"type\"].(string)\n\tf := mgr.OutputsFactory[typ]\n\tif f == nil {\n\t\tmgr.logger.Error(\"unknown output type\", \"name\", name, \"type\", typ)\n\t\tmgr.setOutputState(name, collstore.StateFailed, fmt.Sprintf(\"unknown output type: %s\", typ))\n\t\treturn\n\t}\n\timpl := f()\n\n\topts := make([]outputs.Option, 0, 2)\n\topts = append(opts,\n\t\toutputs.WithName(name),\n\t\toutputs.WithConfigStore(mgr.store.Config),\n\t\toutputs.WithLogger(log.New(os.Stdout, \"\", log.LstdFlags)), // temporary logger\n\t)\n\n\tclustering, ok, err := mgr.store.Config.Get(\"global\", \"clustering\")\n\tif err != nil {\n\t\tmgr.logger.Error(\"failed to get clustering for output\", \"name\", name, \"error\", err)\n\t\treturn\n\t}\n\tif ok {\n\t\tclus, ok := clustering.(map[string]any)\n\t\tif cname, cOk := clus[\"cluster-name\"].(string); cOk && ok {\n\t\t\topts = append(opts, outputs.WithClusterName(cname))\n\t\t}\n\t}\n\n\terr = impl.Init(mgr.ctx, name, cfg, opts...)\n\tif err != nil {\n\t\tmgr.logger.Error(\"failed to init output\", \"name\", name, \"error\", err)\n\t\tmgr.setOutputState(name, collstore.StateFailed, err.Error())\n\t\treturn\n\t}\n\tprocs := extractProcessors(cfg)\n\tmo := &ManagedOutput{Name: name, Impl: impl, Cfg: cfg}\n\tmgr.mu.Lock()\n\tmgr.trackProcessorsInUse(name, procs)\n\tmgr.outputs[name] = mo\n\tmgr.mu.Unlock()\n\tmgr.setOutputState(name, collstore.StateRunning, \"\")\n}\n\nfunc (mgr *OutputsManager) updateOutput(name string, cfg map[string]any) {\n\tmgr.mu.Lock()\n\tdefer 
mgr.mu.Unlock()\n\tmo, ok := mgr.outputs[name]\n\tif !ok {\n\t\tmgr.createOutput(name, cfg)\n\t\treturn\n\t}\n\n\tmgr.logger.Info(\"updating output\", \"name\", name, \"cfg\", cfg)\n\tmo.Lock()\n\tdefer mo.Unlock()\n\terr := mo.Impl.Update(mgr.ctx, cfg)\n\tif err != nil {\n\t\tmgr.logger.Error(\"failed to update output\", \"name\", name, \"error\", err)\n\t\treturn\n\t}\n\toldProcs := extractProcessors(mo.Cfg)\n\tnewProcs := extractProcessors(cfg)\n\tmgr.logger.Info(\"tracking output processors in use\", \"name\", name, \"oldProcs\", oldProcs, \"newProcs\", newProcs)\n\tmgr.untrackProcessorsInUse(name, oldProcs)\n\tmgr.trackProcessorsInUse(name, newProcs)\n\tmgr.logger.Info(\"updated output\", \"name\", name, \"cfg\", cfg)\n\tmo.Cfg = cfg\n\tmgr.outputs[name] = mo\n}\n\nfunc (mgr *OutputsManager) StopOutput(name string) error {\n\tmgr.mu.Lock()\n\tdefer mgr.mu.Unlock()\n\tmgr.logger.Info(\"finding output\", \"name\", name)\n\tif mo, ok := mgr.outputs[name]; ok {\n\t\tmgr.logger.Info(\"stopping output\", \"name\", name)\n\t\tmgr.setOutputState(name, collstore.StateStopping, \"\")\n\t\terr := mo.Impl.Close()\n\t\tif err != nil {\n\t\t\tmgr.logger.Error(\"failed to close output\", \"name\", name, \"error\", err)\n\t\t\treturn fmt.Errorf(\"failed to close output: %w\", err)\n\t\t}\n\t\tprocs := extractProcessors(mo.Cfg)\n\t\tmgr.untrackProcessorsInUse(name, procs)\n\t\tmgr.setOutputState(name, collstore.StateStopped, \"\")\n\t\tdelete(mgr.outputs, name)\n\t}\n\treturn nil\n}\n\nfunc (mgr *OutputsManager) DeleteOutput(name string) error {\n\tmgr.mu.Lock()\n\tdefer mgr.mu.Unlock()\n\tmgr.logger.Info(\"deleting output\", \"name\", name)\n\tif mo, ok := mgr.outputs[name]; ok {\n\t\tmgr.logger.Info(\"stopping output\", \"name\", name)\n\t\tmgr.setOutputState(name, collstore.StateStopping, \"\")\n\t\terr := mo.Impl.Close()\n\t\tif err != nil {\n\t\t\tmgr.logger.Error(\"failed to close output\", \"name\", name, \"error\", err)\n\t\t\treturn fmt.Errorf(\"failed to close 
output: %w\", err)\n\t\t}\n\t\tprocs := extractProcessors(mo.Cfg)\n\t\tmgr.untrackProcessorsInUse(name, procs)\n\t\tmgr.setOutputState(name, collstore.StateStopped, \"\")\n\t\tdelete(mgr.outputs, name)\n\t\tmgr.store.Config.Delete(\"outputs\", name)\n\t}\n\tmgr.store.State.Delete(collstore.KindOutputs, name)\n\treturn nil\n}\n\nfunc (mgr *OutputsManager) Stop() {\n\tmgr.mu.Lock()\n\tdefer mgr.mu.Unlock()\n\tfor _, mo := range mgr.outputs {\n\t\tmgr.setOutputState(mo.Name, collstore.StateStopped, \"\")\n\t\terr := mo.Impl.Close()\n\t\tif err != nil {\n\t\t\tmgr.logger.Error(\"failed to stop output\", \"name\", mo.Name, \"error\", err)\n\t\t}\n\t}\n}\n\nfunc newOutputStats() *outputStats {\n\treturn &outputStats{\n\t\tmsgCount: prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: \"gnmic\",\n\t\t\tSubsystem: \"outputs\",\n\t\t\tName:      \"msg_sent_to_output_count\",\n\t\t\tHelp:      \"Number of messages sent to the output\",\n\t\t}, []string{\"name\"}),\n\t\tmsgCountErr: prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: \"gnmic\",\n\t\t\tSubsystem: \"outputs\",\n\t\t\tName:      \"msg_failed_to_sent_to_output_count_error\",\n\t\t\tHelp:      \"Number of messages sent to the output with error\",\n\t\t}, []string{\"name\"}),\n\t}\n}\n\nfunc (mgr *OutputsManager) registerMetrics() {\n\tif mgr.reg == nil {\n\t\treturn\n\t}\n\tmgr.reg.MustRegister(mgr.stats.msgCount)\n\tmgr.reg.MustRegister(mgr.stats.msgCountErr)\n}\n\nfunc (mgr *OutputsManager) WriteToCache(ctx context.Context, msg *pipeline.Msg) {\n\tif mgr.cache == nil {\n\t\treturn\n\t}\n\tif msg.Msg == nil {\n\t\treturn\n\t}\n\tswitch msg.Msg.(type) {\n\tcase *gnmi.SubscribeResponse:\n\t\tsubName, ok := msg.Meta[\"subscription-name\"]\n\t\tif !ok || subName == \"\" {\n\t\t\tsubName = \"default\"\n\t\t}\n\t\ttargetName := utils.GetHost(msg.Meta[\"source\"])\n\t\tmgr.cache.Write(ctx, subName, addTargetToMsg(msg.Msg, targetName))\n\t}\n}\n\nfunc addTargetToMsg(msg proto.Message, 
targetName string) proto.Message {\n\tswitch msg := msg.(type) {\n\tcase *gnmi.SubscribeResponse:\n\t\tswitch rsp := msg.Response.(type) {\n\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\tif rsp.Update.GetPrefix() == nil {\n\t\t\t\trsp.Update.Prefix = new(gnmi.Path)\n\t\t\t}\n\t\t\trsp.Update.Prefix.Target = targetName\n\t\t}\n\t}\n\treturn msg\n}\n\nfunc extractProcessors(cfg map[string]any) []string {\n\tv, ok := cfg[\"event-processors\"]\n\tif !ok {\n\t\treturn nil\n\t}\n\tswitch v := v.(type) {\n\tcase []any:\n\t\tout := make([]string, 0, len(v))\n\t\tfor _, it := range v {\n\t\t\tif s, ok := it.(string); ok {\n\t\t\t\tout = append(out, s)\n\t\t\t}\n\t\t}\n\t\treturn out\n\tcase []string:\n\t\treturn v\n\t}\n\n\treturn nil\n}\n\nfunc (mgr *OutputsManager) trackProcessorsInUse(out string, procs []string) {\n\tfor _, p := range procs {\n\t\tif mgr.processorsInUse[p] == nil {\n\t\t\tmgr.processorsInUse[p] = make(map[string]struct{})\n\t\t}\n\t\tmgr.processorsInUse[p][out] = struct{}{}\n\t}\n}\n\nfunc (mgr *OutputsManager) untrackProcessorsInUse(out string, procs []string) {\n\tfor _, p := range procs {\n\t\tif users, ok := mgr.processorsInUse[p]; ok {\n\t\t\tdelete(users, out)\n\t\t\tif len(users) == 0 {\n\t\t\t\tdelete(mgr.processorsInUse, p)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (mgr *OutputsManager) ProcessorInUse(name string) bool {\n\tmgr.mu.RLock()\n\tdefer mgr.mu.RUnlock()\n\tusers, ok := mgr.processorsInUse[name]\n\tif !ok {\n\t\treturn false\n\t}\n\treturn len(users) > 0\n}\n\n// State store helpers\n\nfunc (mgr *OutputsManager) setOutputState(name, state, failedReason string) {\n\tos := &collstore.OutputState{\n\t\tComponentState: collstore.ComponentState{\n\t\t\t// Name:          name,\n\t\t\tIntendedState: collstore.IntendedStateEnabled,\n\t\t\tState:         state,\n\t\t\tFailedReason:  failedReason,\n\t\t\tLastUpdated:   time.Now(),\n\t\t},\n\t}\n\tmgr.store.State.Set(collstore.KindOutputs, name, os)\n}\n\nfunc (mgr *OutputsManager) getOutputStateStr(name 
string) string {\n\tos := mgr.GetOutputState(name)\n\tif os == nil {\n\t\treturn \"\"\n\t}\n\treturn os.State\n}\n\n// GetOutputState returns the runtime state of an output from the state store.\nfunc (mgr *OutputsManager) GetOutputState(name string) *collstore.OutputState {\n\tv, ok, err := mgr.store.State.Get(collstore.KindOutputs, name)\n\tif err != nil || !ok {\n\t\treturn nil\n\t}\n\tos, ok := v.(*collstore.OutputState)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn os\n}\n\n// ListOutputStates returns all output states from the state store.\nfunc (mgr *OutputsManager) ListOutputStates() []*collstore.OutputState {\n\tstates := make([]*collstore.OutputState, 0)\n\tmgr.store.State.List(collstore.KindOutputs, func(name string, v any) bool {\n\t\tif os, ok := v.(*collstore.OutputState); ok {\n\t\t\tstates = append(states, os)\n\t\t}\n\t\treturn false\n\t})\n\treturn states\n}\n"
  },
  {
    "path": "pkg/collector/managers/targets/cluster.go",
    "content": "package targets_manager\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n)\n\nfunc (tm *TargetsManager) isClustering() (*config.Clustering, bool, error) {\n\tclusterCfg, ok, err := tm.store.Config.Get(\"clustering\", \"clustering\")\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tif !ok {\n\t\treturn nil, false, nil\n\t}\n\tclustering, ok := clusterCfg.(*config.Clustering)\n\tif ok {\n\t\treturn clustering, true, nil\n\t}\n\treturn nil, false, nil\n}\n\nfunc (tm *TargetsManager) amIAssigned(name string) bool {\n\tif !tm.incluster {\n\t\treturn true // run all targets in standalone mode\n\t}\n\ttm.mas.RLock()\n\tdefer tm.mas.RUnlock()\n\t_, ok := tm.assignments[name]\n\treturn ok\n}\n\nfunc (tm *TargetsManager) setAssigned(name string, v bool) {\n\ttm.mas.Lock()\n\tif tm.assignments == nil {\n\t\ttm.assignments = map[string]struct{}{}\n\t}\n\tif v {\n\t\ttm.assignments[name] = struct{}{}\n\t} else {\n\t\tdelete(tm.assignments, name)\n\t}\n\ttm.mas.Unlock()\n\t//\n\tif v {\n\t\tcfg, ok, err := tm.store.Config.Get(\"targets\", name)\n\t\tif err != nil {\n\t\t\ttm.logger.Error(\"failed to get target\", \"target\", name, \"error\", err)\n\t\t\treturn\n\t\t}\n\t\tif ok {\n\t\t\ttcfg, tok := cfg.(*types.TargetConfig)\n\t\t\tif tok {\n\t\t\t\ttm.apply(name, tcfg)\n\t\t\t} else {\n\t\t\t\ttm.logger.Error(\"target config is not a types.TargetConfig\", \"target\", name, \"config\", cfg)\n\t\t\t}\n\t\t} else {\n\t\t\ttm.logger.Error(\" assignedtarget config not found\", \"target\", name)\n\t\t}\n\t} else {\n\t\ttm.remove(name)\n\t}\n\n}\n\nfunc (tm *TargetsManager) targetLockKey(target string) string {\n\treturn fmt.Sprintf(\"gnmic/%s/targets/%s\", tm.clustering.ClusterName, target)\n}\n"
  },
  {
    "path": "pkg/collector/managers/targets/loader.go",
    "content": "package targets_manager\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/openconfig/gnmic/pkg/loaders\"\n)\n\nfunc (tm *TargetsManager) initLoader(cfg map[string]any) (loaders.TargetLoader, error) {\n\tloaderType, ok := cfg[\"type\"].(string)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"loader type is required\")\n\t}\n\tfor _, lt := range loaders.LoadersTypes {\n\t\tif lt == loaderType {\n\t\t\tinit, ok := loaders.Loaders[loaderType]\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unknown loader type %q\", loaderType)\n\t\t\t}\n\t\t\tloader := init()\n\t\t\treturn loader, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"unknown loader type %q\", loaderType)\n}\n\nfunc (tm *TargetsManager) startLoader(ctx context.Context, loader loaders.TargetLoader) {\n\tch := loader.Start(ctx)\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\ttm.logger.Info(\"loader stopped\")\n\t\t\treturn\n\t\tcase targetOp := <-ch:\n\t\t\tfor _, add := range targetOp.Add {\n\t\t\t\t_, err := tm.store.Config.Set(\"targets\", add.Name, add)\n\t\t\t\tif err != nil {\n\t\t\t\t\ttm.logger.Error(\"failed to add target from loader\", \"error\", err, \"target\", add.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, del := range targetOp.Del {\n\t\t\t\t_, _, err := tm.store.Config.Delete(\"targets\", del)\n\t\t\t\tif err != nil {\n\t\t\t\t\ttm.logger.Error(\"failed to delete target from loader\", \"error\", err, \"target\", del)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/collector/managers/targets/metrics.go",
    "content": "package targets_manager\n\nimport (\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nconst (\n\ttargetMetricsUpdatePeriod = 10 * time.Second\n)\n\ntype targetConnectionState int\n\nconst (\n\ttargetConnectionStateUnknown targetConnectionState = iota\n\ttargetConnectionStateIdle\n\ttargetConnectionStateConnecting\n\ttargetConnectionStateReady\n\ttargetConnectionStateTransientFailure\n\ttargetConnectionStateShutdown\n)\n\nconst (\n\ttargetConnectionStateUnknownStr          = \"UNKNOWN\"\n\ttargetConnectionStateIdleStr             = \"IDLE\"\n\ttargetConnectionStateConnectingStr       = \"CONNECTING\"\n\ttargetConnectionStateReadyStr            = \"READY\"\n\ttargetConnectionStateTransientFailureStr = \"TRANSIENT_FAILURE\"\n\ttargetConnectionStateShutdownStr         = \"SHUTDOWN\"\n)\n\nfunc targetConnectionStateFromStr(str string) targetConnectionState {\n\tswitch str {\n\tcase targetConnectionStateUnknownStr:\n\t\treturn targetConnectionStateUnknown\n\tcase targetConnectionStateIdleStr:\n\t\treturn targetConnectionStateIdle\n\tcase targetConnectionStateConnectingStr:\n\t\treturn targetConnectionStateConnecting\n\tcase targetConnectionStateReadyStr:\n\t\treturn targetConnectionStateReady\n\tcase targetConnectionStateTransientFailureStr:\n\t\treturn targetConnectionStateTransientFailure\n\tcase targetConnectionStateShutdownStr:\n\t\treturn targetConnectionStateShutdown\n\t}\n\treturn targetConnectionStateUnknown\n}\n\nfunc (tcs targetConnectionState) String() string {\n\tswitch tcs {\n\tcase targetConnectionStateUnknown:\n\t\treturn targetConnectionStateUnknownStr\n\tcase targetConnectionStateIdle:\n\t\treturn targetConnectionStateIdleStr\n\tcase targetConnectionStateConnecting:\n\t\treturn targetConnectionStateConnectingStr\n\tcase targetConnectionStateReady:\n\t\treturn targetConnectionStateReadyStr\n\tcase targetConnectionStateTransientFailure:\n\t\treturn targetConnectionStateTransientFailureStr\n\tcase 
targetConnectionStateShutdown:\n\t\treturn targetConnectionStateShutdownStr\n\t}\n\treturn \"\"\n}\n\ntype targetsStats struct {\n\tsubscribeResponseReceived *prometheus.CounterVec\n\tdroppedSubscribeResponses *prometheus.CounterVec\n\tsubscriptionFailedCount   *prometheus.CounterVec\n\ttargetUPMetric            *prometheus.GaugeVec\n\ttargetConnStateMetric     *prometheus.GaugeVec\n}\n\nconst (\n\tsubscriptionRequestErrorTypeUnknown string = \"UNKNOWN\"\n\tsubscriptionRequestErrorTypeCONFIG  string = \"CONFIG_ERROR\"\n\tsubscriptionRequestErrorTypeGRPC    string = \"GRPC_ERROR\"\n)\n\nfunc newTargetsStats() *targetsStats {\n\treturn &targetsStats{\n\t\tsubscribeResponseReceived: prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: \"gnmic\",\n\t\t\tSubsystem: \"targets\",\n\t\t\tName:      \"subscribe_response_received_count\",\n\t\t\tHelp:      \"Number of subscribe responses received\",\n\t\t}, []string{\"target\", \"subscription\"}),\n\t\tdroppedSubscribeResponses: prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: \"gnmic\",\n\t\t\tSubsystem: \"targets\",\n\t\t\tName:      \"dropped_subscribe_responses_count\",\n\t\t\tHelp:      \"Number of dropped subscribe responses\",\n\t\t}, []string{\"target\", \"subscription\"}),\n\t\tsubscriptionFailedCount: prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: \"gnmic\",\n\t\t\tSubsystem: \"subscribe\",\n\t\t\tName:      \"number_of_failed_subscribe_request_messages_total\",\n\t\t\tHelp:      \"Total number of failed subscribe requests\",\n\t\t}, []string{\"target\", \"subscription\", \"error_type\"}),\n\t\ttargetUPMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: \"gnmic\",\n\t\t\tSubsystem: \"target\",\n\t\t\tName:      \"up\",\n\t\t\tHelp:      \"Has value 1 if the gNMI target is configured; otherwise, 0.\",\n\t\t}, []string{\"name\"}),\n\t\ttargetConnStateMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: 
\"gnmic\",\n\t\t\tSubsystem: \"target\",\n\t\t\tName:      \"connection_state\",\n\t\t\tHelp:      \"The current gRPC connection state to the target. The value can be one of the following: 0(UNKNOWN), 1 (IDLE), 2 (CONNECTING), 3 (READY), 4 (TRANSIENT_FAILURE), or 5 (SHUTDOWN).\",\n\t\t}, []string{\"name\"}),\n\t}\n}\n\nfunc (tm *TargetsManager) registerMetrics() {\n\ttm.reg.MustRegister(tm.stats.targetUPMetric)\n\ttm.reg.MustRegister(tm.stats.targetConnStateMetric)\n\ttm.reg.MustRegister(tm.stats.subscribeResponseReceived)\n\ttm.reg.MustRegister(tm.stats.droppedSubscribeResponses)\n\ttm.reg.MustRegister(tm.stats.subscriptionFailedCount)\n\n\ttm.mu.RLock()\n\tfor _, mt := range tm.targets {\n\t\ttm.updateTargetMetrics(mt)\n\t}\n\n\ttm.mu.RUnlock()\n\tgo func() {\n\t\tticker := time.NewTicker(targetMetricsUpdatePeriod)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-tm.ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\ttm.mu.RLock()\n\t\t\t\tfor _, mt := range tm.targets {\n\t\t\t\t\ttm.updateTargetMetrics(mt)\n\t\t\t\t}\n\t\t\t\ttm.mu.RUnlock()\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (tm *TargetsManager) updateTargetMetrics(mt *ManagedTarget) {\n\tif mt.T == nil {\n\t\ttm.stats.targetUPMetric.WithLabelValues(mt.Name).Set(0)\n\t\ttm.stats.targetConnStateMetric.WithLabelValues(mt.Name).Set(0)\n\t\treturn\n\t}\n\ttm.stats.targetUPMetric.WithLabelValues(mt.Name).Set(1)\n\ttargetConnState := targetConnectionStateFromStr(mt.T.ConnState())\n\ttm.stats.targetConnStateMetric.WithLabelValues(mt.Name).Set(float64(targetConnState))\n}\n"
  },
  {
    "path": "pkg/collector/managers/targets/targets_manager.go",
    "content": "package targets_manager\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"hash/fnv\"\n\t\"log\"\n\t\"log/slog\"\n\t\"maps\"\n\t\"net\"\n\t\"os\"\n\t\"reflect\"\n\t\"slices\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/target\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\tapiutils \"github.com/openconfig/gnmic/pkg/api/utils\"\n\tcollstore \"github.com/openconfig/gnmic/pkg/collector/store\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/loaders\"\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n\t\"github.com/openconfig/gnmic/pkg/logging\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\t\"github.com/openconfig/gnmic/pkg/pipeline\"\n\t\"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/zestor-dev/zestor/store\"\n\t\"google.golang.org/grpc\"\n)\n\ntype ManagedTarget struct {\n\tsync.RWMutex\n\tName string\n\tcfg  *types.TargetConfig\n\tT    *target.Target\n\n\ttunServer *tunnel.Server\n\t// reader\n\treaderCtx    context.Context\n\treaderCancel context.CancelFunc\n\n\tmu                   *sync.Mutex\n\treadersCfn           map[string]context.CancelFunc\n\treaderWG             sync.WaitGroup\n\tlastError            string // last error message, protected by mu\n\toutputs              map[string]struct{}\n\tappliedSubscriptions []string\n}\n\nfunc (mt *ManagedTarget) setLastError(msg string) {\n\tmt.mu.Lock()\n\tmt.lastError = msg\n\tmt.mu.Unlock()\n}\n\nfunc (mt *ManagedTarget) getLastError() string {\n\tmt.mu.Lock()\n\tdefer mt.mu.Unlock()\n\treturn mt.lastError\n}\n\nfunc (mt *ManagedTarget) clearLastError() {\n\tmt.mu.Lock()\n\tmt.lastError = \"\"\n\tmt.mu.Unlock()\n}\n\nfunc newManagedTarget(name string, cfg *types.TargetConfig, tunServer *tunnel.Server) *ManagedTarget {\n\tnt := target.NewTarget(cfg)\n\tmt := &ManagedTarget{\n\t\tName:                 
name,\n\t\tcfg:                  cfg,\n\t\tT:                    nt,\n\t\ttunServer:            tunServer,\n\t\toutputs:              make(map[string]struct{}, len(cfg.Outputs)),\n\t\tmu:                   new(sync.Mutex),\n\t\treadersCfn:           make(map[string]context.CancelFunc),\n\t\tappliedSubscriptions: make([]string, 0, len(cfg.Subscriptions)),\n\t}\n\tfor _, output := range cfg.Outputs {\n\t\tmt.outputs[output] = struct{}{}\n\t}\n\treturn mt\n}\n\n// TargetsManager owns target lifecycle (connect/stop) and per-target subscriptions hookups (started by SubscriptionsManager).\ntype TargetsManager struct {\n\tctx    context.Context\n\tcancel context.CancelFunc\n\tstore  *collstore.Store\n\t// pipe to outputsManager\n\tout chan *pipeline.Msg\n\t// target state\n\tmu      sync.RWMutex\n\ttargets map[string]*ManagedTarget\n\t// subscriptions\n\tsubscriptions map[string]*types.SubscriptionConfig\n\tts            *tunnelServer\n\tlogger        *slog.Logger\n\tstats         *targetsStats\n\t// clustring\n\tclustering  *config.Clustering\n\tlocker      lockers.Locker\n\tincluster   bool\n\tmas         *sync.RWMutex\n\tassignments map[string]struct{}\n\treg         *prometheus.Registry\n}\n\nfunc NewTargetsManager(ctx context.Context, store *collstore.Store, pipeline chan *pipeline.Msg, reg *prometheus.Registry) *TargetsManager {\n\tctx, cancel := context.WithCancel(ctx)\n\tts := newTunnelServer(store.Config, reg)\n\ttm := &TargetsManager{\n\t\tctx:           ctx,\n\t\tcancel:        cancel,\n\t\tstore:         store,\n\t\tout:           pipeline,\n\t\ttargets:       map[string]*ManagedTarget{},\n\t\tsubscriptions: map[string]*types.SubscriptionConfig{},\n\t\tts:            ts,\n\t\tstats:         newTargetsStats(),\n\t\tmas:           new(sync.RWMutex),\n\t\tassignments:   make(map[string]struct{}),\n\t\treg:           reg,\n\t}\n\ttm.registerMetrics()\n\treturn tm\n}\n\nfunc (tm *TargetsManager) Start(locker lockers.Locker, wg *sync.WaitGroup) error {\n\ttm.logger 
= logging.NewLogger(tm.store.Config, \"component\", \"targets-manager\")\n\ttm.logger.Info(\"starting targets manager\")\n\ttm.locker = locker\n\tclustering, ok, err := tm.isClustering()\n\tif err != nil {\n\t\treturn err\n\t}\n\ttm.incluster = ok && clustering != nil\n\tif tm.incluster {\n\t\ttm.logger.Info(\"clustering is enabled\", \"clustering\", clustering)\n\t\ttm.clustering = clustering\n\t}\n\n\t// start tunnel server\n\tgo func() {\n\t\terr := tm.ts.startTunnelServer(tm.ctx)\n\t\tif err != nil {\n\t\t\ttm.logger.Error(\"failed to start tunnel server\", \"error\", err)\n\t\t}\n\t}()\n\ttm.logger.Info(\"starting targets watcher\")\n\ttargetsCh, targetsCancel, err := tm.store.Config.Watch(\"targets\", store.WithInitialReplay[any]())\n\tif err != nil {\n\t\treturn err\n\t}\n\ttm.logger.Info(\"starting subscriptions watcher\")\n\tsubscriptionsCh, subscriptionsCancel, err := tm.store.Config.Watch(\"subscriptions\", store.WithInitialReplay[any]())\n\tif err != nil {\n\t\treturn err\n\t}\n\tcfg, ok, err := tm.store.Config.Get(\"loader\", \"loader\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get loader config: %w\", err)\n\t}\n\tvar loaderTargetOpCh <-chan *loaders.TargetOperation\n\tvar loaderCfn context.CancelFunc\n\tif ok && cfg != nil {\n\t\tloaderCfg, ok := cfg.(map[string]any)\n\t\tif ok && len(loaderCfg) > 0 {\n\t\t\tloader, err := tm.initLoader(loaderCfg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = loader.Init(tm.ctx, loaderCfg,\n\t\t\t\tlog.New(os.Stderr, \"\", apiutils.DefaultLoggingFlags), // TODO: use logger\n\t\t\t\tloaders.WithRegistry(tm.reg),\n\t\t\t\tloaders.WithTargetsDefaults(func(tc *types.TargetConfig) error {\n\t\t\t\t\treturn config.SetTargetConfigDefaultsExpandEnv(tm.store.Config, tc)\n\t\t\t\t}),\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttm.logger.Info(\"starting loader\", \"loader\", loader)\n\n\t\t\tvar ctx context.Context\n\t\t\tctx, loaderCfn = 
context.WithCancel(tm.ctx)\n\t\t\tgo tm.startLoader(ctx, loader)\n\t\t}\n\t}\n\n\tvar assignmentsCancel func()\n\tvar assignmentsCh <-chan *store.Event[any]\n\tif clustering != nil {\n\t\ttm.logger.Info(\"clustering is enabled\", \"clustering\", clustering)\n\t\t// watch assignments\n\t\tassignmentsCh, assignmentsCancel, err = tm.store.Config.Watch(\"assignments\", store.WithInitialReplay[any]()) // TODO: no initial replay ?\n\t\tif err != nil {\n\t\t\tif loaderCfn != nil {\n\t\t\t\tloaderCfn()\n\t\t\t}\n\t\t\tsubscriptionsCancel()\n\t\t\ttargetsCancel()\n\t\t\treturn fmt.Errorf(\"failed to watch assignments: %w\", err)\n\t\t}\n\t}\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdefer targetsCancel()\n\t\tdefer subscriptionsCancel()\n\t\tdefer func() {\n\t\t\tif loaderCfn != nil {\n\t\t\t\tloaderCfn()\n\t\t\t}\n\t\t}()\n\t\tif clustering != nil {\n\t\t\tdefer assignmentsCancel()\n\t\t}\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-tm.ctx.Done():\n\t\t\t\treturn\n\t\t\tcase ev, ok := <-targetsCh:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ttm.logger.Debug(\"got target event\", \"eventType\", ev.EventType, \"name\", ev.Name)\n\t\t\t\tif !tm.amIAssigned(ev.Name) {\n\t\t\t\t\ttm.logger.Debug(\"target is not assigned to this instance\", \"target\", ev.Name)\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\ttm.logger.Debug(\"target is assigned to this instance\", \"target\", ev.Name)\n\t\t\t\t}\n\t\t\t\tswitch ev.EventType {\n\t\t\t\tcase store.EventTypeCreate, store.EventTypeUpdate:\n\t\t\t\t\tcfg := ev.Object.(*types.TargetConfig)\n\t\t\t\t\ttm.apply(ev.Name, cfg)\n\t\t\t\t\ttm.stats.targetUPMetric.WithLabelValues(ev.Name).Set(1)\n\t\t\t\tcase store.EventTypeDelete:\n\t\t\t\t\ttm.remove(ev.Name)\n\t\t\t\t\ttm.stats.targetUPMetric.WithLabelValues(ev.Name).Set(0)\n\t\t\t\t\ttm.stats.targetConnStateMetric.WithLabelValues(ev.Name).Set(0)\n\t\t\t\t}\n\t\t\tcase op, ok := <-loaderTargetOpCh:\n\t\t\t\tif !ok 
{\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ttm.logger.Info(\"got loader target operation\", \"operation\", op)\n\t\t\t\tfor _, add := range op.Add {\n\t\t\t\t\t_, err := tm.store.Config.Set(\"targets\", add.Name, add)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ttm.logger.Error(\"failed to add target from loader\", \"error\", err, \"target\", add.Name)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, del := range op.Del {\n\t\t\t\t\t_, _, err := tm.store.Config.Delete(\"targets\", del)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ttm.logger.Error(\"failed to delete target from loader\", \"error\", err, \"target\", del)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase ev, ok := <-subscriptionsCh:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ttm.logger.Info(\"got subscription event\", \"event\", ev, \"objectType\", reflect.TypeOf(ev.Object))\n\t\t\t\tcfg, ok := ev.Object.(*types.SubscriptionConfig)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tswitch ev.EventType {\n\t\t\t\tcase store.EventTypeCreate:\n\t\t\t\t\ttm.applySubscription(ev.Name, *cfg)\n\t\t\t\tcase store.EventTypeUpdate:\n\t\t\t\t\ttm.applySubscription(ev.Name, *cfg)\n\t\t\t\tcase store.EventTypeDelete:\n\t\t\t\t\ttm.removeSubscription(ev.Name)\n\t\t\t\t}\n\t\t\tcase ev, ok := <-assignmentsCh:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ttm.logger.Info(\"got assignment event\", \"event\", ev)\n\t\t\t\tswitch ev.EventType {\n\t\t\t\tcase store.EventTypeCreate:\n\t\t\t\t\ttm.setAssigned(ev.Name, true)\n\t\t\t\tcase store.EventTypeUpdate:\n\t\t\t\t\ttm.setAssigned(ev.Name, true) // can this happen? 
yes if we add epoch/term to assignments\n\t\t\t\tcase store.EventTypeDelete:\n\t\t\t\t\ttm.setAssigned(ev.Name, false)\n\t\t\t\t}\n\t\t\t\tgo tm.reconcileAssignment(ev.Name)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (tm *TargetsManager) Stop() {\n\tif tm.cancel != nil {\n\t\ttm.cancel()\n\t\ttm.cancel = nil\n\t}\n}\n\nfunc (tm *TargetsManager) apply(name string, cfg *types.TargetConfig) {\n\ttm.logger.Info(\"applying target config\", \"name\", name, \"cfg\", cfg)\n\n\tvar mt *ManagedTarget\n\tcreated := false\n\n\tdefer func() {\n\t\ttm.updateTargetMetrics(mt)\n\t}()\n\ttm.mu.Lock()\n\tmt = tm.targets[name]\n\tif mt == nil {\n\t\tmt = newManagedTarget(name, cfg.DeepCopy(), tm.ts.tunServer)\n\t\ttm.targets[name] = mt\n\t\tcreated = true\n\t}\n\ttm.mu.Unlock()\n\n\tif created {\n\t\ttm.logger.Info(\"starting created target\", \"name\", name)\n\t\tmt.Lock()\n\t\tdefer mt.Unlock()\n\t\tif err := tm.start(mt); err != nil {\n\t\t\ttm.logger.Error(\"failed to start target\", \"name\", name, \"error\", err)\n\t\t\tmt.setLastError(err.Error())\n\t\t\ttm.setTargetState(name, collstore.StateFailed)\n\t\t\treturn\n\t\t}\n\t\tmt.clearLastError()\n\t\ttm.setTargetState(name, collstore.StateRunning)\n\t\treturn\n\t}\n\n\tmt.Lock()\n\tdefer mt.Unlock()\n\n\tif mt.T.Config.Equal(cfg) {\n\t\treturn\n\t}\n\ttm.logger.Info(\"target config changed\", \"name\", name, \"old\", mt.T.Config, \"new\", cfg)\n\tif !shouldReconnect(mt.T.Config, cfg) {\n\t\t// subscriptions\n\t\t// compare applied subscriptions with new subscriptions.\n\t\t// !Do not mutate the current config subscriptions list!.\n\t\tif !reflect.DeepEqual(mt.appliedSubscriptions, cfg.Subscriptions) {\n\t\t\ttm.logger.Info(\"subscriptions changed\", \"name\", name, \"old\", mt.T.Config.Subscriptions, \"new\", cfg.Subscriptions)\n\t\t\tif added, removed := tm.compareSubscriptions(mt.T.Config.Subscriptions, cfg.Subscriptions); len(added) > 0 || len(removed) > 0 {\n\t\t\t\ttm.logger.Info(\"subscriptions added\", \"name\", 
name, \"added\", added)\n\t\t\t\ttm.logger.Info(\"subscriptions removed\", \"name\", name, \"removed\", removed)\n\t\t\t\tfor _, sub := range added {\n\t\t\t\t\ttm.logger.Info(\"starting target subscription\", \"name\", sub, \"target\", name)\n\t\t\t\t\tcfg, exists, err := tm.store.Config.Get(\"subscriptions\", sub)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ttm.logger.Error(\"failed to get subscription\", \"name\", sub, \"target\", name, \"error\", err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif !exists {\n\t\t\t\t\t\ttm.logger.Error(\"subscription not found\", \"name\", sub, \"target\", name)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tscfg := cfg.(*types.SubscriptionConfig)\n\t\t\t\t\tscfg.Name = sub\n\t\t\t\t\tmt.appliedSubscriptions = append(mt.appliedSubscriptions, sub)\n\t\t\t\t\terr = tm.startTargetSubscription(mt, scfg)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ttm.logger.Error(\"failed to start target subscription\", \"name\", sub, \"target\", name, \"error\", err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, sub := range removed {\n\t\t\t\t\tmt.mu.Lock()\n\t\t\t\t\tcfn, exists := mt.readersCfn[sub]\n\t\t\t\t\tif exists {\n\t\t\t\t\t\tcfn()\n\t\t\t\t\t\tdelete(mt.readersCfn, sub)\n\t\t\t\t\t}\n\t\t\t\t\tmt.mu.Unlock()\n\t\t\t\t\ttm.logger.Info(\"stopping target subscription\", \"name\", sub, \"target\", name)\n\t\t\t\t\tmt.T.StopSubscription(sub)\n\t\t\t\t\tdelete(mt.T.Subscriptions, sub)\n\t\t\t\t\tmt.appliedSubscriptions = slices.DeleteFunc(mt.appliedSubscriptions, func(s string) bool {\n\t\t\t\t\t\treturn s == sub\n\t\t\t\t\t})\n\t\t\t\t\ttm.logger.Info(\"target subscription stopped\", \"name\", sub, \"target\", name)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ttm.logger.Info(\"subscriptions unchanged\", \"name\", name, \"old\", mt.T.Config.Subscriptions, \"new\", cfg.Subscriptions)\n\t\t\t}\n\t\t\tmt.T.Config.Subscriptions = cfg.Subscriptions\n\t\t} else {\n\t\t\ttm.logger.Info(\"subscriptions unchanged\", \"name\", name, \"old\", 
mt.T.Config.Subscriptions, \"new\", cfg.Subscriptions)\n\t\t}\n\t\t// outputs\n\t\tif !reflect.DeepEqual(mt.T.Config.Outputs, cfg.Outputs) {\n\t\t\ttm.logger.Info(\"outputs changed\", \"name\", name, \"old\", mt.T.Config.Outputs, \"new\", cfg.Outputs)\n\t\t\tif added, removed := tm.compareOutputs(mt.T.Config, cfg); len(added) > 0 || len(removed) > 0 {\n\t\t\t\ttm.logger.Info(\"outputs added\", \"name\", name, \"added\", added)\n\t\t\t\ttm.logger.Info(\"outputs removed\", \"name\", name, \"removed\", removed)\n\t\t\t\tfor _, output := range added {\n\t\t\t\t\tmt.outputs[output] = struct{}{}\n\t\t\t\t}\n\t\t\t\tfor _, output := range removed {\n\t\t\t\t\tdelete(mt.outputs, output)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ttm.logger.Info(\"outputs unchanged\", \"name\", name, \"old\", mt.T.Config.Outputs, \"new\", cfg.Outputs)\n\t\t\t}\n\t\t\tmt.T.Config.Outputs = cfg.Outputs\n\t\t} else {\n\t\t\ttm.logger.Info(\"outputs unchanged\", \"name\", name, \"old\", mt.T.Config.Outputs, \"new\", cfg.Outputs)\n\t\t}\n\t\treturn\n\t}\n\n\t// simply reconnect\n\terr := tm.stop(mt)\n\tif err != nil {\n\t\ttm.logger.Error(\"failed to stop target\", \"name\", name, \"error\", err)\n\t\tmt.setLastError(err.Error())\n\t\ttm.setTargetState(name, collstore.StateFailed)\n\t}\n\tmt.T.Config = cfg\n\terr = tm.start(mt)\n\tif err != nil {\n\t\ttm.logger.Error(\"failed to start target\", \"name\", name, \"error\", err)\n\t\tmt.setLastError(err.Error())\n\t\ttm.setTargetState(name, collstore.StateFailed)\n\t}\n}\n\n// assumes the managed target is locked\nfunc (tm *TargetsManager) start(mt *ManagedTarget) error {\n\ttm.logger.Info(\"starting target\", \"name\", mt.Name)\n\tif tm.getTargetStateStr(mt.Name) == collstore.StateRunning {\n\t\treturn nil\n\t}\n\tmt.clearLastError()\n\ttm.setTargetState(mt.Name, collstore.StateStarting)\n\tctx, cfn := context.WithCancel(tm.ctx)\n\tmt.T.Cfn = cfn\n\n\ttm.logger.Info(\"creating gNMI client\", \"name\", mt.Name)\n\terr := mt.T.CreateGNMIClient(ctx, 
tm.targetGRPCOpts(ctx, mt)...)\n\tif err != nil {\n\t\ttm.logger.Error(\"failed to create gNMI client\", \"name\", mt.Name, \"error\", err)\n\t\tmt.setLastError(err.Error())\n\t\ttm.setTargetState(mt.Name, collstore.StateFailed)\n\t\treturn err\n\t}\n\tif tm.locker != nil {\n\t\ttm.logger.Info(\"acquiring lock for target\", \"name\", mt.Name)\n\t\tok, err := tm.locker.Lock(ctx, tm.targetLockKey(mt.Name), []byte(tm.clustering.InstanceName))\n\t\tif err != nil {\n\t\t\ttm.logger.Error(\"failed to acquire lock for target\", \"name\", mt.Name, \"error\", err)\n\t\t\tmt.setLastError(err.Error())\n\t\t\ttm.setTargetState(mt.Name, collstore.StateFailed)\n\t\t\t_ = tm.stop(mt)\n\t\t\treturn err\n\t\t}\n\t\tif !ok {\n\t\t\ttm.logger.Error(\"failed to acquire lock for target\", \"name\", mt.Name)\n\t\t\tmt.setLastError(\"lock not acquired\")\n\t\t\ttm.setTargetState(mt.Name, collstore.StateFailed)\n\t\t\t_ = tm.stop(mt)\n\t\t\treturn err\n\t\t}\n\t\t// keep lock\n\t\tgo func() {\n\t\t\tdoneCh, errCh := tm.locker.KeepLock(ctx, tm.targetLockKey(mt.Name))\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-doneCh:\n\t\t\t\t\ttm.logger.Info(\"lock for target released\", \"name\", mt.Name)\n\t\t\t\t\treturn\n\t\t\t\tcase err := <-errCh:\n\t\t\t\t\ttm.logger.Error(\"failed to maintain lock for target\", \"name\", mt.Name, \"error\", err)\n\t\t\t\t\t_ = tm.stop(mt)\n\t\t\t\t\tmt.setLastError(err.Error())\n\t\t\t\t\ttm.setTargetState(mt.Name, collstore.StateFailed)\n\t\t\t\t\treturn\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\ttm.logger.Info(\"lock for target released\", \"name\", mt.Name)\n\t\t\t\t\t_ = tm.stop(mt)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\ttm.logger.Info(\"gNMI client created\", \"name\", mt.Name)\n\ttm.setTargetState(mt.Name, collstore.StateRunning)\n\n\t// Watch gRPC connectivity state changes and keep the state store current.\n\tgo tm.watchConnState(ctx, mt)\n\n\ttm.logger.Info(\"target started\", \"name\", mt.Name)\n\t_, err = mt.T.Capabilities(ctx)\n\tif err 
!= nil {\n\t\ttm.logger.Error(\"failed capabilities request\", \"name\", mt.Name, \"error\", err)\n\t\tmt.setLastError(err.Error())\n\t\ttm.setTargetState(mt.Name, collstore.StateFailed)\n\t\treturn err\n\t}\n\ttm.logger.Info(\"capabilities request successful\", \"name\", mt.Name)\n\n\t// start subscriptions\n\tsubs := mt.T.Config.Subscriptions\n\tif len(subs) == 0 {\n\t\t// if target has no explicit subs, attach all known subs\n\t\ttm.mu.RLock()\n\t\tsubs = make([]string, 0, len(tm.subscriptions))\n\t\tfor name := range tm.subscriptions {\n\t\t\tsubs = append(subs, name)\n\t\t}\n\t\ttm.mu.RUnlock()\n\t\t// reflect the effective subs into the target's config so future diffs see them\n\t\tmt.appliedSubscriptions = append(mt.appliedSubscriptions, subs...)\n\t}\n\tfor _, sub := range subs {\n\t\ttm.logger.Info(\"starting target subscription\", \"name\", sub, \"target\", mt.Name)\n\t\ttm.mu.RLock()\n\t\tcfg := tm.subscriptions[sub]\n\t\ttm.mu.RUnlock()\n\t\tif cfg == nil {\n\t\t\tobj, exists, err := tm.store.Config.Get(\"subscriptions\", sub)\n\t\t\tif err != nil {\n\t\t\t\ttm.logger.Error(\"failed to get subscription\", \"name\", sub, \"target\", mt.Name, \"error\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !exists {\n\t\t\t\ttm.logger.Error(\"subscription not found\", \"name\", sub, \"target\", mt.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc := obj.(*types.SubscriptionConfig)\n\t\t\tcfg = c\n\t\t}\n\t\tcfg.Name = sub\n\t\terr = tm.startTargetSubscription(mt, cfg)\n\t\tif err != nil {\n\t\t\ttm.logger.Error(\"failed to start target subscription\", \"name\", sub, \"target\", mt.Name, \"error\", err)\n\t\t\tcontinue\n\t\t}\n\t}\n\t// Refresh state now that subscriptions have been kicked off.\n\t// Individual subscription goroutines will update state again\n\t// once their SubscribeClients are established.\n\ttm.setTargetState(mt.Name, collstore.StateRunning)\n\treturn nil\n}\n\nfunc (tm *TargetsManager) targetGRPCOpts(ctx context.Context, mt *ManagedTarget) 
[]grpc.DialOption {\n\tif mt.cfg.TunnelTargetType != \"\" {\n\t\treturn []grpc.DialOption{grpc.WithContextDialer(tm.tunDialerFn(ctx, mt))}\n\t}\n\treturn nil\n}\n\nfunc (tm *TargetsManager) tunDialerFn(ctx context.Context, mt *ManagedTarget) func(context.Context, string) (net.Conn, error) {\n\treturn func(_ context.Context, _ string) (net.Conn, error) {\n\t\ttt := tunnel.Target{ID: mt.cfg.Name, Type: mt.cfg.TunnelTargetType}\n\t\tctx, cancel := context.WithTimeout(ctx, mt.cfg.Timeout)\n\t\tdefer cancel()\n\t\tconn, err := tunnel.ServerConn(ctx, tm.ts.tunServer, &tt)\n\t\tif err != nil {\n\t\t\ttm.logger.Error(\"failed dialing tunnel connection for target\", \"name\", mt.Name, \"error\", err)\n\t\t\treturn nil, err\n\t\t}\n\t\treturn conn, nil\n\t}\n}\n\nfunc (tm *TargetsManager) stop(mt *ManagedTarget) error {\n\tif tm.getTargetStateStr(mt.Name) == collstore.StateStopped {\n\t\treturn nil\n\t}\n\tmt.clearLastError()\n\ttm.setTargetState(mt.Name, collstore.StateStopping)\n\n\t// stop reader loop\n\tif mt.readerCancel != nil {\n\t\tmt.readerCancel()\n\t\tmt.readerWG.Wait()\n\t\tmt.readerCancel = nil\n\t}\n\n\t// stop all per-target subscriptions and locker if any\n\tif mt.T.Cfn != nil {\n\t\tmt.T.Cfn()\n\t}\n\ttm.logger.Info(\"closing target\", \"name\", mt.Name)\n\terr := mt.T.Close()\n\tif err != nil {\n\t\ttm.logger.Error(\"failed to close target\", \"name\", mt.Name, \"error\", err)\n\t} else {\n\t\ttm.logger.Info(\"closed target\", \"name\", mt.Name)\n\t}\n\ttm.setTargetState(mt.Name, collstore.StateStopped)\n\tif tm.locker != nil {\n\t\ttm.logger.Info(\"releasing lock for target\", \"name\", mt.Name)\n\t\terr := tm.locker.Unlock(tm.ctx, tm.targetLockKey(mt.Name))\n\t\tif err != nil {\n\t\t\ttm.logger.Error(\"failed to release lock for target\", \"name\", mt.Name, \"error\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (tm *TargetsManager) remove(name string) {\n\ttm.mu.Lock()\n\tmt := tm.targets[name]\n\tdelete(tm.targets, name)\n\ttm.mu.Unlock()\n\tif mt != nil 
{\n\t\tmt.Lock()\n\t\t_ = tm.stop(mt)\n\t\tmt.T = nil\n\t\tmt.outputs = nil\n\t\tmt.readerCtx = nil\n\t\tmt.readerCancel = nil\n\t\tmt.Unlock()\n\t}\n\ttm.store.State.Delete(collstore.KindTargets, name)\n}\n\n// apply subscription to all targets that reference it or to those that do not reference any subscription\nfunc (tm *TargetsManager) applySubscription(name string, cfg types.SubscriptionConfig) {\n\ttm.logger.Info(\"applying subscription\", \"name\", name, \"cfg\", cfg)\n\tcfg.Name = name\n\ttm.mu.Lock()\n\ttm.subscriptions[name] = &cfg\n\ttm.logger.Info(\"subscriptions\", \"subscriptions\", tm.subscriptions)\n\tfor _, mt := range tm.targets {\n\t\ttm.logger.Info(\"target\", \"target\", mt.Name, \"subscriptions\", mt.T.Config.Subscriptions)\n\t\tif len(mt.T.Config.Subscriptions) > 0 {\n\t\t\tif !slices.Contains(mt.T.Config.Subscriptions, name) {\n\t\t\t\ttm.logger.Info(\"subscription not in target's explicit list\", \"subscription\", name, \"target\", mt.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\ttm.logger.Info(\"(re)starting target subscription\", \"name\", name, \"target\", mt.Name)\n\t\t// Stop and WAIT for the old subscription to fully terminate\n\t\tmt.mu.Lock()\n\t\tcfn, exists := mt.readersCfn[name]\n\t\tif exists {\n\t\t\ttm.logger.Info(\"canceling subscription context\", \"name\", name, \"target\", mt.Name)\n\t\t\tcfn() // Cancel the context\n\t\t\ttm.logger.Info(\"deleted subscription context\", \"name\", name, \"target\", mt.Name)\n\t\t\tdelete(mt.readersCfn, name) // Remove from map\n\t\t}\n\t\tmt.mu.Unlock()\n\t\ttm.logger.Info(\"stopping target subscription\", \"name\", name, \"target\", mt.Name)\n\t\tmt.T.StopSubscription(name)\n\t\ttm.logger.Info(\"stopped target subscription\", \"name\", name, \"target\", mt.Name)\n\t\t// Wait for the reader goroutine to finish\n\t\tmt.T.Subscriptions[name] = &cfg\n\t\terr := tm.startTargetSubscription(mt, &cfg)\n\t\tif err != nil {\n\t\t\ttm.logger.Error(\"failed to start target subscription\", 
\"subscription\", name, \"target\", mt.Name, \"error\", err)\n\t\t}\n\t}\n\ttm.mu.Unlock()\n}\n\n// remove subscription from targets that already reference it and have it running\nfunc (tm *TargetsManager) removeSubscription(name string) {\n\ttm.mu.Lock()\n\tdelete(tm.subscriptions, name)\n\tfor _, mt := range tm.targets {\n\t\tmt.mu.Lock()\n\t\tcfn, exists := mt.readersCfn[name]\n\t\tif exists {\n\t\t\tcfn()\n\t\t\tdelete(mt.readersCfn, name)\n\t\t}\n\t\tmt.mu.Unlock()\n\t\tmt.T.StopSubscription(name)\n\t\tdelete(mt.T.Subscriptions, name)\n\t}\n\ttm.mu.Unlock()\n}\n\nfunc (tm *TargetsManager) reconcileAssignment(name string) {\n\tif !tm.amIAssigned(name) {\n\t\tif mt := tm.Lookup(name); mt != nil && tm.getTargetStateStr(name) == collstore.StateRunning {\n\t\t\t_ = tm.stop(mt)\n\t\t}\n\t\treturn\n\t}\n\t// get targetConfig\n\tcfg, ok := tm.getConfig(name)\n\tif !ok {\n\t\ttm.logger.Info(\"assigned but config not present yet; will retry on next event\", \"target\", name)\n\t\treturn\n\t}\n\t// Ensure ManagedTarget exists\n\ttm.mu.Lock()\n\tmt := tm.targets[name]\n\tif mt == nil {\n\t\tmt = newManagedTarget(name, cfg, tm.ts.tunServer)\n\t\ttm.targets[name] = mt\n\t}\n\ttm.mu.Unlock()\n\n\t// lock managed target\n\tmt.Lock()\n\tdefer mt.Unlock()\n\n\t// check if config has changed\n\tif reflect.DeepEqual(mt.T.Config, cfg) {\n\t\treturn\n\t}\n\n\t// check if should reconnect\n\tshouldReconnect := shouldReconnect(mt.T.Config, cfg)\n\tif !shouldReconnect {\n\t\treturn\n\t}\n\n\t// simply reconnect\n\terr := tm.stop(mt)\n\tif err != nil {\n\t\ttm.logger.Error(\"failed to stop target\", \"name\", name, \"error\", err)\n\t\tmt.setLastError(err.Error())\n\t\ttm.setTargetState(name, collstore.StateFailed)\n\t}\n\tmt.T.Config = cfg\n\terr = tm.start(mt)\n\tif err != nil {\n\t\ttm.logger.Error(\"failed to start target\", \"name\", name, \"error\", err)\n\t\tmt.setLastError(err.Error())\n\t\ttm.setTargetState(name, collstore.StateFailed)\n\t}\n}\n\nfunc (tm *TargetsManager) 
getConfig(name string) (*types.TargetConfig, bool) {\n\tv, ok, err := tm.store.Config.Get(\"targets\", name)\n\tif err != nil || !ok || v == nil {\n\t\treturn nil, false\n\t}\n\tcfg, ok := v.(*types.TargetConfig)\n\treturn cfg, ok\n}\n\nfunc (tm *TargetsManager) Lookup(name string) *ManagedTarget {\n\ttm.mu.RLock()\n\tdefer tm.mu.RUnlock()\n\treturn tm.targets[name]\n}\n\nfunc (tm *TargetsManager) ForEach(fn func(*ManagedTarget)) {\n\ttm.mu.RLock()\n\tdefer tm.mu.RUnlock()\n\tfor _, mt := range tm.targets {\n\t\tfn(mt)\n\t}\n}\n\nfunc (tm *TargetsManager) SetIntendedState(name string, state string) bool {\n\ttm.mu.Lock()\n\tdefer tm.mu.Unlock()\n\tmt := tm.targets[name]\n\tif mt == nil {\n\t\treturn false\n\t}\n\tmt.Lock()\n\tdefer mt.Unlock()\n\n\tcurrentState := tm.getTargetStateStr(name)\n\tswitch state {\n\tcase collstore.IntendedStateEnabled:\n\t\tif currentState == collstore.StateRunning || currentState == collstore.StateStarting {\n\t\t\treturn false\n\t\t}\n\t\t_ = tm.start(mt)\n\tcase collstore.IntendedStateDisabled:\n\t\tif currentState == collstore.StateStopped || currentState == collstore.StateStopping {\n\t\t\treturn false\n\t\t}\n\t\t_ = tm.stop(mt)\n\t}\n\treturn true\n}\n\nfunc (tm *TargetsManager) GetIntendedState(name string) string {\n\tts := tm.GetTargetState(name)\n\tif ts == nil {\n\t\treturn \"\"\n\t}\n\treturn ts.IntendedState\n}\n\nfunc (tm *TargetsManager) startTargetSubscription(mt *ManagedTarget, cfg *types.SubscriptionConfig) error {\n\tvar defaultEncoding = \"json\"\n\tdefaultEncodingVal, exists, err := tm.store.Config.Get(\"globalConfig\", \"defaultEncoding\")\n\tif err != nil {\n\t\ttm.logger.Error(\"failed to get default encoding\", \"error\", err)\n\t\treturn err\n\t}\n\tif exists {\n\t\tvar ok bool\n\t\tdefaultEncoding, ok = defaultEncodingVal.(string)\n\t\tif !ok {\n\t\t\ttm.logger.Error(\"default encoding is not a string\", \"defaultEncodingVal\", defaultEncodingVal)\n\t\t}\n\t}\n\tsubreq, err := 
utils.CreateSubscribeRequest(cfg, mt.T.Config, defaultEncoding)\n\tif err != nil {\n\t\ttm.stats.subscriptionFailedCount.WithLabelValues(mt.Name, cfg.Name, subscriptionRequestErrorTypeCONFIG).Inc()\n\t\ttm.logger.Error(\"failed to create subscribe request\", \"target\", mt.Name, \"subscription\", cfg.Name, \"error\", err)\n\t\treturn err\n\t}\n\ttm.logger.Info(\"starting target Subscribe RPC\", \"name\", cfg.Name, \"target\", mt.Name)\n\n\tmt.T.Subscriptions[cfg.Name] = cfg\n\tmt.readerWG.Add(1)\n\tsctx, cfn := context.WithCancel(tm.ctx)\n\tmt.mu.Lock()\n\tmt.readersCfn[cfg.Name] = cfn\n\tmt.mu.Unlock()\n\n\tsubscriptionOutputs := make(map[string]struct{}, len(cfg.Outputs))\n\tfor _, output := range cfg.Outputs {\n\t\tsubscriptionOutputs[output] = struct{}{}\n\t}\n\trespCh, errCh := mt.T.SubscribeChan(sctx, subreq, cfg.Name)\n\tgo func() {\n\t\tdefer mt.readerWG.Done()\n\t\t// When the goroutine exits (subscription stopped/cancelled), refresh\n\t\t// the target state so the subscriptions map is up-to-date.\n\t\tdefer func() {\n\t\t\tcurrentState := tm.getTargetStateStr(mt.Name)\n\t\t\tif currentState != \"\" {\n\t\t\t\ttm.setTargetState(mt.Name, currentState)\n\t\t\t}\n\t\t}()\n\t\tinitialResponse := true\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-sctx.Done():\n\t\t\t\treturn\n\t\t\tcase resp, ok := <-respCh:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// The first response confirms the subscription is connected.\n\t\t\t\t// Refresh target state so the subscriptions map shows \"running\".\n\t\t\t\tif initialResponse {\n\t\t\t\t\tinitialResponse = false\n\t\t\t\t\tmt.clearLastError()\n\t\t\t\t\ttm.setTargetState(mt.Name, collstore.StateRunning)\n\t\t\t\t}\n\t\t\t\ttm.stats.subscribeResponseReceived.WithLabelValues(mt.Name, resp.SubscriptionName).Inc()\n\t\t\t\touts := func() map[string]struct{} {\n\t\t\t\t\tif len(subscriptionOutputs) > 0 {\n\t\t\t\t\t\tcp := make(map[string]struct{}, len(subscriptionOutputs))\n\t\t\t\t\t\tmaps.Copy(cp, 
subscriptionOutputs)\n\t\t\t\t\t\treturn cp\n\t\t\t\t\t}\n\t\t\t\t\tmt.RLock()\n\t\t\t\t\tdefer mt.RUnlock()\n\t\t\t\t\tcp := make(map[string]struct{}, len(mt.outputs))\n\t\t\t\t\tfor k := range mt.outputs {\n\t\t\t\t\t\tcp[k] = struct{}{}\n\t\t\t\t\t}\n\t\t\t\t\treturn cp\n\t\t\t\t}()\n\t\t\t\tselect {\n\t\t\t\tcase tm.out <- &pipeline.Msg{\n\t\t\t\t\tMsg: resp.Response,\n\t\t\t\t\tMeta: outputs.Meta{\n\t\t\t\t\t\t\"source\":            mt.Name,\n\t\t\t\t\t\t\"subscription-name\": resp.SubscriptionName,\n\t\t\t\t\t},\n\t\t\t\t\tOutputs: outs,\n\t\t\t\t}:\n\t\t\t\tdefault:\n\t\t\t\t\ttm.stats.droppedSubscribeResponses.WithLabelValues(mt.Name, resp.SubscriptionName).Inc()\n\t\t\t\t\t// If downstream is slow, you can drop, count, or block; here we drop to keep reader healthy.\n\t\t\t\t\ttm.logger.Warn(\"pipeline backpressure: dropping response\", \"target\", mt.Name)\n\t\t\t\t}\n\t\t\tcase err, ok := <-errCh:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// Reset so the next successful response after retry\n\t\t\t\t// triggers a state update back to \"running\".\n\t\t\t\tinitialResponse = true\n\t\t\t\tmt.setLastError(err.Err.Error())\n\t\t\t\tcurrentState := tm.getTargetStateStr(mt.Name)\n\t\t\t\tif currentState != \"\" {\n\t\t\t\t\ttm.setTargetState(mt.Name, currentState)\n\t\t\t\t}\n\t\t\t\ttm.stats.subscriptionFailedCount.WithLabelValues(mt.Name, err.SubscriptionName, subscriptionRequestErrorTypeGRPC).Inc()\n\t\t\t\ttm.logger.Error(\"subscription error\", \"error\", err)\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc shouldReconnect(old, new *types.TargetConfig) bool {\n\tif old == nil && new != nil {\n\t\treturn true\n\t}\n\tif new == nil && old != nil {\n\t\treturn true\n\t}\n\n\tho, _ := hashConnSpec(old)\n\thn, _ := hashConnSpec(new)\n\treturn ho != hn\n}\n\n// TODO: optimize this\nfunc (tm *TargetsManager) compareSubscriptions(old, new []string) (added, removed []string) {\n\tvar subscriptionsList []string\n\tvar err error\n\tif len(new) == 0 || 
len(old) == 0 {\n\t\t// get all subscriptions from the store\n\t\tsubscriptionsList, err = tm.store.Config.Keys(\"subscriptions\")\n\t\tif err != nil {\n\t\t\ttm.logger.Error(\"failed to get subscriptions from store\", \"error\", err)\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\tif len(new) == 0 {\n\t\tnew = subscriptionsList\n\t}\n\tif len(old) == 0 {\n\t\told = subscriptionsList\n\t}\n\n\toldSubs := make(map[string]struct{}, len(old))\n\tnewSubs := make(map[string]struct{}, len(new))\n\tfor _, sub := range old {\n\t\toldSubs[sub] = struct{}{}\n\t}\n\tfor _, sub := range new {\n\t\tnewSubs[sub] = struct{}{}\n\t}\n\tfor _, sub := range old {\n\t\tif _, ok := newSubs[sub]; !ok {\n\t\t\tremoved = append(removed, sub)\n\t\t}\n\t}\n\tfor _, sub := range new {\n\t\tif _, ok := oldSubs[sub]; !ok {\n\t\t\tadded = append(added, sub)\n\t\t}\n\t}\n\treturn added, removed\n}\n\nfunc (tm *TargetsManager) compareOutputs(old, new *types.TargetConfig) (added, removed []string) {\n\tif len(new.Outputs) == 0 {\n\t\t// get all outputs from the store\n\t\toutputs, err := tm.store.Config.List(\"outputs\")\n\t\tif err != nil {\n\t\t\ttm.logger.Error(\"failed to get outputs\", \"error\", err)\n\t\t\treturn nil, nil\n\t\t}\n\t\tnew.Outputs = keys(outputs)\n\t}\n\tif len(old.Outputs) == 0 {\n\t\t// get all outputs from the store\n\t\toutputs, err := tm.store.Config.List(\"outputs\")\n\t\tif err != nil {\n\t\t\ttm.logger.Error(\"failed to get outputs\", \"error\", err)\n\t\t\treturn nil, nil\n\t\t}\n\t\told.Outputs = keys(outputs)\n\t\treturn nil, old.Outputs\n\t}\n\toldOutputs := make(map[string]struct{}, len(old.Outputs))\n\tnewOutputs := make(map[string]struct{}, len(new.Outputs))\n\tfor _, output := range old.Outputs {\n\t\toldOutputs[output] = struct{}{}\n\t}\n\tfor _, output := range new.Outputs {\n\t\tnewOutputs[output] = struct{}{}\n\t}\n\tfor _, output := range old.Outputs {\n\t\tif _, ok := newOutputs[output]; !ok {\n\t\t\tremoved = append(removed, output)\n\t\t}\n\t}\n\tfor _, output 
:= range new.Outputs {\n\t\tif _, ok := oldOutputs[output]; !ok {\n\t\t\tadded = append(added, output)\n\t\t}\n\t}\n\treturn added, removed\n}\n\nfunc keys[T any](m map[string]T) []string {\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}\n\n// connSpec is the set of target parameters that affect the connection\ntype connSpec struct {\n\tAddress    string\n\tUsername   string\n\tPassword   string\n\tAuthScheme string\n\tToken      string\n\tProxy      string\n\n\tTimeout       time.Duration\n\tTCPKeepalive  time.Duration\n\tGRPCKeepalive *types.ClientKeepalive\n\n\t// TLS\n\tInsecure      bool\n\tTLSCA         string\n\tTLSCert       string\n\tTLSKey        string\n\tSkipVerify    bool\n\tTLSServerName string\n\tTLSMinVersion string\n\tTLSMaxVersion string\n\tTLSVersion    string\n\tCipherSuites  []string\n\n\t// Dial options that affect transport\n\tEncoding string\n\tGzip     bool\n}\n\nfunc hashConnSpec(cfg *types.TargetConfig) (uint64, error) {\n\tspec := connSpecFrom(cfg)\n\tb, err := json.Marshal(spec)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\th := fnv.New64a()\n\t_, _ = h.Write(b)\n\treturn h.Sum64(), nil\n}\n\nfunc connSpecFrom(tc *types.TargetConfig) connSpec {\n\tcs := make([]string, len(tc.CipherSuites))\n\tcopy(cs, tc.CipherSuites)\n\tsort.Strings(cs)\n\n\tspec := connSpec{\n\t\tAddress:       tc.Address,\n\t\tUsername:      val(tc.Username),\n\t\tPassword:      val(tc.Password),\n\t\tAuthScheme:    tc.AuthScheme,\n\t\tToken:         val(tc.Token),\n\t\tProxy:         tc.Proxy,\n\t\tTimeout:       tc.Timeout,\n\t\tTCPKeepalive:  tc.TCPKeepalive,\n\t\tGRPCKeepalive: tc.GRPCKeepalive,\n\t\tInsecure:      val(tc.Insecure),\n\t\tTLSCA:         val(tc.TLSCA),\n\t\tTLSCert:       val(tc.TLSCert),\n\t\tTLSKey:        val(tc.TLSKey),\n\t\tSkipVerify:    val(tc.SkipVerify),\n\t\tTLSServerName: tc.TLSServerName,\n\t\tTLSMinVersion: tc.TLSMinVersion,\n\t\tTLSMaxVersion: 
tc.TLSMaxVersion,\n\t\tTLSVersion:    tc.TLSVersion,\n\t\tCipherSuites:  cs,\n\t\tEncoding:      val(tc.Encoding),\n\t\tGzip:          val(tc.Gzip),\n\t}\n\treturn spec\n}\n\nfunc val[T any](p *T) T {\n\tvar z T\n\tif p == nil {\n\t\treturn z\n\t}\n\treturn *p\n}\n\n// watchConnState monitors the gRPC connectivity state of a target and updates\n// the state store whenever it changes. It runs until ctx is cancelled (i.e.\n// the target is stopped).\nfunc (tm *TargetsManager) watchConnState(ctx context.Context, mt *ManagedTarget) {\n\tfor {\n\t\tcurrentState := mt.T.ConnectivityState()\n\t\t// Block until the gRPC connection transitions away from currentState\n\t\t// or the context is cancelled.\n\t\tchanged := mt.T.WaitForConnStateChange(ctx, currentState)\n\t\tif !changed {\n\t\t\t// ctx was cancelled — target is shutting down\n\t\t\treturn\n\t\t}\n\t\tnewState := mt.T.ConnectivityState()\n\t\ttm.logger.Debug(\"target connectivity state changed\",\n\t\t\t\"name\", mt.Name,\n\t\t\t\"from\", currentState.String(),\n\t\t\t\"to\", newState.String(),\n\t\t)\n\t\t// Refresh the full target state in the store (picks up the new\n\t\t// ConnectionState via mt.T.ConnState()).\n\t\ttargetState := tm.getTargetStateStr(mt.Name)\n\t\tif targetState != \"\" {\n\t\t\ttm.setTargetState(mt.Name, targetState)\n\t\t}\n\t}\n}\n\n// State store helpers\n\n// setTargetState writes the full TargetState (including connection state and\n// per-subscription states) to the state store. The failed reason is read from\n// the ManagedTarget's lastError field (protected by mt.mu), so callers that\n// want to set or clear an error must call mt.setLastError before this method.\n// When the ManagedTarget is not found (e.g. 
after removal), failedReason\n// defaults to empty.\nfunc (tm *TargetsManager) setTargetState(name, state string) {\n\tintended := collstore.IntendedStateEnabled\n\tif state == collstore.StateStopped {\n\t\tintended = collstore.IntendedStateDisabled\n\t}\n\tts := &collstore.TargetState{\n\t\tComponentState: collstore.ComponentState{\n\t\t\t// Name:          name,\n\t\t\tIntendedState: intended,\n\t\t\tState:         state,\n\t\t\tLastUpdated:   time.Now(),\n\t\t},\n\t}\n\t// Enrich with live target data when available.\n\ttm.mu.RLock()\n\tmt := tm.targets[name]\n\ttm.mu.RUnlock()\n\tif mt != nil && mt.T != nil {\n\t\tts.FailedReason = mt.getLastError()\n\t\t// gRPC connection state\n\t\tts.ConnectionState = mt.T.ConnState()\n\t\t// Per-subscription states (snapshot taken under Target's internal lock\n\t\t// to avoid racing with attemptSubscription/StopSubscription).\n\t\tif subStates := mt.T.SubscribeClientStates(); len(subStates) > 0 {\n\t\t\tts.Subscriptions = make(map[string]string, len(subStates))\n\t\t\tfor subName, active := range subStates {\n\t\t\t\tif active {\n\t\t\t\t\tts.Subscriptions[subName] = collstore.StateRunning\n\t\t\t\t} else {\n\t\t\t\t\tts.Subscriptions[subName] = collstore.StateStopped\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\ttm.store.State.Set(collstore.KindTargets, name, ts)\n}\n\nfunc (tm *TargetsManager) getTargetStateStr(name string) string {\n\tts := tm.GetTargetState(name)\n\tif ts == nil {\n\t\treturn \"\"\n\t}\n\treturn ts.State\n}\n\n// GetTargetState returns the runtime state of a target from the state store.\nfunc (tm *TargetsManager) GetTargetState(name string) *collstore.TargetState {\n\tv, ok, err := tm.store.State.Get(collstore.KindTargets, name)\n\tif err != nil || !ok {\n\t\treturn nil\n\t}\n\tts, ok := v.(*collstore.TargetState)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn ts\n}\n\n// ListTargetStates returns all target states from the state store.\nfunc (tm *TargetsManager) ListTargetStates() []*collstore.TargetState {\n\tstates 
:= make([]*collstore.TargetState, 0)\n\ttm.store.State.List(collstore.KindTargets, func(name string, v any) bool {\n\t\tif ts, ok := v.(*collstore.TargetState); ok {\n\t\t\tstates = append(states, ts)\n\t\t}\n\t\treturn false\n\t})\n\treturn states\n}\n"
  },
  {
    "path": "pkg/collector/managers/targets/tunnel_server.go",
    "content": "package targets_manager\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"log/slog\"\n\t\"net\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tgrpc_prometheus \"github.com/grpc-ecosystem/go-grpc-prometheus\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/logging\"\n\ttpb \"github.com/openconfig/grpctunnel/proto/tunnel\"\n\t\"github.com/openconfig/grpctunnel/tunnel\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/zestor-dev/zestor/store\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials\"\n)\n\n// smaller scoped config than the one used when loading the config from the file\ntype tunnelServerConfig struct {\n\tAddress       string           `mapstructure:\"address,omitempty\" json:\"address,omitempty\"`\n\tTLS           *types.TLSConfig `mapstructure:\"tls,omitempty\"`\n\tEnableMetrics bool             `mapstructure:\"enable-metrics,omitempty\" json:\"enable-metrics,omitempty\"`\n\tDebug         bool             `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n}\n\n// tunnelServer watches tunnel server config and reconciles\n// the connected targets when the config changes.\ntype tunnelServer struct {\n\tconfig *tunnelServerConfig\n\n\tgrpcTunnelSrv *grpc.Server\n\ttunServer     *tunnel.Server\n\tstore         store.Store[any]\n\tlogger        *slog.Logger\n\treg           *prometheus.Registry\n\n\t// track currently connected tunnel targets so we can reconcile when\n\t// tunnel-target-matches are created, updated, or deleted.\n\tmu               sync.RWMutex\n\tconnectedTargets map[string]tunnel.Target // key = target ID\n}\n\nfunc newTunnelServer(s store.Store[any], reg *prometheus.Registry) *tunnelServer {\n\tts := &tunnelServer{\n\t\tgrpcTunnelSrv:    grpc.NewServer(),\n\t\tstore:            s,\n\t\treg:              
reg,\n\t\tconnectedTargets: make(map[string]tunnel.Target),\n\t}\n\n\treturn ts\n}\n\nfunc (ts *tunnelServer) gRPCTunnelServerOpts() ([]grpc.ServerOption, error) {\n\topts := make([]grpc.ServerOption, 0)\n\tif ts.config == nil {\n\t\treturn opts, nil\n\t}\n\tif ts.config.EnableMetrics && ts.reg != nil {\n\t\tgrpcMetrics := grpc_prometheus.NewServerMetrics()\n\t\topts = append(opts,\n\t\t\tgrpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()),\n\t\t\tgrpc.UnaryInterceptor(grpcMetrics.UnaryServerInterceptor()),\n\t\t)\n\t\tts.reg.MustRegister(grpcMetrics)\n\t}\n\n\tif ts.config.TLS == nil {\n\t\treturn opts, nil\n\t}\n\n\ttlscfg, err := utils.NewTLSConfig(\n\t\tts.config.TLS.CaFile,\n\t\tts.config.TLS.CertFile,\n\t\tts.config.TLS.KeyFile,\n\t\tts.config.TLS.ClientAuth,\n\t\tfalse,\n\t\ttrue,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif tlscfg != nil {\n\t\topts = append(opts, grpc.Creds(credentials.NewTLS(tlscfg)))\n\t}\n\n\treturn opts, nil\n}\n\nfunc (ts *tunnelServer) startTunnelServer(ctx context.Context) error {\n\ttscfg, found, err := ts.store.Get(\"tunnel-server\", \"tunnel-server\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !found {\n\t\treturn nil\n\t}\n\tif tscfg == nil {\n\t\treturn nil\n\t}\n\tlogger := logging.NewLogger(ts.store, \"component\", \"tunnel-server\")\n\tts.logger = logger\n\toriginalConfig, ok := tscfg.(*config.TunnelServer)\n\tif !ok {\n\t\treturn fmt.Errorf(\"tunnel-server config is malfomatted\")\n\t}\n\tif originalConfig == nil {\n\t\treturn nil\n\t}\n\tts.config = &tunnelServerConfig{\n\t\tAddress:       originalConfig.Address,\n\t\tTLS:           originalConfig.TLS,\n\t\tEnableMetrics: originalConfig.EnableMetrics,\n\t\tDebug:         originalConfig.Debug,\n\t}\n\n\tts.logger.Info(\"building tunnel server\")\n\tts.tunServer, err = tunnel.NewServer(tunnel.ServerConfig{\n\t\tAddTargetHandler:    ts.addTargetHandler,\n\t\tDeleteTargetHandler: ts.deleteTargetHandler,\n\t\tRegisterHandler:     
ts.registerHandler,\n\t\tHandler:             ts.serverHandler,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\topts, err := ts.gRPCTunnelServerOpts()\n\tif err != nil {\n\t\treturn err\n\t}\n\tts.grpcTunnelSrv = grpc.NewServer(opts...)\n\ttpb.RegisterTunnelServer(ts.grpcTunnelSrv, ts.tunServer)\n\tvar l net.Listener\n\tnetwork := \"tcp\"\n\taddr := ts.config.Address\n\tif strings.HasPrefix(ts.config.Address, \"unix://\") {\n\t\tnetwork = \"unix\"\n\t\taddr = strings.TrimPrefix(addr, \"unix://\")\n\t}\n\n\tctx, cancel := context.WithCancel(ctx)\n\tfor {\n\t\tvar err error\n\t\tl, err = net.Listen(network, addr)\n\t\tif err != nil {\n\t\t\tts.logger.Error(\"failed to start gRPC tunnel server listener\", \"error\", err)\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\t// watch tunnel-target-matches for CRUD operations and reconcile connected targets\n\tvar matchesCh <-chan *store.Event[any]\n\tvar matchesCancel func()\n\tfor {\n\t\tvar err error\n\t\tmatchesCh, matchesCancel, err = ts.store.Watch(\"tunnel-target-matches\")\n\t\tif err != nil {\n\t\t\tts.logger.Error(\"failed to watch tunnel-target-matches\", \"error\", err)\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tgo ts.watchTunnelTargetMatches(ctx, matchesCh, matchesCancel)\n\n\tgo func() {\n\t\tts.logger.Info(\"starting gRPC tunnel server\")\n\t\terr := ts.grpcTunnelSrv.Serve(l)\n\t\tif err != nil {\n\t\t\tts.logger.Error(\"gRPC tunnel server shutdown\", \"error\", err)\n\t\t}\n\t\tcancel()\n\t}()\n\tdefer ts.grpcTunnelSrv.Stop()\n\tfor range ctx.Done() {\n\t}\n\treturn ctx.Err()\n}\n\n// Tunnel Server handlers\n\n// addTargetHandler is called when a tunnel target connects (registers)\nfunc (ts *tunnelServer) addTargetHandler(tt tunnel.Target) error {\n\tts.logger.Info(\"tunnel server target register request\", \"target\", tt)\n\n\t// track the connected target so we can reconcile when matches change\n\tts.mu.Lock()\n\tts.connectedTargets[tt.ID] = 
tt\n\tts.mu.Unlock()\n\n\ttc := ts.getTunnelTargetMatch(tt)\n\tif tc == nil {\n\t\tts.logger.Info(\"target ignored, not matching any rule\", \"target\", tt)\n\t\treturn nil\n\t}\n\tts.logger.Info(\"target matched\", \"target\", tc)\n\t_, err := ts.store.Set(\"targets\", tc.Name, tc)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// deleteTargetHandler is called when a tunnel target disconnects (deregisters)\nfunc (ts *tunnelServer) deleteTargetHandler(tt tunnel.Target) error {\n\tts.logger.Info(\"tunnel server target deregister request\", \"target\", tt)\n\n\t// remove from connected targets tracking\n\tts.mu.Lock()\n\tdelete(ts.connectedTargets, tt.ID)\n\tts.mu.Unlock()\n\n\t_, _, err := ts.store.Delete(\"targets\", tt.ID)\n\tif err != nil {\n\t\tts.logger.Error(\"failed to delete tunnel target from configStore\", \"error\", err)\n\t}\n\treturn nil\n}\n\nfunc (ts *tunnelServer) registerHandler(ss tunnel.ServerSession) error {\n\treturn nil\n}\n\nfunc (ts *tunnelServer) serverHandler(ss tunnel.ServerSession, rwc io.ReadWriteCloser) error {\n\treturn nil\n}\n\nfunc (ts *tunnelServer) getTunnelTargetMatch(tt tunnel.Target) *types.TargetConfig {\n\tmatchingConfigs, err := ts.store.List(\"tunnel-target-matches\", func(key string, value any) bool {\n\t\tswitch tm := value.(type) {\n\t\tcase *config.TunnelTargetMatch:\n\t\t\t// check if the registering target matches corresponding ID\n\t\t\tok, err := regexp.MatchString(tm.ID, tt.ID)\n\t\t\tif err != nil {\n\t\t\t\tts.logger.Error(\"regex eval failed with string\", \"error\", err, \"id\", tm.ID, \"target\", tt.ID)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !ok {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// check if the registering target matches corresponding type\n\t\t\tok, err = regexp.MatchString(tm.Type, tt.Type)\n\t\t\tif err != nil {\n\t\t\t\tts.logger.Error(\"regex eval failed with string\", \"error\", err, \"type\", tm.Type, \"target\", tt.Type)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !ok 
{\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// target has a match,\n\t\t\ttc := new(types.TargetConfig)\n\t\t\t*tc = tm.Config\n\t\t\ttc.Name = tt.ID\n\t\t\ttc.TunnelTargetType = tt.Type\n\t\t\terr = config.SetTargetConfigDefaults(ts.store, tc)\n\t\t\tif err != nil {\n\t\t\t\tts.logger.Error(\"failed to set target config defaults\", \"error\", err, \"id\", tt.ID, \"type\", tt.Type)\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn false\n\t})\n\tif err != nil {\n\t\tts.logger.Error(\"failed to list tunnel target matches\", \"error\", err)\n\t\treturn nil\n\t}\n\tif len(matchingConfigs) == 0 {\n\t\treturn nil\n\t}\n\t// get keys and sort them\n\tkeys := make([]string, 0, len(matchingConfigs))\n\tfor key := range matchingConfigs {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\t// take the first match and set the target config defaults\n\tmconfig := matchingConfigs[keys[0]].(*config.TunnelTargetMatch)\n\ttc := new(types.TargetConfig)\n\t*tc = mconfig.Config\n\ttc.Name = tt.ID\n\ttc.TunnelTargetType = tt.Type\n\terr = config.SetTargetConfigDefaults(ts.store, tc)\n\tif err != nil {\n\t\tts.logger.Error(\"failed to set target config defaults\", \"error\", err, \"id\", tt.ID, \"type\", tt.Type)\n\t\treturn nil\n\t}\n\n\treturn tc\n}\n\n// watchTunnelTargetMatches watches for changes to tunnel-target-matches and\n// reconciles all connected tunnel targets when a match is created, updated, or deleted.\nfunc (ts *tunnelServer) watchTunnelTargetMatches(ctx context.Context, ch <-chan *store.Event[any], cancel func()) {\n\tdefer cancel()\n\tts.logger.Info(\"starting tunnel-target-matches watcher\")\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tts.logger.Info(\"tunnel-target-matches watcher stopped\")\n\t\t\treturn\n\t\tcase ev, ok := <-ch:\n\t\t\tif !ok {\n\t\t\t\tts.logger.Info(\"tunnel-target-matches watch channel closed\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tts.logger.Info(\"tunnel-target-match changed, reconciling connected targets\",\n\t\t\t\t\"eventType\", 
ev.EventType,\n\t\t\t\t\"matchID\", ev.Name,\n\t\t\t)\n\t\t\tts.reconcileConnectedTargets()\n\t\t}\n\t}\n}\n\n// reconcileConnectedTargets re-evaluates all connected tunnel targets against\n// the current set of tunnel-target-matches. This is called when a match rule\n// is created, updated, or deleted.\n//\n// For each connected target:\n//   - If it matches a rule: upsert the target config (create or update)\n//   - If it doesn't match any rule: delete the target config\n//\n// We hold the lock for the entire reconciliation to prevent a race where a target\n// deregisters (and gets deleted from the store) while we're processing it, which\n// would cause us to recreate an orphaned target config.\nfunc (ts *tunnelServer) reconcileConnectedTargets() {\n\tts.mu.Lock()\n\tdefer ts.mu.Unlock()\n\n\tts.logger.Info(\"reconciling connected tunnel targets\", \"count\", len(ts.connectedTargets))\n\n\tfor _, tt := range ts.connectedTargets {\n\t\ttc := ts.getTunnelTargetMatch(tt)\n\t\tif tc != nil {\n\t\t\t// target matches a rule ==> upsert the target config\n\t\t\tts.logger.Debug(\"tunnel target matches rule, upserting config\",\n\t\t\t\t\"targetID\", tt.ID,\n\t\t\t\t\"targetType\", tt.Type,\n\t\t\t)\n\t\t\t_, err := ts.store.Set(\"targets\", tc.Name, tc)\n\t\t\tif err != nil {\n\t\t\t\tts.logger.Error(\"failed to upsert tunnel target config\",\n\t\t\t\t\t\"targetID\", tt.ID,\n\t\t\t\t\t\"error\", err,\n\t\t\t\t)\n\t\t\t}\n\t\t} else {\n\t\t\t// target no longer matches any rule ==> delete the target config\n\t\t\tts.logger.Debug(\"tunnel target no longer matches any rule, deleting config\",\n\t\t\t\t\"targetID\", tt.ID,\n\t\t\t\t\"targetType\", tt.Type,\n\t\t\t)\n\t\t\t_, _, err := ts.store.Delete(\"targets\", tt.ID)\n\t\t\tif err != nil {\n\t\t\t\tts.logger.Error(\"failed to delete tunnel target config\",\n\t\t\t\t\t\"targetID\", tt.ID,\n\t\t\t\t\t\"error\", err,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n\n\tts.logger.Info(\"tunnel target reconciliation complete\", \"count\", 
len(ts.connectedTargets))\n}\n"
  },
  {
    "path": "pkg/collector/store/store.go",
    "content": "// © 2026 Nokia.\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage store\n\nimport (\n\tzstore \"github.com/zestor-dev/zestor/store\"\n\t\"github.com/zestor-dev/zestor/store/gomap\"\n)\n\n// Store wraps both the config store and the state store.\n// The config store holds user-defined configuration (targets, subscriptions, outputs, inputs, etc.).\n// The state store holds runtime state for each component (running, stopped, failed, etc.).\ntype Store struct {\n\tConfig zstore.Store[any]\n\tState  zstore.Store[any]\n}\n\n// NewStore creates a new Store with the given config store and a fresh\n// in-memory state store.\nfunc NewStore(configStore zstore.Store[any]) *Store {\n\treturn &Store{\n\t\tConfig: configStore,\n\t\tState:  gomap.NewMemStore(zstore.StoreOptions[any]{}),\n\t}\n}\n"
  },
  {
    "path": "pkg/collector/store/types.go",
    "content": "// © 2026 Nokia.\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage store\n\nimport \"time\"\n\n// State constants shared across all component types.\nconst (\n\tIntendedStateEnabled  = \"enabled\"\n\tIntendedStateDisabled = \"disabled\"\n\n\tStateRunning  = \"running\"\n\tStateStopped  = \"stopped\"\n\tStateStarting = \"starting\"\n\tStateFailed   = \"failed\"\n\tStatePaused   = \"paused\"\n\tStateStopping = \"stopping\"\n)\n\n// Kind names used in the state store.\nconst (\n\tKindTargets             = \"targets\"\n\tKindOutputs             = \"outputs\"\n\tKindInputs              = \"inputs\"\n\tKindSubscriptions       = \"subscriptions\"\n\tKindProcessors          = \"processors\"\n\tKindAssignments         = \"assignments\"\n\tKindTunnelTargetMatches = \"tunnel-target-matches\"\n)\n\n// ComponentState is the base state shared by all managed components.\ntype ComponentState struct {\n\t// Name          string    `json:\"name\"`\n\tIntendedState string    `json:\"intended-state\"`          // enabled|disabled\n\tState         string    `json:\"state\"`                   // running|stopped|starting|failed|paused|stopping\n\tFailedReason  string    `json:\"failed-reason,omitempty\"` // last error message\n\tLastUpdated   time.Time `json:\"last-updated\"`            // timestamp of last state transition\n}\n\n// TargetState extends ComponentState with target-specific fields.\ntype TargetState struct {\n\tComponentState\n\tConnectionState string            `json:\"connection-state,omitempty\"` // gRPC connectivity: 
READY|CONNECTING|TRANSIENT_FAILURE|...\n\tSubscriptions   map[string]string `json:\"subscriptions,omitempty\"`    // subscription_name -> running|stopped\n}\n\n// OutputState extends ComponentState with output-specific fields.\ntype OutputState struct {\n\tComponentState\n}\n\n// InputState extends ComponentState with input-specific fields.\ntype InputState struct {\n\tComponentState\n}\n\n// SubscriptionState tracks a subscription's aggregate state across targets.\ntype SubscriptionState struct {\n\tComponentState\n\tTargets map[string]string `json:\"targets,omitempty\"` // target_name -> running|stopped|starting|failed|paused|stopping\n}\n"
  },
  {
    "path": "pkg/config/actions.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/openconfig/gnmic/pkg/actions\"\n)\n\nfunc (c *Config) GetActions() (map[string]map[string]interface{}, error) {\n\tfor name, actc := range c.FileConfig.GetStringMap(\"actions\") {\n\t\tswitch actc := actc.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tc.logger.Printf(\"validating action %q config\", name)\n\t\t\terr := c.validateActionsConfig(actc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// set action name if not configured\n\t\t\tif cname, ok := actc[\"name\"]; !ok || cname == \"\" {\n\t\t\t\tactc[\"name\"] = name\n\t\t\t}\n\t\t\tfor nn, a := range actc {\n\t\t\t\tactc[nn] = convert(a)\n\t\t\t}\n\t\t\tc.Actions[name] = actc\n\t\tcase nil:\n\t\t\treturn nil, fmt.Errorf(\"empty action %q config\", name)\n\t\tdefault:\n\t\t\tc.logger.Printf(\"malformed action config, %+v\", actc)\n\t\t\treturn nil, fmt.Errorf(\"malformed action config, got %T\", actc)\n\t\t}\n\t}\n\tfor n := range c.Actions {\n\t\texpandMapEnv(c.Actions[n],\n\t\t\texpandExcept(\n\t\t\t\t\"target\", \"paths\", \"values\", // gnmi action templates\n\t\t\t\t\"url\", \"body\", // http action templates\n\t\t\t\t\"template\", // template action templates\n\t\t\t))\n\t}\n\tif c.Debug {\n\t\tc.logger.Printf(\"actions: %+v\", c.Actions)\n\t}\n\treturn c.Actions, nil\n}\n\nfunc (c *Config) validateActionsConfig(acfg map[string]interface{}) error {\n\tif aType, ok := acfg[\"type\"]; ok {\n\t\tswitch aType := aType.(type) {\n\t\tcase string:\n\t\t\tif !strInlist(aType, 
actions.ActionTypes) {\n\t\t\t\treturn fmt.Errorf(\"unknown action type: %s, must be one of %q\", aType, actions.ActionTypes)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unexpected action type variable type, expecting string, got %T\", aType)\n\t\t}\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"missing action type under %+v\", acfg)\n}\n"
  },
  {
    "path": "pkg/config/api_server.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\nconst (\n\tdefaultAPIServerAddress = \":7890\"\n\tdefaultAPIServerTimeout = 10 * time.Second\n\ttrueString              = \"true\"\n)\n\ntype APIServer struct {\n\tAddress               string           `mapstructure:\"address,omitempty\" json:\"address,omitempty\"`\n\tTimeout               time.Duration    `mapstructure:\"timeout,omitempty\" json:\"timeout,omitempty\"`\n\tTLS                   *types.TLSConfig `mapstructure:\"tls,omitempty\" json:\"tls,omitempty\"`\n\tEnableMetrics         bool             `mapstructure:\"enable-metrics,omitempty\" json:\"enable-metrics,omitempty\"`\n\tEnableProfiling       bool             `mapstructure:\"enable-profiling,omitempty\" json:\"enable-profiling,omitempty\"`\n\tDebug                 bool             `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\tHealthzDisableLogging bool             `mapstructure:\"healthz-disable-logging,omitempty\" json:\"healthz-disable-logging,omitempty\"`\n}\n\nfunc (c *Config) GetAPIServer() error {\n\tif !c.FileConfig.IsSet(\"api-server\") && c.API == \"\" {\n\t\treturn nil\n\t}\n\tc.APIServer = new(APIServer)\n\tc.APIServer.Address = os.ExpandEnv(c.FileConfig.GetString(\"api-server/address\"))\n\tif c.APIServer.Address == \"\" {\n\t\tc.APIServer.Address = os.ExpandEnv(c.FileConfig.GetString(\"api\"))\n\t}\n\tc.APIServer.Timeout = c.FileConfig.GetDuration(\"api-server/timeout\")\n\tif 
c.FileConfig.IsSet(\"api-server/tls\") {\n\t\tc.APIServer.TLS = new(types.TLSConfig)\n\t\tc.APIServer.TLS.CaFile = os.ExpandEnv(c.FileConfig.GetString(\"api-server/tls/ca-file\"))\n\t\tc.APIServer.TLS.CertFile = os.ExpandEnv(c.FileConfig.GetString(\"api-server/tls/cert-file\"))\n\t\tc.APIServer.TLS.KeyFile = os.ExpandEnv(c.FileConfig.GetString(\"api-server/tls/key-file\"))\n\t\tc.APIServer.TLS.ClientAuth = os.ExpandEnv(c.FileConfig.GetString(\"api-server/tls/client-auth\"))\n\t\tif err := c.APIServer.TLS.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"api-server TLS config error: %w\", err)\n\t\t}\n\t}\n\n\tc.APIServer.EnableMetrics = os.ExpandEnv(c.FileConfig.GetString(\"api-server/enable-metrics\")) == trueString\n\tc.APIServer.EnableProfiling = os.ExpandEnv(c.FileConfig.GetString(\"api-server/enable-profiling\")) == trueString\n\tc.APIServer.Debug = os.ExpandEnv(c.FileConfig.GetString(\"api-server/debug\")) == trueString\n\tc.APIServer.HealthzDisableLogging = os.ExpandEnv(c.FileConfig.GetString(\"api-server/healthz-disable-logging\")) == trueString\n\tc.setAPIServerDefaults()\n\treturn nil\n}\n\nfunc (c *Config) setAPIServerDefaults() {\n\tif c.APIServer.Address == \"\" {\n\t\tc.APIServer.Address = defaultAPIServerAddress\n\t}\n\tif c.APIServer.Timeout <= 0 {\n\t\tc.APIServer.Timeout = defaultAPIServerTimeout\n\t}\n}\n"
  },
  {
    "path": "pkg/config/clustering.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/google/uuid\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\nconst (\n\tminTargetWatchTimer            = 20 * time.Second\n\tdefaultTargetAssignmentTimeout = 10 * time.Second\n\tdefaultServicesWatchTimer      = 1 * time.Minute\n\tdefaultLeaderWaitTimer         = 5 * time.Second\n)\n\ntype Clustering struct {\n\tClusterName             string                 `mapstructure:\"cluster-name,omitempty\" json:\"cluster-name,omitempty\" yaml:\"cluster-name,omitempty\"`\n\tInstanceName            string                 `mapstructure:\"instance-name,omitempty\" json:\"instance-name,omitempty\" yaml:\"instance-name,omitempty\"`\n\tServiceAddress          string                 `mapstructure:\"service-address,omitempty\" json:\"service-address,omitempty\" yaml:\"service-address,omitempty\"`\n\tServicesWatchTimer      time.Duration          `mapstructure:\"services-watch-timer,omitempty\" json:\"services-watch-timer,omitempty\" yaml:\"services-watch-timer,omitempty\"`\n\tTargetsWatchTimer       time.Duration          `mapstructure:\"targets-watch-timer,omitempty\" json:\"targets-watch-timer,omitempty\" yaml:\"targets-watch-timer,omitempty\"`\n\tTargetAssignmentTimeout time.Duration          `mapstructure:\"target-assignment-timeout,omitempty\" json:\"target-assignment-timeout,omitempty\" yaml:\"target-assignment-timeout,omitempty\"`\n\tLeaderWaitTimer         time.Duration          `mapstructure:\"leader-wait-timer,omitempty\" 
json:\"leader-wait-timer,omitempty\" yaml:\"leader-wait-timer,omitempty\"`\n\tTags                    []string               `mapstructure:\"tags,omitempty\" json:\"tags,omitempty\" yaml:\"tags,omitempty\"`\n\tLocker                  map[string]interface{} `mapstructure:\"locker,omitempty\" json:\"locker,omitempty\" yaml:\"locker,omitempty\"`\n\tTLS                     *types.TLSConfig       `mapstructure:\"tls,omitempty\" json:\"tls,omitempty\" yaml:\"tls,omitempty\"`\n}\n\nfunc (c *Config) GetClustering() error {\n\tif !c.FileConfig.IsSet(\"clustering\") {\n\t\treturn nil\n\t}\n\tc.Clustering = new(Clustering)\n\tc.Clustering.ClusterName = os.ExpandEnv(c.FileConfig.GetString(\"clustering/cluster-name\"))\n\tc.Clustering.InstanceName = os.ExpandEnv(c.FileConfig.GetString(\"clustering/instance-name\"))\n\tc.Clustering.ServiceAddress = os.ExpandEnv(c.FileConfig.GetString(\"clustering/service-address\"))\n\tc.Clustering.TargetsWatchTimer = c.FileConfig.GetDuration(\"clustering/targets-watch-timer\")\n\tc.Clustering.TargetAssignmentTimeout = c.FileConfig.GetDuration(\"clustering/target-assignment-timeout\")\n\tc.Clustering.ServicesWatchTimer = c.FileConfig.GetDuration(\"clustering/services-watch-timer\")\n\tc.Clustering.LeaderWaitTimer = c.FileConfig.GetDuration(\"clustering/leader-wait-timer\")\n\tc.Clustering.Tags = c.FileConfig.GetStringSlice(\"clustering/tags\")\n\tfor i := range c.Clustering.Tags {\n\t\tc.Clustering.Tags[i] = os.ExpandEnv(c.Clustering.Tags[i])\n\t}\n\tif c.FileConfig.IsSet(\"clustering/tls\") {\n\t\tc.Clustering.TLS = new(types.TLSConfig)\n\t\tc.Clustering.TLS.CaFile = os.ExpandEnv(c.FileConfig.GetString(\"clustering/tls/ca-file\"))\n\t\tc.Clustering.TLS.CertFile = os.ExpandEnv(c.FileConfig.GetString(\"clustering/tls/cert-file\"))\n\t\tc.Clustering.TLS.KeyFile = os.ExpandEnv(c.FileConfig.GetString(\"clustering/tls/key-file\"))\n\t\tc.Clustering.TLS.SkipVerify = os.ExpandEnv(c.FileConfig.GetString(\"clustering/tls/skip-verify\")) == 
trueString\n\t\tif err := c.Clustering.TLS.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"clustering TLS config error: %w\", err)\n\t\t}\n\t}\n\tc.setClusteringDefaults()\n\treturn c.getLocker()\n}\n\nfunc (c *Config) setClusteringDefaults() {\n\t// set $clustering.cluster-name to $cluster-name if it's empty string\n\tif c.Clustering.ClusterName == \"\" {\n\t\tc.Clustering.ClusterName = c.ClusterName\n\t\t// otherwise, set $cluster-name to $clustering.cluster-name\n\t} else {\n\t\tc.ClusterName = c.Clustering.ClusterName\n\t}\n\t// set clustering.instance-name to instance-name\n\tif c.Clustering.InstanceName == \"\" {\n\t\tif c.InstanceName != \"\" {\n\t\t\tc.Clustering.InstanceName = c.InstanceName\n\t\t} else {\n\t\t\tc.Clustering.InstanceName = \"gnmic-\" + uuid.New().String()\n\t\t}\n\t} else {\n\t\tc.InstanceName = c.Clustering.InstanceName\n\t}\n\t// the timers are set to less than the min allowed value,\n\t// make them default to that min value.\n\tif c.Clustering.TargetsWatchTimer < minTargetWatchTimer {\n\t\tc.Clustering.TargetsWatchTimer = minTargetWatchTimer\n\t}\n\tif c.Clustering.TargetAssignmentTimeout < defaultTargetAssignmentTimeout {\n\t\tc.Clustering.TargetAssignmentTimeout = defaultTargetAssignmentTimeout\n\t}\n\tif c.Clustering.ServicesWatchTimer <= defaultServicesWatchTimer {\n\t\tc.Clustering.ServicesWatchTimer = defaultServicesWatchTimer\n\t}\n\tif c.Clustering.LeaderWaitTimer <= defaultLeaderWaitTimer {\n\t\tc.Clustering.LeaderWaitTimer = defaultLeaderWaitTimer\n\t}\n}\n"
  },
  {
    "path": "pkg/config/config.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"github.com/adrg/xdg\"\n\t\"github.com/itchyny/gojq\"\n\t\"github.com/mitchellh/go-homedir\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n\t\"github.com/spf13/viper\"\n\t\"gopkg.in/natefinch/lumberjack.v2\"\n\tyaml \"gopkg.in/yaml.v2\"\n\n\t\"github.com/openconfig/gnmic/pkg/api\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\tgfile \"github.com/openconfig/gnmic/pkg/file\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tconfigName      = \".gnmic\"\n\tconfigLogPrefix = \"[config] \"\n\tenvPrefix       = \"GNMIC\"\n\ttrimChars       = \" \\r\\n\\t\"\n)\n\nvar ErrInvalidConfig = errors.New(\"invalid configuration\")\n\nvar osPathFlags = []string{\"tls-ca\", \"tls-cert\", \"tls-key\"}\n\ntype Config struct {\n\tGlobalFlags `mapstructure:\",squash\"`\n\tLocalFlags  `mapstructure:\",squash\"`\n\tFileConfig  *viper.Viper `mapstructure:\"-\" json:\"-\" yaml:\"-\" `\n\n\tTargets       map[string]*types.TargetConfig       `mapstructure:\"targets,omitempty\" json:\"targets,omitempty\" yaml:\"targets,omitempty\"`\n\tSubscriptions map[string]*types.SubscriptionConfig `mapstructure:\"subscriptions,omitempty\" json:\"subscriptions,omitempty\" yaml:\"subscriptions,omitempty\"`\n\tOutputs       
map[string]map[string]any            `mapstructure:\"outputs,omitempty\" json:\"outputs,omitempty\" yaml:\"outputs,omitempty\"`\n\tInputs        map[string]map[string]any            `mapstructure:\"inputs,omitempty\" json:\"inputs,omitempty\" yaml:\"inputs,omitempty\"`\n\tProcessors    map[string]map[string]any            `mapstructure:\"processors,omitempty\" json:\"processors,omitempty\" yaml:\"processors,omitempty\"`\n\tClustering    *Clustering                          `mapstructure:\"clustering,omitempty\" json:\"clustering,omitempty\" yaml:\"clustering,omitempty\"`\n\tGnmiServer    *GNMIServer                          `mapstructure:\"gnmi-server,omitempty\" json:\"gnmi-server,omitempty\" yaml:\"gnmi-server,omitempty\"`\n\tAPIServer     *APIServer                           `mapstructure:\"api-server,omitempty\" json:\"api-server,omitempty\" yaml:\"api-server,omitempty\"`\n\tLoader        map[string]any                       `mapstructure:\"loader,omitempty\" json:\"loader,omitempty\" yaml:\"loader,omitempty\"`\n\tActions       map[string]map[string]any            `mapstructure:\"actions,omitempty\" json:\"actions,omitempty\" yaml:\"actions,omitempty\"`\n\tTunnelServer  *TunnelServer                        `mapstructure:\"tunnel-server,omitempty\" json:\"tunnel-server,omitempty\" yaml:\"tunnel-server,omitempty\"`\n\t//\n\tlogger             *log.Logger\n\tsetRequestTemplate []*template.Template\n\tsetRequestVars     map[string]any\n}\n\nvar ValueTypes = []string{\"json\", \"json_ietf\", \"string\", \"int\", \"uint\", \"bool\", \"decimal\", \"float\", \"bytes\", \"ascii\"}\n\ntype GlobalFlags struct {\n\tCfgFile       string\n\tAddress       []string      `mapstructure:\"address,omitempty\" json:\"address,omitempty\" yaml:\"address,omitempty\"`\n\tUsername      string        `mapstructure:\"username,omitempty\" json:\"username,omitempty\" yaml:\"username,omitempty\"`\n\tPassword      string        `mapstructure:\"password,omitempty\" json:\"password,omitempty\" 
yaml:\"password,omitempty\"`\n\tPort          string        `mapstructure:\"port,omitempty\" json:\"port,omitempty\" yaml:\"port,omitempty\"`\n\tEncoding      string        `mapstructure:\"encoding,omitempty\" json:\"encoding,omitempty\" yaml:\"encoding,omitempty\"`\n\tInsecure      bool          `mapstructure:\"insecure,omitempty\" json:\"insecure,omitempty\" yaml:\"insecure,omitempty\"`\n\tTLSCa         string        `mapstructure:\"tls-ca,omitempty\" json:\"tls-ca,omitempty\" yaml:\"tls-ca,omitempty\"`\n\tTLSCert       string        `mapstructure:\"tls-cert,omitempty\" json:\"tls-cert,omitempty\" yaml:\"tls-cert,omitempty\"`\n\tTLSKey        string        `mapstructure:\"tls-key,omitempty\" json:\"tls-key,omitempty\" yaml:\"tls-key,omitempty\"`\n\tTLSMinVersion string        `mapstructure:\"tls-min-version,omitempty\" json:\"tls-min-version,omitempty\" yaml:\"tls-min-version,omitempty\"`\n\tTLSMaxVersion string        `mapstructure:\"tls-max-version,omitempty\" json:\"tls-max-version,omitempty\" yaml:\"tls-max-version,omitempty\"`\n\tTLSVersion    string        `mapstructure:\"tls-version,omitempty\" json:\"tls-version,omitempty\" yaml:\"tls-version,omitempty\"`\n\tTLSServerName string        `mapstructure:\"tls-server-name,omitempty\" json:\"tls-server-name,omitempty\" yaml:\"tls-server-name,omitempty\"`\n\tLogTLSSecret  bool          `mapstructure:\"log-tls-secret,omitempty\" json:\"log-tls-secret,omitempty\" yaml:\"log-tls-secret,omitempty\"`\n\tTimeout       time.Duration `mapstructure:\"timeout,omitempty\" json:\"timeout,omitempty\" yaml:\"timeout,omitempty\"`\n\tDebug         bool          `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\" yaml:\"debug,omitempty\"`\n\tEnablePprof   bool          `mapstructure:\"enable-pprof,omitempty\" json:\"enable-pprof,omitempty\" yaml:\"enable-pprof,omitempty\"`\n\tPprofAddr     string        `mapstructure:\"pprof-addr,omitempty\" json:\"pprof-addr,omitempty\" yaml:\"pprof-addr,omitempty\"`\n\tSkipVerify    
bool          `mapstructure:\"skip-verify,omitempty\" json:\"skip-verify,omitempty\" yaml:\"skip-verify,omitempty\"`\n\tNoPrefix      bool          `mapstructure:\"no-prefix,omitempty\" json:\"no-prefix,omitempty\" yaml:\"no-prefix,omitempty\"`\n\tProxyFromEnv  bool          `mapstructure:\"proxy-from-env,omitempty\" json:\"proxy-from-env,omitempty\" yaml:\"proxy-from-env,omitempty\"`\n\tFormat        string        `mapstructure:\"format,omitempty\" json:\"format,omitempty\" yaml:\"format,omitempty\"`\n\tLogFile       string        `mapstructure:\"log-file,omitempty\" json:\"log-file,omitempty\" yaml:\"log-file,omitempty\"`\n\tLog           bool          `mapstructure:\"log,omitempty\" json:\"log,omitempty\" yaml:\"log,omitempty\"`\n\tLogMaxSize    int           `mapstructure:\"log-max-size,omitempty\" json:\"log-max-size,omitempty\" yaml:\"log-max-size,omitempty\"`\n\tLogMaxBackups int           `mapstructure:\"log-max-backups,omitempty\" json:\"log-max-backups,omitempty\" yaml:\"log-max-backups,omitempty\"`\n\tLogCompress   bool          `mapstructure:\"log-compress,omitempty\" json:\"log-compress,omitempty\" yaml:\"log-compress,omitempty\"`\n\tMaxMsgSize    int           `mapstructure:\"max-msg-size,omitempty\" json:\"max-msg-size,omitempty\" yaml:\"max-msg-size,omitempty\"`\n\t//PrometheusAddress string        `mapstructure:\"prometheus-address,omitempty\" json:\"prometheus-address,omitempty\" yaml:\"prometheus-address,omitempty\"`\n\tPrintRequest         bool          `mapstructure:\"print-request,omitempty\" json:\"print-request,omitempty\" yaml:\"print-request,omitempty\"`\n\tRetry                time.Duration `mapstructure:\"retry,omitempty\" json:\"retry,omitempty\" yaml:\"retry,omitempty\"`\n\tTargetBufferSize     uint          `mapstructure:\"target-buffer-size,omitempty\" json:\"target-buffer-size,omitempty\" yaml:\"target-buffer-size,omitempty\"`\n\tClusterName          string        `mapstructure:\"cluster-name,omitempty\" 
json:\"cluster-name,omitempty\" yaml:\"cluster-name,omitempty\"`\n\tInstanceName         string        `mapstructure:\"instance-name,omitempty\" json:\"instance-name,omitempty\" yaml:\"instance-name,omitempty\"`\n\tAPI                  string        `mapstructure:\"api,omitempty\" json:\"api,omitempty\" yaml:\"api,omitempty\"`\n\tProtoFile            []string      `mapstructure:\"proto-file,omitempty\" json:\"proto-file,omitempty\" yaml:\"proto-file,omitempty\"`\n\tProtoDir             []string      `mapstructure:\"proto-dir,omitempty\" json:\"proto-dir,omitempty\" yaml:\"proto-dir,omitempty\"`\n\tRegisteredExtensions []string      `mapstructure:\"registered-extensions,omitempty\" json:\"registered-extensions,omitempty\" yaml:\"registered-extensions,omitempty\"`\n\tRequestExtensions    string        `mapstructure:\"request-extensions,omitempty\" json:\"request-extensions,omitempty\" yaml:\"request-extensions,omitempty\"`\n\tTargetsFile          string        `mapstructure:\"targets-file,omitempty\" json:\"targets-file,omitempty\" yaml:\"targets-file,omitempty\"`\n\tGzip                 bool          `mapstructure:\"gzip,omitempty\" json:\"gzip,omitempty\" yaml:\"gzip,omitempty\"`\n\tFile                 []string      `mapstructure:\"file,omitempty\" json:\"file,omitempty\" yaml:\"file,omitempty\"`\n\tDir                  []string      `mapstructure:\"dir,omitempty\" json:\"dir,omitempty\" yaml:\"dir,omitempty\"`\n\tExclude              []string      `mapstructure:\"exclude,omitempty\" json:\"exclude,omitempty\" yaml:\"exclude,omitempty\"`\n\tToken                string        `mapstructure:\"token,omitempty\" json:\"token,omitempty\" yaml:\"token,omitempty\"`\n\tUseTunnelServer      bool          `mapstructure:\"use-tunnel-server,omitempty\" json:\"use-tunnel-server,omitempty\" yaml:\"use-tunnel-server,omitempty\"`\n\tAuthScheme           string        `mapstructure:\"auth-scheme,omitempty\" json:\"auth-scheme,omitempty\" 
yaml:\"auth-scheme,omitempty\"`\n\tCalculateLatency     bool          `mapstructure:\"calculate-latency,omitempty\" json:\"calculate-latency,omitempty\" yaml:\"calculate-latency,omitempty\"`\n\n\tMetadata             map[string]string `mapstructure:\"metadata,omitempty\" json:\"metadata,omitempty\" yaml:\"metadata,omitempty\"`\n\tPluginProcessorsPath string            `mapstructure:\"plugin-processors-path,omitempty\" yaml:\"plugin-processors-path,omitempty\" json:\"plugin-processors-path,omitempty\"`\n}\n\ntype LocalFlags struct {\n\t// Capabilities\n\tCapabilitiesVersion bool `mapstructure:\"capabilities-version,omitempty\" json:\"capabilities-version,omitempty\" yaml:\"capabilities-version,omitempty\"`\n\t// Get\n\tGetPath       []string `mapstructure:\"get-path,omitempty\" json:\"get-path,omitempty\" yaml:\"get-path,omitempty\"`\n\tGetPrefix     string   `mapstructure:\"get-prefix,omitempty\" json:\"get-prefix,omitempty\" yaml:\"get-prefix,omitempty\"`\n\tGetModel      []string `mapstructure:\"get-model,omitempty\" json:\"get-model,omitempty\" yaml:\"get-model,omitempty\"`\n\tGetType       string   `mapstructure:\"get-type,omitempty\" json:\"get-type,omitempty\" yaml:\"get-type,omitempty\"`\n\tGetTarget     string   `mapstructure:\"get-target,omitempty\" json:\"get-target,omitempty\" yaml:\"get-target,omitempty\"`\n\tGetValuesOnly bool     `mapstructure:\"get-values-only,omitempty\" json:\"get-values-only,omitempty\" yaml:\"get-values-only,omitempty\"`\n\tGetProcessor  []string `mapstructure:\"get-processor,omitempty\" json:\"get-processor,omitempty\" yaml:\"get-processor,omitempty\"`\n\tGetDepth      uint32   `mapstructure:\"get-depth,omitempty\" yaml:\"get-depth,omitempty\" json:\"get-depth,omitempty\"`\n\tGetDryRun     bool     `mapstructure:\"get-dry-run,omitempty\" json:\"get-dry-run,omitempty\" yaml:\"get-dry-run,omitempty\"`\n\t// Set\n\tSetPrefix                 string        `mapstructure:\"set-prefix,omitempty\" json:\"set-prefix,omitempty\" 
yaml:\"set-prefix,omitempty\"`\n\tSetDelete                 []string      `mapstructure:\"set-delete,omitempty\" json:\"set-delete,omitempty\" yaml:\"set-delete,omitempty\"`\n\tSetReplace                []string      `mapstructure:\"set-replace,omitempty\" json:\"set-replace,omitempty\" yaml:\"set-replace,omitempty\"`\n\tSetUnionReplace           []string      `mapstructure:\"set-union-replace,omitempty\" json:\"set-union-replace,omitempty\" yaml:\"set-union-replace,omitempty\"`\n\tSetUpdate                 []string      `mapstructure:\"set-update,omitempty\" json:\"set-update,omitempty\" yaml:\"set-update,omitempty\"`\n\tSetReplacePath            []string      `mapstructure:\"set-replace-path,omitempty\" json:\"set-replace-path,omitempty\" yaml:\"set-replace-path,omitempty\"`\n\tSetUpdatePath             []string      `mapstructure:\"set-update-path,omitempty\" json:\"set-update-path,omitempty\" yaml:\"set-update-path,omitempty\"`\n\tSetReplaceFile            []string      `mapstructure:\"set-replace-file,omitempty\" json:\"set-replace-file,omitempty\" yaml:\"set-replace-file,omitempty\"`\n\tSetUpdateFile             []string      `mapstructure:\"set-update-file,omitempty\" json:\"set-update-file,omitempty\" yaml:\"set-update-file,omitempty\"`\n\tSetReplaceValue           []string      `mapstructure:\"set-replace-value,omitempty\" json:\"set-replace-value,omitempty\" yaml:\"set-replace-value,omitempty\"`\n\tSetUpdateValue            []string      `mapstructure:\"set-update-value,omitempty\" json:\"set-update-value,omitempty\" yaml:\"set-update-value,omitempty\"`\n\tSetUnionReplacePath       []string      `mapstructure:\"set-union-replace-path,omitempty\" yaml:\"set-union-replace-path,omitempty\" json:\"set-union-replace-path,omitempty\"`\n\tSetUnionReplaceValue      []string      `mapstructure:\"set-union-replace-value,omitempty\" yaml:\"set-union-replace-value,omitempty\" json:\"set-union-replace-value,omitempty\"`\n\tSetUnionReplaceFile       []string      
`mapstructure:\"set-union-replace-file,omitempty\" yaml:\"set-union-replace-file,omitempty\" json:\"set-union-replace-file,omitempty\"`\n\tSetDelimiter              string        `mapstructure:\"set-delimiter,omitempty\" json:\"set-delimiter,omitempty\" yaml:\"set-delimiter,omitempty\"`\n\tSetTarget                 string        `mapstructure:\"set-target,omitempty\" json:\"set-target,omitempty\" yaml:\"set-target,omitempty\"`\n\tSetRequestFile            []string      `mapstructure:\"set-request-file,omitempty\" json:\"set-request-file,omitempty\" yaml:\"set-request-file,omitempty\"`\n\tSetRequestVars            string        `mapstructure:\"set-request-vars,omitempty\" json:\"set-request-vars,omitempty\" yaml:\"set-request-vars,omitempty\"`\n\tSetRequestProtoFile       []string      `mapstructure:\"set-proto-request-file,omitempty\" yaml:\"set-proto-request-file,omitempty\" json:\"set-proto-request-file,omitempty\"`\n\tSetDryRun                 bool          `mapstructure:\"set-dry-run,omitempty\" json:\"set-dry-run,omitempty\" yaml:\"set-dry-run,omitempty\"`\n\tSetNoTrim                 bool          `mapstructure:\"set-no-trim,omitempty\" json:\"set-no-trim,omitempty\" yaml:\"set-no-trim,omitempty\"`\n\tSetReplaceCli             []string      `mapstructure:\"set-replace-cli,omitempty\" yaml:\"set-replace-cli,omitempty\" json:\"set-replace-cli,omitempty\"`\n\tSetReplaceCliFile         string        `mapstructure:\"set-replace-cli-file,omitempty\" yaml:\"set-replace-cli-file,omitempty\" json:\"set-replace-cli-file,omitempty\"`\n\tSetUpdateCli              []string      `mapstructure:\"set-update-cli,omitempty\" yaml:\"set-update-cli,omitempty\" json:\"set-update-cli,omitempty\"`\n\tSetUpdateCliFile          string        `mapstructure:\"set-update-cli-file,omitempty\" yaml:\"set-update-cli-file,omitempty\" json:\"set-update-cli-file,omitempty\"`\n\tSetCommitId               string        `mapstructure:\"set-commit-id,omitempty\" yaml:\"set-commit-id,omitempty\" 
json:\"set-commit-id,omitempty\"`\n\tSetCommitRequest          bool          `mapstructure:\"set-commit-request,omitempty\" yaml:\"set-commit-request,omitempty\" json:\"set-commit-request,omitempty\"`\n\tSetCommitRollbackDuration time.Duration `mapstructure:\"set-commit-rollback-duration,omitempty\" yaml:\"set-commit-rollback-duration,omitempty\" json:\"set-commit-rollback-duration,omitempty\"`\n\tSetCommitCancel           bool          `mapstructure:\"set-commit-cancel,omitempty\" yaml:\"set-commit-cancel,omitempty\" json:\"set-commit-cancel,omitempty\"`\n\tSetCommitConfirm          bool          `mapstructure:\"set-commit-confirm,omitempty\" yaml:\"set-commit-confirm,omitempty\" json:\"set-commit-confirm,omitempty\"`\n\t// Sub\n\tSubscribePrefix            string        `mapstructure:\"subscribe-prefix,omitempty\" json:\"subscribe-prefix,omitempty\" yaml:\"subscribe-prefix,omitempty\"`\n\tSubscribePath              []string      `mapstructure:\"subscribe-path,omitempty\" json:\"subscribe-path,omitempty\" yaml:\"subscribe-path,omitempty\"`\n\tSubscribeQos               uint32        `mapstructure:\"subscribe-qos,omitempty\" json:\"subscribe-qos,omitempty\" yaml:\"subscribe-qos,omitempty\"`\n\tSubscribeUpdatesOnly       bool          `mapstructure:\"subscribe-updates-only,omitempty\" json:\"subscribe-updates-only,omitempty\" yaml:\"subscribe-updates-only,omitempty\"`\n\tSubscribeMode              string        `mapstructure:\"subscribe-mode,omitempty\" json:\"subscribe-mode,omitempty\" yaml:\"subscribe-mode,omitempty\"`\n\tSubscribeStreamMode        string        `mapstructure:\"subscribe-stream_mode,omitempty\" json:\"subscribe-stream-mode,omitempty\" yaml:\"subscribe-stream-mode,omitempty\"`\n\tSubscribeSampleInterval    time.Duration `mapstructure:\"subscribe-sample-interval,omitempty\" json:\"subscribe-sample-interval,omitempty\" yaml:\"subscribe-sample-interval,omitempty\"`\n\tSubscribeSuppressRedundant bool          
`mapstructure:\"subscribe-suppress-redundant,omitempty\" json:\"subscribe-suppress-redundant,omitempty\" yaml:\"subscribe-suppress-redundant,omitempty\"`\n\tSubscribeHeartbeatInterval time.Duration `mapstructure:\"subscribe-heartbeat-interval,omitempty\" json:\"subscribe-heartbeat-interval,omitempty\" yaml:\"subscribe-heartbeat-interval,omitempty\"`\n\tSubscribeModel             []string      `mapstructure:\"subscribe-model,omitempty\" json:\"subscribe-model,omitempty\" yaml:\"subscribe-model,omitempty\"`\n\tSubscribeQuiet             bool          `mapstructure:\"subscribe-quiet,omitempty\" json:\"subscribe-quiet,omitempty\" yaml:\"subscribe-quiet,omitempty\"`\n\tSubscribeTarget            string        `mapstructure:\"subscribe-target,omitempty\" json:\"subscribe-target,omitempty\" yaml:\"subscribe-target,omitempty\"`\n\tSubscribeSetTarget         bool          `mapstructure:\"subscribe-set-target,omitempty\" json:\"subscribe-set-target,omitempty\" yaml:\"subscribe-set-target,omitempty\"`\n\tSubscribeName              []string      `mapstructure:\"subscribe-name,omitempty\" json:\"subscribe-name,omitempty\" yaml:\"subscribe-name,omitempty\"`\n\tSubscribeOutput            []string      `mapstructure:\"subscribe-output,omitempty\" json:\"subscribe-output,omitempty\" yaml:\"subscribe-output,omitempty\"`\n\tSubscribeWatchConfig       bool          `mapstructure:\"subscribe-watch-config,omitempty\" json:\"subscribe-watch-config,omitempty\" yaml:\"subscribe-watch-config,omitempty\"`\n\tSubscribeBackoff           time.Duration `mapstructure:\"subscribe-backoff,omitempty\" json:\"subscribe-backoff,omitempty\" yaml:\"subscribe-backoff,omitempty\"`\n\tSubscribeLockRetry         time.Duration `mapstructure:\"subscribe-lock-retry,omitempty\" json:\"subscribe-lock-retry,omitempty\" yaml:\"subscribe-lock-retry,omitempty\"`\n\tSubscribeHistorySnapshot   string        `mapstructure:\"subscribe-history-snapshot,omitempty\" json:\"subscribe-history-snapshot,omitempty\" 
yaml:\"subscribe-history-snapshot,omitempty\"`\n\tSubscribeHistoryStart      string        `mapstructure:\"subscribe-history-start,omitempty\" json:\"subscribe-history-start,omitempty\" yaml:\"subscribe-history-start,omitempty\"`\n\tSubscribeHistoryEnd        string        `mapstructure:\"subscribe-history-end,omitempty\" json:\"subscribe-history-end,omitempty\" yaml:\"subscribe-history-end,omitempty\"`\n\tSubscribeDepth             uint32        `mapstructure:\"subscribe-depth,omitempty\" yaml:\"subscribe-depth,omitempty\" json:\"subscribe-depth,omitempty\"`\n\t// Path\n\tPathPathType   string `mapstructure:\"path-path-type,omitempty\" json:\"path-path-type,omitempty\" yaml:\"path-path-type,omitempty\"`\n\tPathWithDescr  bool   `mapstructure:\"path-descr,omitempty\" json:\"path-descr,omitempty\" yaml:\"path-descr,omitempty\"`\n\tPathWithPrefix bool   `mapstructure:\"path-with-prefix,omitempty\" json:\"path-with-prefix,omitempty\" yaml:\"path-with-prefix,omitempty\"`\n\tPathWithTypes  bool   `mapstructure:\"path-types,omitempty\" json:\"path-types,omitempty\" yaml:\"path-types,omitempty\"`\n\tPathSearch     bool   `mapstructure:\"path-search,omitempty\" json:\"path-search,omitempty\" yaml:\"path-search,omitempty\"`\n\tPathState      bool   `mapstructure:\"path-state,omitempty\" json:\"path-state,omitempty\" yaml:\"path-state,omitempty\"`\n\tPathConfig     bool   `mapstructure:\"path-config,omitempty\" json:\"path-config,omitempty\" yaml:\"path-config,omitempty\"`\n\t// Prompt\n\tPromptFile                  []string `mapstructure:\"prompt-file,omitempty\" json:\"prompt-file,omitempty\" yaml:\"prompt-file,omitempty\"`\n\tPromptExclude               []string `mapstructure:\"prompt-exclude,omitempty\" json:\"prompt-exclude,omitempty\" yaml:\"prompt-exclude,omitempty\"`\n\tPromptDir                   []string `mapstructure:\"prompt-dir,omitempty\" json:\"prompt-dir,omitempty\" yaml:\"prompt-dir,omitempty\"`\n\tPromptMaxSuggestions        uint16   
`mapstructure:\"prompt-max-suggestions,omitempty\" json:\"prompt-max-suggestions,omitempty\" yaml:\"prompt-max-suggestions,omitempty\"`\n\tPromptPrefixColor           string   `mapstructure:\"prompt-prefix-color,omitempty\" json:\"prompt-prefix-color,omitempty\" yaml:\"prompt-prefix-color,omitempty\"`\n\tPromptSuggestionsBGColor    string   `mapstructure:\"prompt-suggestions-bg-color,omitempty\" json:\"prompt-suggestions-bg-color,omitempty\" yaml:\"prompt-suggestions-bg-color,omitempty\"`\n\tPromptDescriptionBGColor    string   `mapstructure:\"prompt-description-bg-color,omitempty\" json:\"prompt-description-bg-color,omitempty\" yaml:\"prompt-description-bg-color,omitempty\"`\n\tPromptSuggestAllFlags       bool     `mapstructure:\"prompt-suggest-all-flags,omitempty\" json:\"prompt-suggest-all-flags,omitempty\" yaml:\"prompt-suggest-all-flags,omitempty\"`\n\tPromptDescriptionWithPrefix bool     `mapstructure:\"prompt-description-with-prefix,omitempty\" json:\"prompt-description-with-prefix,omitempty\" yaml:\"prompt-description-with-prefix,omitempty\"`\n\tPromptDescriptionWithTypes  bool     `mapstructure:\"prompt-description-with-types,omitempty\" json:\"prompt-description-with-types,omitempty\" yaml:\"prompt-description-with-types,omitempty\"`\n\tPromptSuggestWithOrigin     bool     `mapstructure:\"prompt-suggest-with-origin,omitempty\" json:\"prompt-suggest-with-origin,omitempty\" yaml:\"prompt-suggest-with-origin,omitempty\"`\n\t// Listen\n\tListenMaxConcurrentStreams uint32 `mapstructure:\"listen-max-concurrent-streams,omitempty\" json:\"listen-max-concurrent-streams,omitempty\" yaml:\"listen-max-concurrent-streams,omitempty\"`\n\tListenPrometheusAddress    string `mapstructure:\"listen-prometheus-address,omitempty\" json:\"listen-prometheus-address,omitempty\" yaml:\"listen-prometheus-address,omitempty\"`\n\t// VersionUpgrade\n\tUpgradeUsePkg bool `mapstructure:\"upgrade-use-pkg\" json:\"upgrade-use-pkg,omitempty\" yaml:\"upgrade-use-pkg,omitempty\"`\n\t// 
GetSet\n\tGetSetPrefix    string   `mapstructure:\"getset-prefix,omitempty\" json:\"getset-prefix,omitempty\" yaml:\"getset-prefix,omitempty\"`\n\tGetSetGet       string   `mapstructure:\"getset-get,omitempty\" json:\"getset-get,omitempty\" yaml:\"getset-get,omitempty\"`\n\tGetSetModel     []string `mapstructure:\"get-set-model,omitempty\" yaml:\"get-set-model,omitempty\" json:\"get-set-model,omitempty\"`\n\tGetSetTarget    string   `mapstructure:\"getset-target,omitempty\" json:\"getset-target,omitempty\" yaml:\"getset-target,omitempty\"`\n\tGetSetType      string   `mapstructure:\"getset-type,omitempty\" json:\"getset-type,omitempty\" yaml:\"getset-type,omitempty\"`\n\tGetSetCondition string   `mapstructure:\"getset-condition,omitempty\" json:\"getset-condition,omitempty\" yaml:\"getset-condition,omitempty\"`\n\tGetSetUpdate    string   `mapstructure:\"getset-update,omitempty\" json:\"getset-update,omitempty\" yaml:\"getset-update,omitempty\"`\n\tGetSetReplace   string   `mapstructure:\"getset-replace,omitempty\" json:\"getset-replace,omitempty\" yaml:\"getset-replace,omitempty\"`\n\tGetSetDelete    string   `mapstructure:\"getset-delete,omitempty\" json:\"getset-delete,omitempty\" yaml:\"getset-delete,omitempty\"`\n\tGetSetValue     string   `mapstructure:\"getset-value,omitempty\" json:\"getset-value,omitempty\" yaml:\"getset-value,omitempty\"`\n\t// Generate\n\tGenerateOutput     string `mapstructure:\"generate-output,omitempty\" json:\"generate-output,omitempty\" yaml:\"generate-output,omitempty\"`\n\tGenerateJSON       bool   `mapstructure:\"generate-json,omitempty\" json:\"generate-json,omitempty\" yaml:\"generate-json,omitempty\"`\n\tGenerateConfigOnly bool   `mapstructure:\"generate-config-only,omitempty\" json:\"generate-config-only,omitempty\" yaml:\"generate-config-only,omitempty\"`\n\tGeneratePath       string `mapstructure:\"generate-path,omitempty\" json:\"generate-path,omitempty\" yaml:\"generate-path,omitempty\"`\n\tGenerateCamelCase  bool   
`mapstructure:\"generate-camel-case,omitempty\" json:\"generate-camel-case,omitempty\" yaml:\"generate-camel-case,omitempty\"`\n\tGenerateSnakeCase  bool   `mapstructure:\"generate-snake-case,omitempty\" json:\"generate-snake-case,omitempty\" yaml:\"generate-snake-case,omitempty\"`\n\t// Generate Set Request\n\tGenerateSetRequestUpdatePath  []string `mapstructure:\"generate-update-path,omitempty\" json:\"generate-update-path,omitempty\" yaml:\"generate-update-path,omitempty\"`\n\tGenerateSetRequestReplacePath []string `mapstructure:\"generate-replace-path,omitempty\" json:\"generate-replace-path,omitempty\" yaml:\"generate-replace-path,omitempty\"`\n\t// Generate path\n\tGeneratePathWithDescr     bool   `mapstructure:\"generate-descr,omitempty\" json:\"generate-descr,omitempty\" yaml:\"generate-descr,omitempty\"`\n\tGeneratePathWithPrefix    bool   `mapstructure:\"generate-with-prefix,omitempty\" json:\"generate-with-prefix,omitempty\" yaml:\"generate-with-prefix,omitempty\"`\n\tGeneratePathWithTypes     bool   `mapstructure:\"generate-types,omitempty\" json:\"generate-types,omitempty\" yaml:\"generate-types,omitempty\"`\n\tGeneratePathSearch        bool   `mapstructure:\"generate-search,omitempty\" json:\"generate-search,omitempty\" yaml:\"generate-search,omitempty\"`\n\tGeneratePathPathType      string `mapstructure:\"generate-path-path-type,omitempty\" json:\"generate-path-path-type,omitempty\" yaml:\"generate-path-path-type,omitempty\"`\n\tGeneratePathState         bool   `mapstructure:\"generate-path-state,omitempty\" json:\"generate-path-state,omitempty\" yaml:\"generate-path-state,omitempty\"`\n\tGeneratePathConfig        bool   `mapstructure:\"generate-path-config,omitempty\" json:\"generate-path-config,omitempty\" yaml:\"generate-path-config,omitempty\"`\n\tGeneratePathWithNonLeaves bool   `mapstructure:\"generate-path-with-non-leaves,omitempty\" json:\"generate-path-with-non-leaves,omitempty\" 
yaml:\"generate-path-with-non-leaves,omitempty\"`\n\t//\n\tDiffPath                []string `mapstructure:\"diff-path,omitempty\" json:\"diff-path,omitempty\" yaml:\"diff-path,omitempty\"`\n\tDiffPrefix              string   `mapstructure:\"diff-prefix,omitempty\" json:\"diff-prefix,omitempty\" yaml:\"diff-prefix,omitempty\"`\n\tDiffModel               []string `mapstructure:\"diff-model,omitempty\" json:\"diff-model,omitempty\" yaml:\"diff-model,omitempty\"`\n\tDiffType                string   `mapstructure:\"diff-type,omitempty\" json:\"diff-type,omitempty\" yaml:\"diff-type,omitempty\"`\n\tDiffTarget              string   `mapstructure:\"diff-target,omitempty\" json:\"diff-target,omitempty\" yaml:\"diff-target,omitempty\"`\n\tDiffSub                 bool     `mapstructure:\"diff-sub,omitempty\" json:\"diff-sub,omitempty\" yaml:\"diff-sub,omitempty\"`\n\tDiffRef                 string   `mapstructure:\"diff-ref,omitempty\" json:\"diff-ref,omitempty\" yaml:\"diff-ref,omitempty\"`\n\tDiffCompare             []string `mapstructure:\"diff-compare,omitempty\" json:\"diff-compare,omitempty\" yaml:\"diff-compare,omitempty\"`\n\tDiffQos                 uint32   `mapstructure:\"diff-qos,omitempty\" json:\"diff-qos,omitempty\" yaml:\"diff-qos,omitempty\"`\n\tDiffSetRequestRef       string   `mapstructure:\"diff-setrequest-ref,omitempty\" json:\"diff-setrequest-ref,omitempty\" yaml:\"diff-setrequest-ref,omitempty\"`\n\tDiffSetRequestNew       string   `mapstructure:\"diff-setrequest-new,omitempty\" json:\"diff-setrequest-new,omitempty\" yaml:\"diff-setrequest-new,omitempty\"`\n\tDiffSetRequestFull      bool     `mapstructure:\"diff-setrequest-full,omitempty\" json:\"diff-setrequest-full,omitempty\" yaml:\"diff-setrequest-full,omitempty\"`\n\tDiffSetToNotifsSet      string   `mapstructure:\"diff-set-to-notifs-set,omitempty\" json:\"diff-set-to-notifs-set,omitempty\" yaml:\"diff-set-to-notifs-set,omitempty\"`\n\tDiffSetToNotifsResponse string   
`mapstructure:\"diff-set-to-notifs-response,omitempty\" json:\"diff-set-to-notifs-response,omitempty\" yaml:\"diff-set-to-notifs-response,omitempty\"`\n\tDiffSetToNotifsFull     bool     `mapstructure:\"diff-set-to-notifs-full,omitempty\" json:\"diff-set-to-notifs-full,omitempty\" yaml:\"diff-set-to-notifs-full,omitempty\"`\n\t//\n\tTunnelServerSubscribe bool `mapstructure:\"tunnel-server-subscribe,omitempty\" yaml:\"tunnel-server-subscribe,omitempty\" json:\"tunnel-server-subscribe,omitempty\"`\n\t// Processor\n\tProcessorInput          string   `mapstructure:\"processor-input,omitempty\" yaml:\"processor-input,omitempty\" json:\"processor-input,omitempty\"`\n\tProcessorInputDelimiter string   `mapstructure:\"processor-input-delimiter,omitempty\" yaml:\"processor-input-delimiter,omitempty\" json:\"processor-input-delimiter,omitempty\"`\n\tProcessorName           []string `mapstructure:\"processor-name,omitempty\" yaml:\"processor-name,omitempty\" json:\"processor-name,omitempty\"`\n\tProcessorOutput         string   `mapstructure:\"processor-output,omitempty\" yaml:\"processor-output,omitempty\" json:\"processor-output,omitempty\"`\n\t// Tree\n\tTreeFlat    bool `mapstructure:\"tree-flat,omitempty\" yaml:\"tree-flat,omitempty\" json:\"tree-flat,omitempty\"`\n\tTreeDetails bool `mapstructure:\"tree-details,omitempty\" yaml:\"tree-details,omitempty\" json:\"tree-details,omitempty\"`\n}\n\nfunc New() *Config {\n\treturn &Config{\n\t\tGlobalFlags{},\n\t\tLocalFlags{},\n\t\tviper.NewWithOptions(viper.KeyDelimiter(\"/\")),\n\t\tmake(map[string]*types.TargetConfig),\n\t\tmake(map[string]*types.SubscriptionConfig),\n\t\tmake(map[string]map[string]interface{}),\n\t\tmake(map[string]map[string]interface{}),\n\t\tmake(map[string]map[string]interface{}),\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t\tlog.New(io.Discard, configLogPrefix, utils.DefaultLoggingFlags),\n\t\tnil,\n\t\tmake(map[string]interface{}),\n\t}\n}\n\nfunc (c *Config) Load(ctx 
context.Context) error {\n\tc.FileConfig.SetEnvPrefix(envPrefix)\n\tc.FileConfig.SetEnvKeyReplacer(strings.NewReplacer(\"/\", \"_\", \"-\", \"_\"))\n\tc.FileConfig.AutomaticEnv()\n\tif c.GlobalFlags.CfgFile != \"\" {\n\t\t// configuration file path is explicitly set\n\t\tc.FileConfig.SetConfigFile(c.GlobalFlags.CfgFile)\n\t\tconfigBytes, err := gfile.ReadFile(ctx, c.FileConfig.ConfigFileUsed())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = c.FileConfig.ReadConfig(bytes.NewBuffer(configBytes))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t// discover gnmic config file\n\t\thome, err := homedir.Dir()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.FileConfig.AddConfigPath(\".\")\n\t\tc.FileConfig.AddConfigPath(home)\n\t\tc.FileConfig.AddConfigPath(xdg.ConfigHome)\n\t\tc.FileConfig.AddConfigPath(xdg.ConfigHome + \"/gnmic\")\n\t\tc.FileConfig.SetConfigName(configName)\n\t\terr = c.FileConfig.ReadInConfig()\n\t\tif err != nil {\n\t\t\tif _, ok := err.(viper.ConfigFileNotFoundError); !ok {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\terr := c.FileConfig.Unmarshal(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.mergeEnvVars()\n\treturn c.expandOSPathFlagValues()\n}\n\nfunc (c *Config) ToStore(s store.Store[any]) error {\n\ttargets := make(map[string]any)\n\tsubscriptions := make(map[string]any)\n\tprocessors := make(map[string]any)\n\toutputs := make(map[string]any)\n\tinputs := make(map[string]any)\n\tactions := make(map[string]any)\n\t_, err := c.GetTargets()\n\tif err != nil {\n\t\tif !errors.Is(err, ErrNoTargetsFound) {\n\t\t\treturn err\n\t\t}\n\t}\n\t// targets\n\tfor n, t := range c.Targets {\n\t\ttargets[n] = t\n\t}\n\t// subscriptions\n\tfor n, s := range c.Subscriptions {\n\t\tsubscriptions[n] = s\n\t}\n\t// processors\n\tfor n, p := range c.Processors {\n\t\tprocessors[n] = p\n\t}\n\t// outputs\n\tfor n, o := range c.Outputs {\n\t\toutputs[n] = o\n\t}\n\t// inputs\n\tfor n, i := range c.Inputs {\n\t\tinputs[n] = i\n\t}\n\t// 
actions\n\tfor n, a := range c.Actions {\n\t\tactions[n] = a\n\t}\n\t// set all\n\terr = s.SetAll(\"targets\", targets)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = s.SetAll(\"subscriptions\", subscriptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// actions\n\terr = s.SetAll(\"actions\", actions)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = s.SetAll(\"processors\", processors)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = s.SetAll(\"outputs\", outputs)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = s.SetAll(\"inputs\", inputs)\n\tif err != nil {\n\t\treturn err\n\t}\n\t//\n\t_, err = s.Set(\"global-flags\", \"global-flags\", c.GlobalFlags)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// clustering\n\t_, err = s.Set(\"clustering\", \"clustering\", c.Clustering)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// gnmi server\n\t_, err = s.Set(\"gnmi-server\", \"gnmi-server\", c.GnmiServer)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// api server\n\t_, err = s.Set(\"api-server\", \"api-server\", c.APIServer)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// loader\n\t_, err = s.Set(\"loader\", \"loader\", c.Loader)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// tunnel server\n\t_, err = s.Set(\"tunnel-server\", \"tunnel-server\", c.TunnelServer)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *Config) SetLogger() (io.Writer, int, error) {\n\tvar f io.Writer = io.Discard\n\tvar loggingFlags = c.logger.Flags()\n\tvar err error\n\n\tif c.LogFile != \"\" {\n\t\tif c.LogMaxSize > 0 {\n\t\t\tf = &lumberjack.Logger{\n\t\t\t\tFilename:   c.LogFile,\n\t\t\t\tMaxSize:    c.LogMaxSize,\n\t\t\t\tMaxBackups: c.LogMaxBackups,\n\t\t\t\tCompress:   c.LogCompress,\n\t\t\t}\n\t\t} else {\n\t\t\tf, err = os.OpenFile(c.LogFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 0, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif c.Debug {\n\t\t\tc.Log = true\n\t\t}\n\t\tif c.Log {\n\t\t\tf = os.Stderr\n\t\t}\n\t}\n\tif c.Debug 
{\n\t\tloggingFlags |= log.Llongfile\n\t}\n\tc.logger.SetOutput(f)\n\tc.logger.SetFlags(loggingFlags)\n\treturn f, loggingFlags, nil\n}\n\nfunc (c *Config) SetPersistentFlagsFromFile(cmd *cobra.Command) {\n\t// set debug and log values from file before other persistent flags\n\tcmd.PersistentFlags().VisitAll(func(f *pflag.Flag) {\n\t\tif f.Name == \"debug\" || f.Name == \"log\" {\n\t\t\tif !f.Changed && c.FileConfig.IsSet(f.Name) {\n\t\t\t\tc.setFlagValue(cmd, f.Name, c.FileConfig.Get(f.Name))\n\t\t\t}\n\t\t}\n\t})\n\t//\n\tcmd.PersistentFlags().VisitAll(func(f *pflag.Flag) {\n\t\tif f.Name == \"debug\" || f.Name == \"log\" {\n\t\t\treturn\n\t\t}\n\t\tif c.Debug {\n\t\t\tc.logger.Printf(\"cmd=%s, flagName=%s, changed=%v, isSetInFile=%v\",\n\t\t\t\tcmd.Name(), f.Name, f.Changed, c.FileConfig.IsSet(f.Name))\n\t\t}\n\t\tif !f.Changed && c.FileConfig.IsSet(f.Name) {\n\t\t\tc.setFlagValue(cmd, f.Name, c.FileConfig.Get(f.Name))\n\t\t}\n\t})\n}\n\nfunc (c *Config) SetLocalFlagsFromFile(cmd *cobra.Command) {\n\tcmd.LocalFlags().VisitAll(func(f *pflag.Flag) {\n\t\tflagName := fmt.Sprintf(\"%s-%s\", cmd.Name(), f.Name)\n\t\tif c.Debug {\n\t\t\tc.logger.Printf(\"cmd=%s, flagName=%s, changed=%v, isSetInFile=%v\",\n\t\t\t\tcmd.Name(), f.Name, f.Changed, c.FileConfig.IsSet(flagName))\n\t\t}\n\t\tif !f.Changed && c.FileConfig.IsSet(flagName) {\n\t\t\tc.setFlagValue(cmd, f.Name, c.FileConfig.Get(flagName))\n\t\t}\n\t})\n}\n\nfunc (c *Config) setFlagValue(cmd *cobra.Command, fName string, val interface{}) {\n\tswitch val := val.(type) {\n\tcase []interface{}:\n\t\tif c.Debug {\n\t\t\tc.logger.Printf(\"cmd=%s, flagName=%s, valueType=%T, length=%d, value=%#v\",\n\t\t\t\tcmd.Name(), fName, val, len(val), val)\n\t\t}\n\t\tnVal := make([]string, 0, len(val))\n\t\tfor _, v := range val {\n\t\t\tnVal = append(nVal, fmt.Sprintf(\"%v\", v))\n\t\t}\n\t\tcmd.Flags().Set(fName, strings.Join(nVal, \",\"))\n\tdefault:\n\t\tif c.Debug {\n\t\t\tc.logger.Printf(\"cmd=%s, flagName=%s, valueType=%T, 
value=%#v\",\n\t\t\t\tcmd.Name(), fName, val, val)\n\t\t}\n\t\tcmd.Flags().Set(fName, fmt.Sprintf(\"%v\", val))\n\t}\n}\n\nfunc flagIsSet(cmd *cobra.Command, name string) bool {\n\tif cmd == nil {\n\t\treturn false\n\t}\n\tvar isSet bool\n\tcmd.Flags().VisitAll(func(f *pflag.Flag) {\n\t\tif f.Name == name && f.Changed {\n\t\t\tisSet = true\n\t\t\treturn\n\t\t}\n\t})\n\treturn isSet\n}\n\nfunc (c *Config) CreateGetRequest(tc *types.TargetConfig) (*gnmi.GetRequest, error) {\n\tif c == nil {\n\t\treturn nil, fmt.Errorf(\"%w\", ErrInvalidConfig)\n\t}\n\tgnmiOpts := make([]api.GNMIOption, 0, 4+len(c.LocalFlags.GetPath))\n\tenc := c.Encoding\n\tif tc.Encoding != nil {\n\t\tenc = *tc.Encoding\n\t}\n\tgnmiOpts = append(gnmiOpts,\n\t\tapi.Encoding(enc),\n\t\tapi.DataType(c.LocalFlags.GetType),\n\t\tapi.Prefix(c.LocalFlags.GetPrefix),\n\t\tapi.Target(c.LocalFlags.GetTarget),\n\t)\n\tfor _, p := range c.LocalFlags.GetPath {\n\t\tgnmiOpts = append(gnmiOpts, api.Path(strings.TrimSpace(p)))\n\t}\n\tif c.LocalFlags.GetDepth > 0 {\n\t\tgnmiOpts = append(gnmiOpts, api.Extension_Depth(c.LocalFlags.GetDepth))\n\t}\n\n\texts, err := c.parseAdditionalRequestExtensions()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgnmiOpts = append(gnmiOpts, exts...)\n\n\treturn api.NewGetRequest(gnmiOpts...)\n}\n\nfunc (c *Config) CreateGASGetRequest() (*gnmi.GetRequest, error) {\n\tif c == nil {\n\t\treturn nil, fmt.Errorf(\"%w\", ErrInvalidConfig)\n\t}\n\treturn api.NewGetRequest(\n\t\tapi.Encoding(c.Encoding),\n\t\tapi.DataType(c.LocalFlags.GetSetType),\n\t\tapi.Prefix(c.LocalFlags.GetSetPrefix),\n\t\tapi.Target(c.LocalFlags.GetSetTarget),\n\t\tapi.Path(strings.TrimSpace(c.LocalFlags.GetSetGet)))\n}\n\nfunc (c *Config) CreateGASSetRequest(input interface{}) (*gnmi.SetRequest, error) {\n\tgnmiOpts := make([]api.GNMIOption, 0, 3)\n\tgnmiOpts = append(gnmiOpts, api.Prefix(c.LocalFlags.GetSetPrefix))\n\tgnmiOpts = append(gnmiOpts, api.Target(c.LocalFlags.GetSetTarget))\n\n\tdelPath, err := 
c.execPathTemplate(c.LocalFlags.GetSetDelete, input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif delPath != \"\" {\n\t\tgnmiOpts = append(gnmiOpts, api.Delete(delPath))\n\t}\n\t//\n\tupdatePath, err := c.execPathTemplate(c.LocalFlags.GetSetUpdate, input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treplacePath, err := c.execPathTemplate(c.LocalFlags.GetSetReplace, input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tval, err := c.execValueTemplate(c.LocalFlags.GetSetValue, input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif updatePath != \"\" {\n\t\tgnmiOpts = append(gnmiOpts,\n\t\t\tapi.Update(\n\t\t\t\tapi.Path(updatePath),\n\t\t\t\tapi.Value(val, c.Encoding),\n\t\t\t))\n\t} else if replacePath != \"\" {\n\t\tgnmiOpts = append(gnmiOpts,\n\t\t\tapi.Replace(\n\t\t\t\tapi.Path(replacePath),\n\t\t\t\tapi.Value(val, c.Encoding),\n\t\t\t))\n\t}\n\n\treturn api.NewSetRequest(gnmiOpts...)\n}\n\nfunc (c *Config) execPathTemplate(tplString string, input interface{}) (string, error) {\n\tif tplString == \"\" {\n\t\treturn \"\", nil\n\t}\n\ttplString = os.ExpandEnv(tplString)\n\tq, err := gojq.Parse(tplString)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tcode, err := gojq.Compile(q)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\titer := code.Run(input)\n\tvar res interface{}\n\tvar ok bool\n\n\tres, ok = iter.Next()\n\tif !ok {\n\t\tif c.Debug {\n\t\t\tc.logger.Printf(\"jq input: %+v\", input)\n\t\t\tc.logger.Printf(\"jq result: %+v\", res)\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"unexpected jq result type: %T\", res)\n\t}\n\tswitch v := res.(type) {\n\tcase error:\n\t\treturn \"\", v\n\tcase string:\n\t\tc.logger.Printf(\"path jq expression result: %s\", v)\n\t\treturn v, nil\n\tdefault:\n\t\tif c.Debug {\n\t\t\tc.logger.Printf(\"jq input: %+v\", input)\n\t\t\tc.logger.Printf(\"jq result: %+v\", v)\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"unexpected jq result type: %T\", v)\n\t}\n}\n\nfunc (c *Config) execValueTemplate(tplString string, input interface{}) 
(string, error) {\n\tif tplString == \"\" {\n\t\treturn \"\", nil\n\t}\n\ttplString = os.ExpandEnv(tplString)\n\tq, err := gojq.Parse(tplString)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tcode, err := gojq.Compile(q)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\titer := code.Run(input)\n\tvar res interface{}\n\tvar ok bool\n\n\tres, ok = iter.Next()\n\tif !ok {\n\t\tif c.Debug {\n\t\t\tc.logger.Printf(\"jq input: %+v\", input)\n\t\t\tc.logger.Printf(\"jq result: %+v\", res)\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"unexpected jq result type: %T\", res)\n\t}\n\tswitch v := res.(type) {\n\tcase error:\n\t\treturn \"\", v\n\tcase string:\n\t\tc.logger.Printf(\"path jq expression result: %s\", v)\n\t\treturn trimQuotes(v), nil\n\tdefault:\n\t\tif c.Debug {\n\t\t\tc.logger.Printf(\"jq input: %+v\", input)\n\t\t\tc.logger.Printf(\"jq result: %+v\", v)\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"unexpected jq result type: %T\", v)\n\t}\n}\n\nfunc (c *Config) CreateSetRequest(targetName string) ([]*gnmi.SetRequest, error) {\n\tif len(c.SetRequestProtoFile) > 0 {\n\t\treturn c.CreateSetRequestFromProtoFile()\n\t}\n\tif len(c.SetRequestFile) > 0 {\n\t\treturn c.CreateSetRequestFromFile(targetName)\n\t}\n\tif c.Debug {\n\t\tc.logger.Printf(\"Set input delete: %+v\", &c.LocalFlags.SetDelete)\n\t\tc.logger.Printf(\"Set input update: %+v\", &c.LocalFlags.SetUpdate)\n\t\tc.logger.Printf(\"Set input update path(s): %+v\", &c.LocalFlags.SetUpdatePath)\n\t\tc.logger.Printf(\"Set input update value(s): %+v\", &c.LocalFlags.SetUpdateValue)\n\t\tc.logger.Printf(\"Set input update file(s): %+v\", &c.LocalFlags.SetUpdateFile)\n\t\tc.logger.Printf(\"Set input replace: %+v\", &c.LocalFlags.SetReplace)\n\t\tc.logger.Printf(\"Set input replace path(s): %+v\", &c.LocalFlags.SetReplacePath)\n\t\tc.logger.Printf(\"Set input replace value(s): %+v\", &c.LocalFlags.SetReplaceValue)\n\t\tc.logger.Printf(\"Set input replace file(s): %+v\", &c.LocalFlags.SetReplaceFile)\n\t\tc.logger.Printf(\"Set 
input union replace path(s): %+v\", &c.LocalFlags.SetUnionReplacePath)\n\t\tc.logger.Printf(\"Set input union replace value(s): %+v\", &c.LocalFlags.SetUnionReplaceValue)\n\t\tc.logger.Printf(\"Set input union replace file(s): %+v\", &c.LocalFlags.SetUnionReplaceFile)\n\t}\n\n\tgnmiOpts := make([]api.GNMIOption, 0, 2+ // prefix+target\n\t\tlen(c.LocalFlags.SetDelete)+len(c.LocalFlags.SetUpdate)+len(c.LocalFlags.SetReplace)+len(c.LocalFlags.SetUnionReplace)+\n\t\tlen(c.LocalFlags.SetUpdatePath)+len(c.LocalFlags.SetReplacePath)+len(c.LocalFlags.SetUnionReplacePath)+\n\t\t1+1+ // updateCli + replaceCli\n\t\t1+1, // updateCliFile + replaceCliFile\n\t)\n\tgnmiOpts = append(gnmiOpts,\n\t\tapi.Prefix(c.LocalFlags.SetPrefix),\n\t\tapi.Target(c.LocalFlags.SetTarget),\n\t)\n\tfor _, p := range c.LocalFlags.SetDelete {\n\t\tgnmiOpts = append(gnmiOpts, api.Delete(strings.TrimSpace(p)))\n\t}\n\n\tfor _, u := range c.LocalFlags.SetUpdate {\n\t\tsingleUpdate := strings.SplitN(u, c.LocalFlags.SetDelimiter, 3)\n\t\tif len(singleUpdate) < 3 {\n\t\t\treturn nil, fmt.Errorf(\"invalid inline update format: %s\", c.LocalFlags.SetUpdate)\n\t\t}\n\t\tgnmiOpts = append(gnmiOpts,\n\t\t\tapi.Update(\n\t\t\t\tapi.Path(strings.TrimSpace(singleUpdate[0])),\n\t\t\t\tapi.Value(singleUpdate[2], singleUpdate[1]),\n\t\t\t),\n\t\t)\n\t}\n\n\tfor _, r := range c.LocalFlags.SetReplace {\n\t\tsingleReplace := strings.SplitN(r, c.LocalFlags.SetDelimiter, 3)\n\t\tif len(singleReplace) < 3 {\n\t\t\treturn nil, fmt.Errorf(\"invalid inline replace format: %s\", c.LocalFlags.SetReplace)\n\t\t}\n\t\tgnmiOpts = append(gnmiOpts,\n\t\t\tapi.Replace(\n\t\t\t\tapi.Path(strings.TrimSpace(singleReplace[0])),\n\t\t\t\tapi.Value(singleReplace[2], singleReplace[1]),\n\t\t\t),\n\t\t)\n\t}\n\n\tfor _, r := range c.LocalFlags.SetUnionReplace {\n\t\tsingleUnionReplace := strings.SplitN(r, c.LocalFlags.SetDelimiter, 3)\n\t\tif len(singleUnionReplace) < 3 {\n\t\t\treturn nil, fmt.Errorf(\"invalid inline union-replace format: 
%s\", c.LocalFlags.SetUnionReplace)\n\t\t}\n\t\tgnmiOpts = append(gnmiOpts,\n\t\t\tapi.UnionReplace(\n\t\t\t\tapi.Path(strings.TrimSpace(singleUnionReplace[0])),\n\t\t\t\tapi.Value(singleUnionReplace[2], singleUnionReplace[1]),\n\t\t\t),\n\t\t)\n\t}\n\n\tuseUpdateFiles := len(c.LocalFlags.SetUpdateFile) > 0 && len(c.LocalFlags.SetUpdateValue) == 0\n\tuseReplaceFiles := len(c.LocalFlags.SetReplaceFile) > 0 && len(c.LocalFlags.SetReplaceValue) == 0\n\tuseUnionReplaceFiles := len(c.LocalFlags.SetUnionReplaceFile) > 0 && len(c.LocalFlags.SetUnionReplaceValue) == 0\n\n\tfor i, p := range c.LocalFlags.SetUpdatePath {\n\t\tvar updOpt api.GNMIOption\n\t\tif useUpdateFiles {\n\t\t\tupdateData, err := readFile(c.LocalFlags.SetUpdateFile[i])\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"error reading data from file '%s': %v\", c.LocalFlags.SetUpdateFile[i], err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttrim := \"\"\n\t\t\tif !c.LocalFlags.SetNoTrim {\n\t\t\t\ttrim = trimChars\n\t\t\t}\n\t\t\tupdOpt = api.Update(\n\t\t\t\tapi.Path(strings.TrimSpace(p)),\n\t\t\t\tapi.Value(string(bytes.Trim(updateData, trim)), c.Encoding),\n\t\t\t)\n\n\t\t} else {\n\t\t\tupdOpt = api.Update(\n\t\t\t\tapi.Path(strings.TrimSpace(p)),\n\t\t\t\tapi.Value(c.LocalFlags.SetUpdateValue[i], c.Encoding),\n\t\t\t)\n\t\t}\n\t\tgnmiOpts = append(gnmiOpts, updOpt)\n\t}\n\n\tfor i, p := range c.LocalFlags.SetReplacePath {\n\t\tvar replaceOpt api.GNMIOption\n\t\tif useReplaceFiles {\n\t\t\treplaceData, err := readFile(c.LocalFlags.SetReplaceFile[i])\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"error reading data from file '%s': %v\", c.LocalFlags.SetReplaceFile[i], err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttrim := \"\"\n\t\t\tif !c.LocalFlags.SetNoTrim {\n\t\t\t\ttrim = trimChars\n\t\t\t}\n\t\t\treplaceOpt = api.Replace(\n\t\t\t\tapi.Path(strings.TrimSpace(p)),\n\t\t\t\tapi.Value(string(bytes.Trim(replaceData, trim)), c.Encoding),\n\t\t\t)\n\n\t\t} else {\n\t\t\treplaceOpt = 
api.Replace(\n\t\t\t\tapi.Path(strings.TrimSpace(p)),\n\t\t\t\tapi.Value(c.LocalFlags.SetReplaceValue[i], c.Encoding),\n\t\t\t)\n\t\t}\n\t\tgnmiOpts = append(gnmiOpts, replaceOpt)\n\t}\n\n\tfor i, p := range c.LocalFlags.SetUnionReplacePath {\n\t\tvar unionReplaceOpt api.GNMIOption\n\t\tif useUnionReplaceFiles {\n\t\t\treplaceData, err := readFile(c.LocalFlags.SetUnionReplaceFile[i])\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"error reading data from file '%s': %v\", c.LocalFlags.SetUnionReplaceFile[i], err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttrim := \"\"\n\t\t\tif !c.LocalFlags.SetNoTrim {\n\t\t\t\ttrim = trimChars\n\t\t\t}\n\t\t\tunionReplaceOpt = api.UnionReplace(\n\t\t\t\tapi.Path(strings.TrimSpace(p)),\n\t\t\t\tapi.Value(string(bytes.Trim(replaceData, trim)), c.Encoding),\n\t\t\t)\n\n\t\t} else {\n\t\t\tunionReplaceOpt = api.UnionReplace(\n\t\t\t\tapi.Path(strings.TrimSpace(p)),\n\t\t\t\tapi.Value(c.LocalFlags.SetUnionReplaceValue[i], c.Encoding),\n\t\t\t)\n\t\t}\n\t\tgnmiOpts = append(gnmiOpts, unionReplaceOpt)\n\t}\n\n\tif len(c.LocalFlags.SetUpdateCli) > 0 {\n\t\tgnmiOpts = append(gnmiOpts,\n\t\t\tapi.Update(\n\t\t\t\tapi.Path(\"cli:/\"),\n\t\t\t\tapi.Value(strings.Join(c.LocalFlags.SetUpdateCli, \"\\n\"), \"ascii\"),\n\t\t\t),\n\t\t)\n\t}\n\n\tif len(c.LocalFlags.SetReplaceCli) > 0 {\n\t\tgnmiOpts = append(gnmiOpts,\n\t\t\tapi.Replace(\n\t\t\t\tapi.Path(\"cli:/\"),\n\t\t\t\tapi.Value(strings.Join(c.LocalFlags.SetReplaceCli, \"\\n\"), \"ascii\"),\n\t\t\t),\n\t\t)\n\t}\n\n\tif c.LocalFlags.SetUpdateCliFile != \"\" {\n\t\tdata, err := readFile(c.LocalFlags.SetUpdateCliFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tgnmiOpts = append(gnmiOpts,\n\t\t\tapi.Update(\n\t\t\t\tapi.Path(\"cli:/\"),\n\t\t\t\tapi.Value(string(data), \"ascii\"),\n\t\t\t),\n\t\t)\n\t}\n\n\tif c.LocalFlags.SetReplaceCliFile != \"\" {\n\t\tdata, err := readFile(c.LocalFlags.SetReplaceCliFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tgnmiOpts = 
append(gnmiOpts,\n\t\t\tapi.Replace(\n\t\t\t\tapi.Path(\"cli:/\"),\n\t\t\t\tapi.Value(string(data), \"ascii\"),\n\t\t\t),\n\t\t)\n\t}\n\n\tif c.LocalFlags.SetCommitId != \"\" {\n\t\tif c.LocalFlags.SetCommitRequest {\n\t\t\tgnmiOpts = append(gnmiOpts,\n\t\t\t\tapi.Extension_CommitRequest(\n\t\t\t\t\tc.LocalFlags.SetCommitId,\n\t\t\t\t\tc.LocalFlags.SetCommitRollbackDuration,\n\t\t\t\t))\n\t\t} else if c.LocalFlags.SetCommitConfirm {\n\t\t\tgnmiOpts = append(gnmiOpts,\n\t\t\t\tapi.Extension_CommitConfirm(\n\t\t\t\t\tc.LocalFlags.SetCommitId,\n\t\t\t\t))\n\t\t} else if c.LocalFlags.SetCommitCancel {\n\t\t\tgnmiOpts = append(gnmiOpts,\n\t\t\t\tapi.Extension_CommitCancel(\n\t\t\t\t\tc.LocalFlags.SetCommitId,\n\t\t\t\t))\n\t\t} else {\n\t\t\tgnmiOpts = append(gnmiOpts,\n\t\t\t\tapi.Extension_CommitSetRollbackDuration(\n\t\t\t\t\tc.LocalFlags.SetCommitId,\n\t\t\t\t\tc.LocalFlags.SetCommitRollbackDuration,\n\t\t\t\t))\n\t\t}\n\t}\n\n\texts, err := c.parseAdditionalRequestExtensions()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgnmiOpts = append(gnmiOpts, exts...)\n\n\t//\n\treq, err := api.NewSetRequest(gnmiOpts...)\n\treturn []*gnmi.SetRequest{req}, err\n}\n\n// readFile reads a json or yaml file. 
if the file is .yaml, converts it to json and returns []byte and an error\nfunc readFile(name string) ([]byte, error) {\n\tdata, err := gfile.ReadFile(context.TODO(), name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch filepath.Ext(name) {\n\tdefault:\n\t\treturn data, nil\n\tcase \".yaml\", \".yml\":\n\t\treturn toJSONBytes(data)\n\t}\n}\n\nfunc toJSONBytes(data []byte) ([]byte, error) {\n\tvar out interface{}\n\tvar err error\n\terr = yaml.Unmarshal(data, &out)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewStruct := convert(out)\n\n\tb := new(bytes.Buffer)\n\tenc := json.NewEncoder(b)\n\tenc.SetEscapeHTML(false)\n\n\terr = enc.Encode(newStruct)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn b.Bytes(), nil\n}\n\n// SanitizeArrayFlagValue trims trailing and leading brackets ([]),\n// from each of ls elements only if both are present.\nfunc SanitizeArrayFlagValue(ls []string) []string {\n\tres := make([]string, 0, len(ls))\n\tfor i := range ls {\n\t\tif ls[i] == \"[]\" {\n\t\t\tcontinue\n\t\t}\n\t\tfor strings.HasPrefix(ls[i], \"[\") && strings.HasSuffix(ls[i], \"]\") {\n\t\t\tls[i] = ls[i][1 : len(ls[i])-1]\n\t\t}\n\t\tres = append(res, ls[i])\n\t}\n\treturn res\n}\n\nfunc ParseAddressField(addr []string) []string {\n\tres := make([]string, 0, len(addr))\n\tfor i := range addr {\n\t\tif addr[i] == \"[]\" {\n\t\t\tcontinue\n\t\t}\n\t\tfor strings.HasPrefix(addr[i], \"[\") && strings.HasSuffix(addr[i], \"]\") {\n\t\t\taddr[i] = addr[i][1 : len(addr[i])-1]\n\t\t}\n\t\tres = append(res, strings.Split(addr[i], \",\")...)\n\t}\n\treturn res\n}\n\nfunc (c *Config) ValidateSetInput() error {\n\tvar err error\n\tc.LocalFlags.SetDelete = SanitizeArrayFlagValue(c.LocalFlags.SetDelete)\n\tc.LocalFlags.SetUpdate = SanitizeArrayFlagValue(c.LocalFlags.SetUpdate)\n\tc.LocalFlags.SetReplace = SanitizeArrayFlagValue(c.LocalFlags.SetReplace)\n\tc.LocalFlags.SetUpdatePath = SanitizeArrayFlagValue(c.LocalFlags.SetUpdatePath)\n\tc.LocalFlags.SetReplacePath = 
SanitizeArrayFlagValue(c.LocalFlags.SetReplacePath)\n\tc.LocalFlags.SetUpdateValue = SanitizeArrayFlagValue(c.LocalFlags.SetUpdateValue)\n\tc.LocalFlags.SetReplaceValue = SanitizeArrayFlagValue(c.LocalFlags.SetReplaceValue)\n\tc.LocalFlags.SetUpdateFile = SanitizeArrayFlagValue(c.LocalFlags.SetUpdateFile)\n\tc.LocalFlags.SetReplaceFile = SanitizeArrayFlagValue(c.LocalFlags.SetReplaceFile)\n\tc.LocalFlags.SetRequestFile = SanitizeArrayFlagValue(c.LocalFlags.SetRequestFile)\n\n\tc.LocalFlags.SetUpdateFile, err = ExpandOSPaths(c.LocalFlags.SetUpdateFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.LocalFlags.SetReplaceFile, err = ExpandOSPaths(c.LocalFlags.SetReplaceFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range c.LocalFlags.SetRequestFile {\n\t\tc.LocalFlags.SetRequestFile[i], err = expandOSPath(c.LocalFlags.SetRequestFile[i])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tc.LocalFlags.SetRequestVars, err = expandOSPath(c.LocalFlags.SetRequestVars)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif (len(c.LocalFlags.SetDelete)+len(c.LocalFlags.SetUpdate)+len(c.LocalFlags.SetReplace)+len(c.LocalFlags.SetUnionReplace)) == 0 &&\n\t\t(len(c.LocalFlags.SetUpdatePath)+len(c.LocalFlags.SetReplacePath)+len(c.LocalFlags.SetUnionReplacePath)) == 0 &&\n\t\tlen(c.LocalFlags.SetRequestFile) == 0 &&\n\t\tlen(c.LocalFlags.SetReplaceCli) == 0 &&\n\t\tlen(c.LocalFlags.SetUpdateCli) == 0 &&\n\t\tlen(c.LocalFlags.SetReplaceCliFile) == 0 &&\n\t\tlen(c.LocalFlags.SetUpdateCliFile) == 0 &&\n\t\tlen(c.LocalFlags.SetRequestProtoFile) == 0 &&\n\t\tc.LocalFlags.SetCommitId == \"\" {\n\t\treturn errors.New(\"no paths or request file provided\")\n\t}\n\tif len(c.LocalFlags.SetUpdateFile) > 0 && len(c.LocalFlags.SetUpdateValue) > 0 {\n\t\treturn errors.New(\"set update from file and value are not supported in the same command\")\n\t}\n\tif len(c.LocalFlags.SetReplaceFile) > 0 && len(c.LocalFlags.SetReplaceValue) > 0 {\n\t\treturn errors.New(\"set replace from file and value 
are not supported in the same command\")\n\t}\n\tif len(c.LocalFlags.SetUnionReplaceFile) > 0 && len(c.LocalFlags.SetUnionReplaceValue) > 0 {\n\t\treturn errors.New(\"set union-replace from file and value are not supported in the same command\")\n\t}\n\tif len(c.LocalFlags.SetUpdatePath) != len(c.LocalFlags.SetUpdateValue) && len(c.LocalFlags.SetUpdatePath) != len(c.LocalFlags.SetUpdateFile) {\n\t\treturn errors.New(\"missing update value/file or path\")\n\t}\n\tif len(c.LocalFlags.SetReplacePath) != len(c.LocalFlags.SetReplaceValue) && len(c.LocalFlags.SetReplacePath) != len(c.LocalFlags.SetReplaceFile) {\n\t\treturn errors.New(\"missing replace value/file or path\")\n\t}\n\tif len(c.LocalFlags.SetUnionReplacePath) != len(c.LocalFlags.SetUnionReplaceValue) && len(c.LocalFlags.SetUnionReplacePath) != len(c.LocalFlags.SetUnionReplaceFile) {\n\t\treturn errors.New(\"missing union-replace value/file or path\")\n\t}\n\treturn nil\n}\n\nfunc ExpandOSPaths(paths []string) ([]string, error) {\n\tvar err error\n\tfor i := range paths {\n\t\tpaths[i], err = expandOSPath(paths[i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn paths, nil\n}\n\nfunc expandOSPath(p string) (string, error) {\n\tif p == \"-\" || p == \"\" {\n\t\treturn p, nil\n\t}\n\tif strings.HasPrefix(p, \"http://\") ||\n\t\tstrings.HasPrefix(p, \"https://\") ||\n\t\tstrings.HasPrefix(p, \"sftp://\") ||\n\t\tstrings.HasPrefix(p, \"ftp://\") {\n\t\treturn p, nil\n\t}\n\tnp, err := homedir.Expand(p)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"path %q: %v\", p, err)\n\t}\n\tif !filepath.IsAbs(np) {\n\t\tcwd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"path %q: %v\", p, err)\n\t\t}\n\t\tnp = filepath.Join(cwd, np)\n\t}\n\t_, err = os.Stat(np)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn np, nil\n}\n\nfunc (c *Config) expandOSPathFlagValues() error {\n\tfor _, flagName := range osPathFlags {\n\t\tif c.FileConfig.IsSet(flagName) {\n\t\t\texpandedPath, 
err := expandOSPath(c.FileConfig.GetString(flagName))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tc.FileConfig.Set(flagName, expandedPath)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc trimQuotes(s string) string {\n\tif len(s) >= 2 {\n\t\tif s[0] == '\"' && s[len(s)-1] == '\"' {\n\t\t\treturn s[1 : len(s)-1]\n\t\t}\n\t}\n\treturn s\n}\n"
  },
  {
    "path": "pkg/config/config_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\n\t\"github.com/openconfig/gnmic/pkg/api\"\n\t\"github.com/openconfig/gnmic/pkg/api/testutils\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\nvar createGetRequestTestSet = map[string]struct {\n\tin  *Config\n\tout *gnmi.GetRequest\n\terr error\n}{\n\t\"nil_input\": {\n\t\tin:  nil,\n\t\tout: nil,\n\t\terr: ErrInvalidConfig,\n\t},\n\t\"unknown_encoding_type\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"dummy\",\n\t\t\t},\n\t\t\tLocalFlags{},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: nil,\n\t\terr: api.ErrInvalidValue,\n\t},\n\t\"invalid_prefix\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{\n\t\t\t\tGetPrefix: \"/invalid/]prefix\",\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: nil,\n\t\terr: api.ErrInvalidValue,\n\t},\n\t\"invalid_path\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{\n\t\t\t\tGetPrefix: \"/invalid/]path\",\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: nil,\n\t\terr: api.ErrInvalidValue,\n\t},\n\t\"unknown_data_type\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{\n\t\t\t\tGetPrefix: 
\"/valid/path\",\n\t\t\t\tGetType:   \"dummy\",\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: nil,\n\t\terr: api.ErrInvalidValue,\n\t},\n\t\"basic_get_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{\n\t\t\t\tGetPath: []string{\"/valid/path\"},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.GetRequest{\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"get_request_with_type\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{\n\t\t\t\tGetPath: []string{\"/valid/path\"},\n\t\t\t\tGetType: \"state\",\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.GetRequest{\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tType: gnmi.GetRequest_STATE,\n\t\t},\n\t\terr: nil,\n\t},\n\t\"get_request_with_encoding\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"proto\",\n\t\t\t},\n\t\t\tLocalFlags{\n\t\t\t\tGetPath: []string{\"/valid/path\"},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.GetRequest{\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tEncoding: gnmi.Encoding_PROTO,\n\t\t},\n\t\terr: nil,\n\t},\n\t\"get_request_with_prefix\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"proto\",\n\t\t\t},\n\t\t\tLocalFlags{\n\t\t\t\tGetPrefix: \"/valid/prefix\",\n\t\t\t\tGetPath:   
[]string{\"/valid/path\"},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.GetRequest{\n\t\t\tPrefix: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t{Name: \"prefix\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tEncoding: gnmi.Encoding_PROTO,\n\t\t},\n\t\terr: nil,\n\t},\n\t\"get_request_with_2_paths\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{\n\t\t\t\tGetPath: []string{\n\t\t\t\t\t\"/valid/path1\",\n\t\t\t\t\t\"/valid/path2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.GetRequest{\n\t\t\tPath: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n}\n\nvar createSetRequestTestSet = map[string]struct {\n\tin  *Config\n\tout *gnmi.SetRequest\n\terr error\n}{\n\n\t\"set_update_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{},\n\t\t\tLocalFlags{\n\t\t\t\tSetDelimiter: \":::\",\n\t\t\t\tSetUpdate:    []string{\"/valid/path:::json:::value\"},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: 
[]byte(\"\\\"value\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_replace_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{},\n\t\t\tLocalFlags{\n\t\t\t\tSetDelimiter: \":::\",\n\t\t\t\tSetReplace:   []string{\"/valid/path:::json:::value\"},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_delete_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{},\n\t\t\tLocalFlags{\n\t\t\t\tSetDelete: []string{\"/valid/path\"},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tDelete: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_multiple_update_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{},\n\t\t\tLocalFlags{\n\t\t\t\tSetDelimiter: \":::\",\n\t\t\t\tSetUpdate: []string{\n\t\t\t\t\t\"/valid/path1:::json:::value1\",\n\t\t\t\t\t\"/valid/path2:::json_ietf:::value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path1\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: 
&gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value1\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\t\t\tJsonIetfVal: []byte(\"\\\"value2\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_multiple_replace_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{},\n\t\t\tLocalFlags{\n\t\t\t\tSetDelimiter: \":::\",\n\t\t\t\tSetReplace: []string{\n\t\t\t\t\t\"/valid/path1:::json:::value1\",\n\t\t\t\t\t\"/valid/path2:::json_ietf:::value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path1\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value1\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\t\t\tJsonIetfVal: []byte(\"\\\"value2\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_multiple_delete_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{},\n\t\t\tLocalFlags{\n\t\t\t\tSetDelete: []string{\n\t\t\t\t\t\"/valid/path1\",\n\t\t\t\t\t\"/valid/path2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: 
&gnmi.SetRequest{\n\t\t\tDelete: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_combined_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{},\n\t\t\tLocalFlags{\n\t\t\t\tSetDelimiter: \":::\",\n\t\t\t\tSetUpdate:    []string{\"/valid/path1:::json:::value1\"},\n\t\t\t\tSetReplace:   []string{\"/valid/path2:::json:::value2\"},\n\t\t\t\tSetDelete:    []string{\"/valid/path\"},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path1\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value1\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value2\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDelete: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_update_path_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{\n\t\t\t\tSetUpdatePath:  []string{\"/valid/path\"},\n\t\t\t\tSetUpdateValue: 
[]string{\"value\"},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_replace_path_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{\n\t\t\t\tSetReplacePath:  []string{\"/valid/path\"},\n\t\t\t\tSetReplaceValue: []string{\"value\"},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_union_replace_path_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{\n\t\t\t\tSetUnionReplacePath:  []string{\"/valid/path\"},\n\t\t\t\tSetUnionReplaceValue: []string{\"value\"},\n\t\t\t},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tUnionReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: 
&gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n}\n\nvar execPathTemplateTestSet = map[string]struct {\n\ttpl   string\n\tinput interface{}\n\tout   string\n}{\n\t\"nil\": {\n\t\ttpl:   \"\",\n\t\tinput: nil,\n\t\tout:   \"\",\n\t},\n\t\"simple\": {\n\t\ttpl:   `\"/path/\"`,\n\t\tinput: nil,\n\t\tout:   \"/path/\",\n\t},\n\t\"with_an_expression\": {\n\t\ttpl: `\"/interfaces/\" + .name`,\n\t\tinput: map[string]interface{}{\n\t\t\t\"name\": \"interface\",\n\t\t},\n\t\tout: \"/interfaces/interface\",\n\t},\n}\n\nfunc TestCreateGetRequest(t *testing.T) {\n\tfor name, data := range createGetRequestTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tgetReq, err := data.in.CreateGetRequest(&types.TargetConfig{})\n\t\t\tt.Logf(\"exp value: %+v\", data.out)\n\t\t\tt.Logf(\"got value: %+v\", getReq)\n\t\t\tt.Logf(\"exp error: %+v\", data.err)\n\t\t\tt.Logf(\"got error: %+v\", err)\n\t\t\tif err != nil {\n\t\t\t\tuerr := errors.Unwrap(err)\n\t\t\t\tif !errors.Is(uerr, data.err) {\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !testutils.GetRequestsEqual(getReq, data.out) {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCreateSetRequest(t *testing.T) {\n\tfor name, data := range createSetRequestTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tsetReq, err := data.in.CreateSetRequest(\"\")\n\t\t\tt.Logf(\"exp value: %+v\", data.out)\n\t\t\tt.Logf(\"exp error: %+v\", data.err)\n\t\t\tt.Logf(\"got value: %+v\", setReq)\n\t\t\tt.Logf(\"got error: %+v\", err)\n\t\t\tif err != nil {\n\t\t\t\tif !strings.HasPrefix(err.Error(), data.err.Error()) {\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !testutils.SetRequestsEqual(setReq[0], data.out) {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestExecPathTemplate(t *testing.T) {\n\tc := New()\n\tc.Debug = true\n\tc.logger = log.New(os.Stderr, \"\", log.LstdFlags)\n\tfor name, data := range 
execPathTemplateTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\to, err := c.execPathTemplate(data.tpl, data.input)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tt.Logf(\"exp value: %+v\", data.out)\n\t\t\tt.Logf(\"got value: %+v\", o)\n\t\t\tif data.out != o {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/config/diff.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/openconfig/gnmic/pkg/api\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/utils\"\n)\n\nfunc (c *Config) CreateDiffSubscribeRequest(cmd *cobra.Command) (*gnmi.SubscribeRequest, error) {\n\tsc := &types.SubscriptionConfig{\n\t\tName:     \"diff-sub\",\n\t\tModels:   c.DiffModel,\n\t\tPrefix:   c.DiffPrefix,\n\t\tTarget:   c.DiffTarget,\n\t\tPaths:    c.DiffPath,\n\t\tMode:     \"ONCE\",\n\t\tEncoding: &c.Encoding,\n\t}\n\tif flagIsSet(cmd, \"qos\") {\n\t\tsc.Qos = &c.DiffQos\n\t}\n\treturn utils.CreateSubscribeRequest(sc, nil, c.Encoding)\n}\n\nfunc (c *Config) CreateDiffGetRequest() (*gnmi.GetRequest, error) {\n\tif c == nil {\n\t\treturn nil, fmt.Errorf(\"%w\", ErrInvalidConfig)\n\t}\n\tgnmiOpts := make([]api.GNMIOption, 0, 4+len(c.LocalFlags.DiffPath))\n\tgnmiOpts = append(gnmiOpts,\n\t\tapi.Encoding(c.Encoding),\n\t\tapi.DataType(c.LocalFlags.DiffType),\n\t\tapi.Prefix(c.LocalFlags.DiffPrefix),\n\t\tapi.Target(c.LocalFlags.DiffTarget),\n\t)\n\tfor _, p := range c.LocalFlags.DiffPath {\n\t\tgnmiOpts = append(gnmiOpts, api.Path(strings.TrimSpace(p)))\n\t}\n\treturn api.NewGetRequest(gnmiOpts...)\n}\n"
  },
  {
    "path": "pkg/config/environment.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/spf13/cobra\"\n\t\"github.com/spf13/pflag\"\n)\n\nfunc envToMap() map[string]any {\n\tm := map[string]any{}\n\tfor _, e := range os.Environ() {\n\t\tif !strings.HasPrefix(e, envPrefix) {\n\t\t\tcontinue\n\t\t}\n\t\tpair := strings.SplitN(e, \"=\", 2)\n\t\tif len(pair) < 2 {\n\t\t\tcontinue // malformed env var\n\t\t}\n\t\tpair[0] = strings.ToLower(strings.TrimPrefix(pair[0], envPrefix+\"_\"))\n\t\titems := strings.Split(pair[0], \"_\")\n\t\tmergeMap(m, items, pair[1])\n\t}\n\treturn m\n}\n\nfunc mergeMap(m map[string]any, items []string, v any) {\n\tnItems := len(items)\n\tif nItems == 0 {\n\t\treturn\n\t}\n\tif nItems > 1 {\n\t\tif _, ok := m[items[0]]; !ok {\n\t\t\tm[items[0]] = map[string]any{}\n\t\t}\n\t\tasMap, ok := m[items[0]].(map[string]any)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tmergeMap(asMap, items[1:], v)\n\t\tv = asMap\n\t}\n\tm[items[0]] = v\n}\n\nfunc (c *Config) mergeEnvVars() {\n\tenvs := envToMap()\n\tif c.GlobalFlags.Debug {\n\t\tc.logger.Printf(\"merging env vars: %+v\", envs)\n\t}\n\tc.FileConfig.MergeConfigMap(envs)\n}\n\nfunc (c *Config) SetGlobalsFromEnv(cmd *cobra.Command) {\n\tcmd.PersistentFlags().VisitAll(func(f *pflag.Flag) {\n\t\t// expand password and token global attr only if they start with '$'\n\t\tif f.Name == \"password\" || f.Name == \"token\" {\n\t\t\tif !f.Changed && c.FileConfig.IsSet(f.Name) {\n\t\t\t\tval := c.FileConfig.GetString(f.Name)\n\t\t\t\tif strings.HasPrefix(val, \"$\") 
{\n\t\t\t\t\tc.setFlagValue(cmd, f.Name, val)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\t// other global flags\n\t\tif !f.Changed && c.FileConfig.IsSet(f.Name) {\n\t\t\tif val := os.ExpandEnv(c.FileConfig.GetString(f.Name)); val != \"\" {\n\t\t\t\tc.setFlagValue(cmd, f.Name, val)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc expandMapEnv(m map[string]interface{}, fn func(string, string) string) {\n\tfor f := range m {\n\t\tswitch v := m[f].(type) {\n\t\tcase string:\n\t\t\tm[f] = fn(f, v)\n\t\tcase map[string]interface{}:\n\t\t\texpandMapEnv(v, fn)\n\t\t\tm[f] = v\n\t\tcase []any:\n\t\t\tfor i, item := range v {\n\t\t\t\tswitch item := item.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\tv[i] = fn(f, item)\n\t\t\t\tcase map[string]interface{}:\n\t\t\t\t\texpandMapEnv(item, fn)\n\t\t\t\tcase []any:\n\t\t\t\t\texpandSliceEnv(f, item, fn)\n\t\t\t\t}\n\t\t\t}\n\t\t\tm[f] = v\n\t\t}\n\t}\n}\n\nfunc expandSliceEnv(parent string, s []any, fn func(string, string) string) {\n\tfor i, item := range s {\n\t\tswitch item := item.(type) {\n\t\tcase string:\n\t\t\ts[i] = fn(parent, item)\n\t\tcase map[string]interface{}:\n\t\t\texpandMapEnv(item, fn)\n\t\tcase []any:\n\t\t\texpandSliceEnv(\"\", item, fn)\n\t\t}\n\t}\n}\n\nfunc expandExcept(except ...string) func(string, string) string {\n\treturn func(k, v string) string {\n\t\tfor _, e := range except {\n\t\t\tif k == e {\n\t\t\t\treturn v\n\t\t\t}\n\t\t}\n\t\treturn os.ExpandEnv(v)\n\t}\n}\n\nfunc expandAll() func(string, string) string {\n\treturn expandExcept()\n}\n"
  },
  {
    "path": "pkg/config/gnmi_ext.go",
    "content": "package config\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com/fullstorydev/grpcurl\"\n\t\"github.com/jhump/protoreflect/dynamic\"\n\t\"github.com/openconfig/gnmi/proto/gnmi_ext\"\n\t\"github.com/openconfig/gnmic/pkg/api\"\n\t\"github.com/openconfig/gnmic/pkg/utils\"\n\tpkgUtils \"github.com/openconfig/gnmic/pkg/utils\"\n)\n\nfunc createAdditionalRequestExtensions(\n\textensions string,\n\tprotoDir,\n\tprotoFiles []string,\n\textensionDecodeMap utils.RegisteredExtensions,\n) ([]*gnmi_ext.Extension, error) {\n\n\tvar exts []*gnmi_ext.Extension\n\n\tif len(protoFiles) == 0 {\n\t\treturn exts, nil\n\t}\n\n\tdescSource, err := grpcurl.DescriptorSourceFromProtoFiles(protoDir, protoFiles...)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar extensionsMap map[string]any\n\n\tif err := json.Unmarshal([]byte(extensions), &extensionsMap); err != nil {\n\t\treturn nil, fmt.Errorf(\"extensions JSON decoding error: %w\", err)\n\t}\n\n\tfor idMsg, extMsg := range extensionsMap {\n\t\tid, err := strconv.ParseInt(idMsg, 10, 32)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmsg, exists := extensionDecodeMap[int32(id)]\n\n\t\tif !exists {\n\t\t\treturn nil, fmt.Errorf(\"custom extension for the request was not found in the provided registered extensions\")\n\t\t}\n\n\t\tdesc, err := descSource.FindSymbol(msg)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpm := dynamic.NewMessage(desc.GetFile().FindMessage(msg))\n\n\t\tmsgBytes, err := json.Marshal(extMsg)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err = pm.UnmarshalJSON(msgBytes); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\textBytes, err := pm.Marshal()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\text := gnmi_ext.Extension_RegisteredExt{\n\t\t\tRegisteredExt: &gnmi_ext.RegisteredExtension{\n\t\t\t\tId:  gnmi_ext.ExtensionID(id),\n\t\t\t\tMsg: extBytes,\n\t\t\t},\n\t\t}\n\n\t\texts = append(exts, 
&gnmi_ext.Extension{Ext: &ext})\n\t}\n\n\treturn exts, nil\n}\n\nfunc (c *Config) parseAdditionalRequestExtensions() ([]api.GNMIOption, error) {\n\tgnmiOpts := []api.GNMIOption{}\n\n\tif c.GlobalFlags.RequestExtensions == \"\" {\n\t\treturn gnmiOpts, nil\n\t}\n\n\tregisteredExtensions, err := pkgUtils.ParseRegisteredExtensions(c.GlobalFlags.RegisteredExtensions)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\texts, err := createAdditionalRequestExtensions(\n\t\tc.GlobalFlags.RequestExtensions,\n\t\tc.GlobalFlags.ProtoDir,\n\t\tc.GlobalFlags.ProtoFile,\n\t\tregisteredExtensions,\n\t)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ext := range exts {\n\t\tgnmiOpts = append(gnmiOpts, api.Extension(ext))\n\t}\n\n\treturn gnmiOpts, nil\n}\n"
  },
  {
    "path": "pkg/config/gnmi_server.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/cache\"\n\t\"google.golang.org/grpc/keepalive\"\n)\n\nconst (\n\tdefaultAddress           = \":57400\"\n\tdefaultMaxSubscriptions  = 64\n\tdefaultMaxUnaryRPC       = 64\n\tminimumSampleInterval    = 1 * time.Millisecond\n\tdefaultSampleInterval    = 1 * time.Second\n\tminimumHeartbeatInterval = 1 * time.Second\n\t//\n\tdefaultServiceRegistrationAddress = \"localhost:8500\"\n\tdefaultRegistrationCheckInterval  = 5 * time.Second\n\tdefaultMaxServiceFail             = 3\n)\n\ntype GNMIServer struct {\n\tAddress               string               `mapstructure:\"address,omitempty\" json:\"address,omitempty\"`\n\tMinSampleInterval     time.Duration        `mapstructure:\"min-sample-interval,omitempty\" json:\"min-sample-interval,omitempty\"`\n\tDefaultSampleInterval time.Duration        `mapstructure:\"default-sample-interval,omitempty\" json:\"default-sample-interval,omitempty\"`\n\tMinHeartbeatInterval  time.Duration        `mapstructure:\"min-heartbeat-interval,omitempty\" json:\"min-heartbeat-interval,omitempty\"`\n\tMaxSubscriptions      int64                `mapstructure:\"max-subscriptions,omitempty\" json:\"max-subscriptions,omitempty\"`\n\tMaxUnaryRPC           int64                `mapstructure:\"max-unary-rpc,omitempty\" json:\"max-unary-rpc,omitempty\"`\n\tMaxRecvMsgSize        int                  
`mapstructure:\"max-recv-msg-size,omitempty\" json:\"max-recv-msg-size,omitempty\"`\n\tMaxSendMsgSize        int                  `mapstructure:\"max-send-msg-size,omitempty\" json:\"max-send-msg-size,omitempty\"`\n\tMaxConcurrentStreams  uint32               `mapstructure:\"max-concurrent-streams,omitempty\" json:\"max-concurrent-streams,omitempty\"`\n\tTCPKeepalive          time.Duration        `mapstructure:\"tcp-keepalive,omitempty\" json:\"tcp-keepalive,omitempty\"`\n\tGRPCKeepalive         *grpcKeepaliveConfig `mapstructure:\"grpc-keepalive,omitempty\" json:\"grpc-keepalive,omitempty\"`\n\tRateLimit             int64                `mapstructure:\"rate-limit,omitempty\" json:\"rate-limit,omitempty\"`\n\tTimeout               time.Duration        `mapstructure:\"timeout,omitempty\" json:\"timeout,omitempty\"`\n\tTLS                   *types.TLSConfig     `mapstructure:\"tls,omitempty\" json:\"tls,omitempty\"`\n\tEnableMetrics         bool                 `mapstructure:\"enable-metrics,omitempty\" json:\"enable-metrics,omitempty\"`\n\tDebug                 bool                 `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\t// ServiceRegistration\n\tServiceRegistration *serviceRegistration `mapstructure:\"service-registration,omitempty\" json:\"service-registration,omitempty\"`\n\t// cache config\n\tCache *cache.Config `mapstructure:\"cache,omitempty\" json:\"cache,omitempty\"`\n}\n\ntype serviceRegistration struct {\n\tAddress       string        `mapstructure:\"address,omitempty\" json:\"address,omitempty\"`\n\tDatacenter    string        `mapstructure:\"datacenter,omitempty\" json:\"datacenter,omitempty\"`\n\tUsername      string        `mapstructure:\"username,omitempty\" json:\"username,omitempty\"`\n\tPassword      string        `mapstructure:\"password,omitempty\" json:\"password,omitempty\"`\n\tToken         string        `mapstructure:\"token,omitempty\" json:\"token,omitempty\"`\n\tName          string        
`mapstructure:\"name,omitempty\" json:\"name,omitempty\"`\n\tCheckInterval time.Duration `mapstructure:\"check-interval,omitempty\" json:\"check-interval,omitempty\"`\n\tMaxFail       int           `mapstructure:\"max-fail,omitempty\" json:\"max-fail,omitempty\"`\n\tTags          []string      `mapstructure:\"tags,omitempty\" json:\"tags,omitempty\"`\n\t//\n\tDeregisterAfter string `mapstructure:\"-\" json:\"-\"`\n}\n\n// from keepalive.ServerParameters\ntype grpcKeepaliveConfig struct {\n\t// MaxConnectionIdle is a duration for the amount of time after which an\n\t// idle connection would be closed by sending a GoAway. Idleness duration is\n\t// defined since the most recent time the number of outstanding RPCs became\n\t// zero or the connection establishment.\n\tMaxConnectionIdle time.Duration `mapstructure:\"max-connection-idle,omitempty\"` // The current default value is infinity.\n\t// MaxConnectionAge is a duration for the maximum amount of time a\n\t// connection may exist before it will be closed by sending a GoAway. 
A\n\t// random jitter of +/-10% will be added to MaxConnectionAge to spread out\n\t// connection storms.\n\tMaxConnectionAge time.Duration `mapstructure:\"max-connection-age,omitempty\"` // The current default value is infinity.\n\t// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after\n\t// which the connection will be forcibly closed.\n\tMaxConnectionAgeGrace time.Duration `mapstructure:\"max-connection-age-grace,omitempty\"` // The current default value is infinity.\n\t// After a duration of this time if the server doesn't see any activity it\n\t// pings the client to see if the transport is still alive.\n\t// If set below 1s, a minimum value of 1s will be used instead.\n\tTime time.Duration `mapstructure:\"time,omitempty\"` // The current default value is 2 hours.\n\t// After having pinged for keepalive check, the server waits for a duration\n\t// of Timeout and if no activity is seen even after that the connection is\n\t// closed.\n\tTimeout time.Duration `mapstructure:\"timeout,omitempty\"` // The current default value is 20 seconds.\n}\n\nfunc (gkc *grpcKeepaliveConfig) Convert() *keepalive.ServerParameters {\n\tif gkc == nil {\n\t\treturn nil\n\t}\n\treturn &keepalive.ServerParameters{\n\t\tMaxConnectionIdle:     gkc.MaxConnectionIdle,\n\t\tMaxConnectionAge:      gkc.MaxConnectionAge,\n\t\tMaxConnectionAgeGrace: gkc.MaxConnectionAgeGrace,\n\t\tTime:                  gkc.Time,\n\t\tTimeout:               gkc.Timeout,\n\t}\n}\n\nfunc (c *Config) GetGNMIServer() error {\n\tif !c.FileConfig.IsSet(\"gnmi-server\") {\n\t\treturn nil\n\t}\n\tc.GnmiServer = new(GNMIServer)\n\tc.GnmiServer.Address = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/address\"))\n\n\tmaxSubVal := os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/max-subscriptions\"))\n\tif maxSubVal != \"\" {\n\t\tmaxSub, err := strconv.Atoi(maxSubVal)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.GnmiServer.MaxSubscriptions = int64(maxSub)\n\t}\n\tmaxRPCVal := 
os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/max-unary-rpc\"))\n\tif maxRPCVal != \"\" {\n\t\tmaxUnaryRPC, err := strconv.Atoi(os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/max-unary-rpc\")))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.GnmiServer.MaxUnaryRPC = int64(maxUnaryRPC)\n\t}\n\tif c.FileConfig.IsSet(\"gnmi-server/tls\") {\n\t\tc.GnmiServer.TLS = new(types.TLSConfig)\n\t\tc.GnmiServer.TLS.CaFile = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/tls/ca-file\"))\n\t\tc.GnmiServer.TLS.CertFile = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/tls/cert-file\"))\n\t\tc.GnmiServer.TLS.KeyFile = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/tls/key-file\"))\n\t\tc.GnmiServer.TLS.ClientAuth = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/tls/client-auth\"))\n\t\tif err := c.GnmiServer.TLS.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"gnmi-server TLS config error: %w\", err)\n\t\t}\n\t}\n\n\tc.GnmiServer.EnableMetrics = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/enable-metrics\")) == trueString\n\tc.GnmiServer.Debug = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/debug\")) == trueString\n\tc.GnmiServer.Timeout = c.FileConfig.GetDuration(\"gnmi-server/timeout\")\n\n\tc.setGnmiServerDefaults()\n\n\tif c.FileConfig.IsSet(\"gnmi-server/service-registration\") {\n\t\tc.GnmiServer.ServiceRegistration = new(serviceRegistration)\n\t\tc.GnmiServer.ServiceRegistration.Address = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/service-registration/address\"))\n\t\tc.GnmiServer.ServiceRegistration.Datacenter = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/service-registration/datacenter\"))\n\t\tc.GnmiServer.ServiceRegistration.Username = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/service-registration/username\"))\n\t\tc.GnmiServer.ServiceRegistration.Password = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/service-registration/password\"))\n\t\tc.GnmiServer.ServiceRegistration.Token = 
os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/service-registration/token\"))\n\t\tc.GnmiServer.ServiceRegistration.Name = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/service-registration/name\"))\n\t\tc.GnmiServer.ServiceRegistration.CheckInterval = c.FileConfig.GetDuration(\"gnmi-server/service-registration/check-interval\")\n\t\tc.GnmiServer.ServiceRegistration.MaxFail = c.FileConfig.GetInt(\"gnmi-server/service-registration/max-fail\")\n\t\tc.GnmiServer.ServiceRegistration.Tags = c.FileConfig.GetStringSlice(\"gnmi-server/service-registration/tags\")\n\t\tc.setGnmiServerServiceRegistrationDefaults()\n\t}\n\n\tif c.FileConfig.IsSet(\"gnmi-server/cache\") {\n\t\tc.GnmiServer.Cache = new(cache.Config)\n\t\tc.GnmiServer.Cache.Type = cache.CacheType(os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/cache/type\")))\n\t\tc.GnmiServer.Cache.Address = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/cache/address\"))\n\t\tc.GnmiServer.Cache.Timeout = c.FileConfig.GetDuration(\"gnmi-server/cache/timeout\")\n\t\tc.GnmiServer.Cache.Expiration = c.FileConfig.GetDuration(\"gnmi-server/cache/expiration\")\n\t\tc.GnmiServer.Cache.Debug = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/cache/debug\")) == trueString\n\t\tc.GnmiServer.Cache.Username = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/cache/username\"))\n\t\tc.GnmiServer.Cache.Password = os.ExpandEnv(c.FileConfig.GetString(\"gnmi-server/cache/password\"))\n\t\t//\n\t\tc.GnmiServer.Cache.MaxBytes = c.FileConfig.GetInt64(\"gnmi-server/cache/max-bytes\")\n\t\tc.GnmiServer.Cache.MaxMsgsPerSubscription = c.FileConfig.GetInt64(\"gnmi-server/cache/max-msgs-per-subscription\")\n\t\t//\n\t\tc.GnmiServer.Cache.FetchBatchSize = c.FileConfig.GetInt(\"gnmi-server/cache/fetch-batch-size\")\n\t\tc.GnmiServer.Cache.FetchWaitTime = c.FileConfig.GetDuration(\"gnmi-server/cache/fetch-wait-time\")\n\t}\n\treturn nil\n}\n\nfunc (c *Config) setGnmiServerDefaults() {\n\tif c.GnmiServer.Address == \"\" 
{\n\t\tc.GnmiServer.Address = defaultAddress\n\t}\n\tif c.GnmiServer.MaxSubscriptions <= 0 {\n\t\tc.GnmiServer.MaxSubscriptions = defaultMaxSubscriptions\n\t}\n\tif c.GnmiServer.MaxUnaryRPC <= 0 {\n\t\tc.GnmiServer.MaxUnaryRPC = defaultMaxUnaryRPC\n\t}\n\tif c.GnmiServer.MinSampleInterval <= 0 {\n\t\tc.GnmiServer.MinSampleInterval = minimumSampleInterval\n\t}\n\tif c.GnmiServer.DefaultSampleInterval <= 0 {\n\t\tc.GnmiServer.DefaultSampleInterval = defaultSampleInterval\n\t}\n\tif c.GnmiServer.MinHeartbeatInterval <= 0 {\n\t\tc.GnmiServer.MinHeartbeatInterval = minimumHeartbeatInterval\n\t}\n}\n\nfunc (c *Config) setGnmiServerServiceRegistrationDefaults() {\n\tif c.GnmiServer.ServiceRegistration.Address == \"\" {\n\t\tc.GnmiServer.ServiceRegistration.Address = defaultServiceRegistrationAddress\n\t}\n\tif c.GnmiServer.ServiceRegistration.CheckInterval <= 5*time.Second {\n\t\tc.GnmiServer.ServiceRegistration.CheckInterval = defaultRegistrationCheckInterval\n\t}\n\tif c.GnmiServer.ServiceRegistration.MaxFail <= 0 {\n\t\tc.GnmiServer.ServiceRegistration.MaxFail = defaultMaxServiceFail\n\t}\n\tderegisterTimer := c.GnmiServer.ServiceRegistration.CheckInterval * time.Duration(c.GnmiServer.ServiceRegistration.MaxFail)\n\tc.GnmiServer.ServiceRegistration.DeregisterAfter = deregisterTimer.String()\n}\n"
  },
  {
    "path": "pkg/config/inputs.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/openconfig/gnmic/pkg/inputs\"\n\t_ \"github.com/openconfig/gnmic/pkg/inputs/all\"\n)\n\nfunc (c *Config) GetInputs() (map[string]map[string]interface{}, error) {\n\terrs := make([]error, 0)\n\tinputsDef := c.FileConfig.GetStringMap(\"inputs\")\n\tfor name, inputCfg := range inputsDef {\n\t\tinputCfgconv := convert(inputCfg)\n\t\tswitch inputCfg := inputCfgconv.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tif outType, ok := inputCfg[\"type\"]; ok {\n\t\t\t\tif !strInlist(outType.(string), inputs.InputTypes) {\n\t\t\t\t\treturn nil, fmt.Errorf(\"unknown input type: %q\", outType)\n\t\t\t\t}\n\t\t\t\tif _, ok := inputs.Inputs[outType.(string)]; ok {\n\t\t\t\t\tformat, ok := inputCfg[\"format\"]\n\t\t\t\t\tif !ok || (ok && format == \"\") {\n\t\t\t\t\t\tinputCfg[\"format\"] = c.FileConfig.GetString(\"format\")\n\t\t\t\t\t}\n\t\t\t\t\tc.Inputs[name] = inputCfg\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\terr := fmt.Errorf(\"unknown input type '%s'\", outType)\n\t\t\t\tc.logger.Print(err)\n\t\t\t\terrs = append(errs, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := fmt.Errorf(\"missing input 'type' under %v\", inputCfg)\n\t\t\tc.logger.Print(err)\n\t\t\terrs = append(errs, err)\n\t\tdefault:\n\t\t\tc.logger.Printf(\"unknown configuration format expecting a map[string]interface{}: got %T : %v\", inputCfg, inputCfg)\n\t\t\treturn nil, fmt.Errorf(\"unexpected inputs configuration format\")\n\t\t}\n\t}\n\tif len(errs) > 0 {\n\t\treturn nil, fmt.Errorf(\"there was 
%d error(s) when getting inputs configuration\", len(errs))\n\t}\n\tfor n := range c.Inputs {\n\t\texpandMapEnv(c.Inputs[n], expandAll())\n\t}\n\tif c.Debug {\n\t\tc.logger.Printf(\"inputs: %+v\", c.Inputs)\n\t}\n\treturn c.Inputs, nil\n}\n"
  },
  {
    "path": "pkg/config/loader.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/openconfig/gnmic/pkg/loaders\"\n\t_ \"github.com/openconfig/gnmic/pkg/loaders/all\"\n)\n\nfunc (c *Config) GetLoader() error {\n\tif c.GlobalFlags.TargetsFile != \"\" {\n\t\tc.Loader = map[string]interface{}{\n\t\t\t\"type\": \"file\",\n\t\t\t\"path\": c.GlobalFlags.TargetsFile,\n\t\t}\n\t\treturn nil\n\t}\n\n\tc.Loader = c.FileConfig.GetStringMap(\"loader\")\n\tfor k, v := range c.Loader {\n\t\tc.Loader[k] = convert(v)\n\t}\n\n\tif len(c.Loader) == 0 {\n\t\treturn nil\n\t}\n\tif _, ok := c.Loader[\"type\"]; !ok {\n\t\treturn errors.New(\"missing type field under loader configuration\")\n\t}\n\tif lds, ok := c.Loader[\"type\"].(string); ok {\n\t\tfor _, lt := range loaders.LoadersTypes {\n\t\t\tif lt == lds {\n\t\t\t\texpandMapEnv(c.Loader, func(k, v string) string {\n\t\t\t\t\tif k == \"password\" {\n\t\t\t\t\t\tif strings.HasPrefix(v, \"${\") && strings.HasSuffix(v, \"}\") {\n\t\t\t\t\t\t\treturn os.ExpandEnv(v)\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn v\n\t\t\t\t\t}\n\t\t\t\t\treturn os.ExpandEnv(v)\n\t\t\t\t})\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"unknown loader type %q\", lds)\n\t}\n\treturn fmt.Errorf(\"field 'type' not a string, found a %T\", c.Loader[\"type\"])\n}\n"
  },
  {
    "path": "pkg/config/locker.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"errors\"\n\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n\t_ \"github.com/openconfig/gnmic/pkg/lockers/all\"\n)\n\nfunc (c *Config) getLocker() error {\n\tif !c.FileConfig.IsSet(\"clustering/locker\") {\n\t\treturn errors.New(\"missing locker config\")\n\t}\n\tc.Clustering.Locker = c.FileConfig.GetStringMap(\"clustering/locker\")\n\tif len(c.Clustering.Locker) == 0 {\n\t\treturn errors.New(\"missing locker config\")\n\t}\n\tif lockerType, ok := c.Clustering.Locker[\"type\"]; ok {\n\t\tswitch lockerType := lockerType.(type) {\n\t\tcase string:\n\t\t\tif _, ok := lockers.Lockers[lockerType]; !ok {\n\t\t\t\treturn errors.New(\"unknown locker type\")\n\t\t\t}\n\t\tdefault:\n\t\t\treturn errors.New(\"wrong locker type format\")\n\t\t}\n\t\texpandMapEnv(c.Clustering.Locker, expandAll())\n\t\treturn nil\n\t}\n\treturn errors.New(\"missing locker type\")\n}\n"
  },
  {
    "path": "pkg/config/outputs.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"sort\"\n\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/all\"\n)\n\nfunc (c *Config) GetOutputs() (map[string]map[string]any, error) {\n\toutDef := c.FileConfig.GetStringMap(\"outputs\")\n\tif len(outDef) == 0 && !c.FileConfig.GetBool(\"subscribe-quiet\") {\n\t\tstdoutConfig := map[string]any{\n\t\t\t\"type\":              \"file\",\n\t\t\t\"file-type\":         \"stdout\",\n\t\t\t\"format\":            c.FileConfig.GetString(\"format\"),\n\t\t\t\"calculate-latency\": c.FileConfig.GetBool(\"calculate-latency\"),\n\t\t}\n\t\toutDef[\"default-stdout\"] = stdoutConfig\n\t}\n\tfor name, outputCfg := range outDef {\n\t\toutputCfgconv := convert(outputCfg)\n\t\tswitch outCfg := outputCfgconv.(type) {\n\t\tcase map[string]any:\n\t\t\tif outType, ok := outCfg[\"type\"]; ok {\n\t\t\t\tswitch outType := outType.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\tif _, ok := outputs.OutputTypes[outType]; !ok {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"unknown output type: %q\", outType)\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, fmt.Errorf(\"unknown output type: %T\", outType)\n\t\t\t\t}\n\t\t\t\tformat, ok := outCfg[\"format\"]\n\t\t\t\tif !ok || (ok && format == \"\") {\n\t\t\t\t\toutCfg[\"format\"] = c.FileConfig.GetString(\"format\")\n\t\t\t\t}\n\t\t\t\tc.Outputs[name] = outCfg\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.logger.Printf(\"missing output 'type' under %v\", 
outCfg)\n\t\tdefault:\n\t\t\tc.logger.Printf(\"unknown configuration format expecting a map[string]interface{}: got %T : %v\", outCfg, outCfg)\n\t\t}\n\t}\n\tfor n := range c.Outputs {\n\t\texpandMapEnv(c.Outputs[n], expandExcept(\"msg-template\", \"target-template\"))\n\t}\n\tnamedOutputs := c.FileConfig.GetStringSlice(\"subscribe-output\")\n\tif len(namedOutputs) == 0 {\n\t\tif c.Debug {\n\t\t\tc.logger.Printf(\"outputs: %+v\", c.Outputs)\n\t\t}\n\t\treturn c.Outputs, nil\n\t}\n\tfilteredOutputs := make(map[string]map[string]interface{})\n\tnotFound := make([]string, 0)\n\tfor _, name := range namedOutputs {\n\t\tif o, ok := c.Outputs[name]; ok {\n\t\t\tfilteredOutputs[name] = o\n\t\t} else {\n\t\t\tnotFound = append(notFound, name)\n\t\t}\n\t}\n\tif len(notFound) > 0 {\n\t\treturn nil, fmt.Errorf(\"named output(s) not found in config file: %v\", notFound)\n\t}\n\tif c.Debug {\n\t\tc.logger.Printf(\"outputs: %+v\", filteredOutputs)\n\t}\n\treturn filteredOutputs, nil\n}\n\nfunc convert(i interface{}) interface{} {\n\tswitch x := i.(type) {\n\tcase map[interface{}]interface{}:\n\t\tnm := map[string]interface{}{}\n\t\tfor k, v := range x {\n\t\t\tnm[k.(string)] = convert(v)\n\t\t}\n\t\treturn nm\n\tcase map[string]interface{}:\n\t\tfor k, v := range x {\n\t\t\tx[k] = convert(v)\n\t\t}\n\tcase []interface{}:\n\t\tfor i, v := range x {\n\t\t\tx[i] = convert(v)\n\t\t}\n\t}\n\treturn i\n}\n\ntype outputSuggestion struct {\n\tName  string\n\tTypes []string\n}\n\nfunc (c *Config) GetOutputsSuggestions() []outputSuggestion {\n\toutDef := c.FileConfig.GetStringMap(\"outputs\")\n\tsuggestions := make([]outputSuggestion, 0, len(outDef))\n\tfor name, d := range outDef {\n\t\tdl := convert(d)\n\t\tsug := outputSuggestion{Name: name, Types: make([]string, 0)}\n\t\tswitch outs := dl.(type) {\n\t\tcase []interface{}:\n\t\t\tfor _, ou := range outs {\n\t\t\t\tswitch ou := ou.(type) {\n\t\t\t\tcase map[string]interface{}:\n\t\t\t\t\tif outType, ok := ou[\"type\"]; ok 
{\n\t\t\t\t\t\tsug.Types = append(sug.Types, outType.(string))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tsuggestions = append(suggestions, sug)\n\t}\n\tsort.Slice(suggestions, func(i, j int) bool {\n\t\treturn suggestions[i].Name < suggestions[j].Name\n\t})\n\treturn suggestions\n}\n\nfunc (c *Config) GetOutputsConfigs() [][]string {\n\toutDef := c.FileConfig.GetStringMap(\"outputs\")\n\tif outDef == nil {\n\t\treturn nil\n\t}\n\toutList := make([][]string, 0, len(outDef))\n\tfor name, outputCfg := range outDef {\n\t\tb, err := json.Marshal(outputCfg)\n\t\tif err != nil {\n\t\t\tc.logger.Printf(\"could not marshal output config: %v\", err)\n\t\t\treturn nil\n\t\t}\n\t\toutList = append(outList, []string{name, string(b)})\n\t}\n\treturn outList\n}\n"
  },
  {
    "path": "pkg/config/outputs_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar getOutputsTestSet = map[string]struct {\n\tenvs []string\n\tin   []byte\n\tout  map[string]map[string]interface{}\n}{\n\t\"basic_outputs\": {\n\t\tin: []byte(`\noutputs:\n  output1:\n    type: file\n    file-type: stdout\n  output2:\n    type: nats\n`),\n\t\tout: map[string]map[string]interface{}{\n\t\t\t\"output1\": {\n\t\t\t\t\"type\":      \"file\",\n\t\t\t\t\"file-type\": \"stdout\",\n\t\t\t\t\"format\":    \"\",\n\t\t\t},\n\t\t\t\"output2\": {\n\t\t\t\t\"type\":   \"nats\",\n\t\t\t\t\"format\": \"\",\n\t\t\t},\n\t\t},\n\t},\n\t\"basic_outputs_env\": {\n\t\tenvs: []string{\n\t\t\t\"NATS_ADDRESS=1.1.1.1\",\n\t\t},\n\t\tin: []byte(`\noutputs:\n  output1:\n    type: file\n    file-type: stdout\n  output2:\n    type: nats\n    address: ${NATS_ADDRESS}:1123\n`),\n\t\tout: map[string]map[string]interface{}{\n\t\t\t\"output1\": {\n\t\t\t\t\"type\":      \"file\",\n\t\t\t\t\"file-type\": \"stdout\",\n\t\t\t\t\"format\":    \"\",\n\t\t\t},\n\t\t\t\"output2\": {\n\t\t\t\t\"type\":    \"nats\",\n\t\t\t\t\"format\":  \"\",\n\t\t\t\t\"address\": \"1.1.1.1:1123\",\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestGetOutputs(t *testing.T) {\n\tfor name, data := range getOutputsTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor _, e := range data.envs {\n\t\t\t\tp := strings.SplitN(e, \"=\", 2)\n\t\t\t\tos.Setenv(p[0], p[1])\n\t\t\t}\n\t\t\tcfg := New()\n\t\t\tcfg.Debug = 
true\n\t\t\tcfg.SetLogger()\n\t\t\tcfg.FileConfig.SetConfigType(\"yaml\")\n\t\t\terr := cfg.FileConfig.ReadConfig(bytes.NewBuffer(data.in))\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed reading config: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tv := cfg.FileConfig.Get(\"outputs\")\n\t\t\tt.Logf(\"raw interface outputs: %+v\", v)\n\t\t\touts, err := cfg.GetOutputs()\n\t\t\tt.Logf(\"exp value: %+v\", data.out)\n\t\t\tt.Logf(\"got value: %+v\", outs)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed getting outputs: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(outs, data.out) {\n\t\t\t\tt.Log(\"maps not equal\")\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/config/plugins.go",
    "content": "package config\n\nimport (\n\t\"time\"\n)\n\ntype PluginsConfig struct {\n\tPath         string        `mapstructure:\"path,omitempty\" json:\"path,omitempty\"`\n\tGlob         string        `mapstructure:\"glob,omitempty\" json:\"glob,omitempty\"`\n\tStartTimeout time.Duration `mapstructure:\"start-timeout,omitempty\" json:\"start-timeout,omitempty\"`\n\tDebug        bool          `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n}\n\nfunc (c *Config) GetPluginsConfig() (*PluginsConfig, error) {\n\tif !c.FileConfig.IsSet(\"plugins\") && c.GlobalFlags.PluginProcessorsPath == \"\" {\n\t\treturn nil, nil\n\t}\n\tpc := &PluginsConfig{}\n\tpc.Path = c.GlobalFlags.PluginProcessorsPath\n\tif pc.Path == \"\" {\n\t\tpc.Path = c.FileConfig.GetString(\"plugins/path\")\n\t}\n\tpc.Glob = c.FileConfig.GetString(\"plugins/glob\")\n\tif pc.Glob == \"\" {\n\t\tpc.Glob = \"*\"\n\t}\n\tpc.StartTimeout = c.FileConfig.GetDuration(\"plugins/start-timeout\")\n\tpc.Debug = c.FileConfig.GetBool(\"plugins/debug\")\n\treturn pc, nil\n}\n"
  },
  {
    "path": "pkg/config/processors.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nfunc (c *Config) GetEventProcessors() (map[string]map[string]interface{}, error) {\n\teps := c.FileConfig.GetStringMap(\"processors\")\n\tfor name, epc := range eps {\n\t\tswitch epc := epc.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tc.logger.Printf(\"validating processor %q config\", name)\n\t\t\terr := c.validateProcessorConfig(epc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.Processors[name] = epc\n\t\tcase nil:\n\t\t\treturn nil, fmt.Errorf(\"empty processor %q config\", name)\n\t\tdefault:\n\t\t\tc.logger.Printf(\"malformed processors config, %+v\", epc)\n\t\t\treturn nil, fmt.Errorf(\"malformed processors config, got %T\", epc)\n\t\t}\n\t}\n\tfor n, es := range c.Processors {\n\t\tfor nn, p := range es {\n\t\t\tes[nn] = convert(p)\n\t\t}\n\t\tc.Processors[n] = es\n\t}\n\tfor n := range c.Processors {\n\t\texpandMapEnv(c.Processors[n], expandExcept(\n\t\t\t\"expression\",\n\t\t\t\"condition\",\n\t\t\t\"value-names\", \"values\",\n\t\t\t\"tag-names\", \"tags\",\n\t\t\t\"old\", \"new\", // strings.replace\n\t\t\t\"source\", // starlark\n\t\t))\n\t}\n\tif c.Debug {\n\t\tc.logger.Printf(\"processors: %+v\", c.Processors)\n\t}\n\treturn c.Processors, nil\n}\n\nfunc (c *Config) validateProcessorConfig(pcfg map[string]interface{}) error {\n\tfor epType := range pcfg {\n\t\tif !strInlist(epType, formatters.EventProcessorTypes) {\n\t\t\treturn fmt.Errorf(\"unknown processors type: %s\", 
epType)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc strInlist(s string, ls []string) bool {\n\tfor _, ss := range ls {\n\t\tif ss == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "pkg/config/processors_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar getProcessorsTestSet = map[string]struct {\n\tenvs []string\n\tin   []byte\n\tout  map[string]map[string]interface{}\n}{\n\t\"basic_processors\": {\n\t\tin: []byte(`\nprocessors:\n  proc-convert-integer:\n    event-convert:\n      value-names:\n        - \".*\"\n      type: int\n\n  proc-delete-tag-name:\n    event-delete:\n      tag-names:\n        - \"^subscription-name\"\n\n  proc-delete-value-name:\n    event-delete:\n      value-names:\n        - \".*out-unicast-packets\"\n`),\n\t\tout: map[string]map[string]interface{}{\n\t\t\t\"proc-convert-integer\": {\n\t\t\t\t\"event-convert\": map[string]interface{}{\n\t\t\t\t\t\"value-names\": []interface{}{\".*\"},\n\t\t\t\t\t\"type\":        \"int\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"proc-delete-tag-name\": {\n\t\t\t\t\"event-delete\": map[string]interface{}{\n\t\t\t\t\t\"tag-names\": []interface{}{\"^subscription-name\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"proc-delete-value-name\": {\n\t\t\t\t\"event-delete\": map[string]interface{}{\n\t\t\t\t\t\"value-names\": []interface{}{\".*out-unicast-packets\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"basic_processors_with_env\": {\n\t\tenvs: []string{\n\t\t\t\"PROC_CONVERT_TYPE=int\",\n\t\t},\n\t\tin: []byte(`\nprocessors:\n  proc-convert-integer:\n    event-convert:\n      value-names:\n        - \".*\"\n      type: ${PROC_CONVERT_TYPE}\n\n  proc-delete-tag-name:\n    event-delete:\n      tag-names:\n        - 
\"^subscription-name\"\n\n  proc-delete-value-name:\n    event-delete:\n      value-names:\n        - \".*out-unicast-packets\"\n`),\n\t\tout: map[string]map[string]interface{}{\n\t\t\t\"proc-convert-integer\": {\n\t\t\t\t\"event-convert\": map[string]interface{}{\n\t\t\t\t\t\"value-names\": []interface{}{\".*\"},\n\t\t\t\t\t\"type\":        \"int\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"proc-delete-tag-name\": {\n\t\t\t\t\"event-delete\": map[string]interface{}{\n\t\t\t\t\t\"tag-names\": []interface{}{\"^subscription-name\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"proc-delete-value-name\": {\n\t\t\t\t\"event-delete\": map[string]interface{}{\n\t\t\t\t\t\"value-names\": []interface{}{\".*out-unicast-packets\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestGetProcessors(t *testing.T) {\n\tfor name, data := range getProcessorsTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor _, e := range data.envs {\n\t\t\t\tp := strings.SplitN(e, \"=\", 2)\n\t\t\t\tos.Setenv(p[0], p[1])\n\t\t\t}\n\t\t\tcfg := New()\n\t\t\tcfg.Debug = true\n\t\t\tcfg.SetLogger()\n\t\t\tcfg.FileConfig.SetConfigType(\"yaml\")\n\t\t\terr := cfg.FileConfig.ReadConfig(bytes.NewBuffer(data.in))\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed reading config: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tv := cfg.FileConfig.Get(\"processors\")\n\t\t\tt.Logf(\"raw interface processors: %+v\", v)\n\t\t\touts, err := cfg.GetEventProcessors()\n\t\t\tt.Logf(\"exp value: %+v\", data.out)\n\t\t\tt.Logf(\"got value: %+v\", outs)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed getting processors: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\t//assert.EqualValues(t, data.out, outs)\n\t\t\tif !reflect.DeepEqual(outs, data.out) {\n\t\t\t\tt.Log(\"maps not equal\")\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/config/set.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"google.golang.org/protobuf/encoding/prototext\"\n\t\"gopkg.in/yaml.v2\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\n\t\"github.com/openconfig/gnmic/pkg/api\"\n\tgfile \"github.com/openconfig/gnmic/pkg/file\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n)\n\nconst (\n\tvarFileSuffix = \"_vars\"\n)\n\ntype UpdateItem struct {\n\tPath     string      `json:\"path,omitempty\" yaml:\"path,omitempty\"`\n\tValue    interface{} `json:\"value,omitempty\" yaml:\"value,omitempty\"`\n\tEncoding string      `json:\"encoding,omitempty\" yaml:\"encoding,omitempty\"`\n}\n\ntype SetRequestFile struct {\n\tUpdates          []*UpdateItem `json:\"updates,omitempty\" yaml:\"updates,omitempty\"`\n\tReplaces         []*UpdateItem `json:\"replaces,omitempty\" yaml:\"replaces,omitempty\"`\n\tUnionReplaces    []*UpdateItem `json:\"union-replaces,omitempty\" yaml:\"union-replaces,omitempty\"`\n\tDeletes          []string      `json:\"deletes,omitempty\" yaml:\"deletes,omitempty\"`\n\tCommitID         string        `yaml:\"commit-id,omitempty\" json:\"commit-id,omitempty\"`\n\tCommitAction     commitAction  `yaml:\"commit-action,omitempty\" json:\"commit-action,omitempty\"`\n\tRollbackDuration time.Duration `yaml:\"rollback-duration,omitempty\" json:\"rollback-duration,omitempty\"`\n}\n\ntype commitAction string\n\nconst 
(\n\tcommitActionRequest             commitAction = \"request\"\n\tcommitActionCancel              commitAction = \"cancel\"\n\tcommitActionConfirm             commitAction = \"confirm\"\n\tcommitActionSetRollbackDuration commitAction = \"set-rollback-duration\"\n)\n\nfunc (c *Config) ReadSetRequestTemplate() error {\n\tif len(c.SetRequestFile) == 0 {\n\t\treturn nil\n\t}\n\tc.setRequestTemplate = make([]*template.Template, len(c.SetRequestFile))\n\tfor i, srf := range c.SetRequestFile {\n\t\tb, err := gfile.ReadFile(context.TODO(), srf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif c.Debug {\n\t\t\tc.logger.Printf(\"set request file %d content: %s\", i, string(b))\n\t\t}\n\t\t// read template\n\t\tc.setRequestTemplate[i], err = gtemplate.CreateTemplate(fmt.Sprintf(\"set-request-%d\", i), string(b))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn c.readTemplateVarsFile()\n}\n\nfunc (c *Config) readTemplateVarsFile() error {\n\tif c.SetRequestVars == \"\" {\n\t\text := filepath.Ext(c.SetRequestFile[0])\n\t\tc.SetRequestVars = fmt.Sprintf(\"%s%s%s\", c.SetRequestFile[0][0:len(c.SetRequestFile[0])-len(ext)], varFileSuffix, ext)\n\t\tc.logger.Printf(\"trying to find variable file %q\", c.SetRequestVars)\n\t\t_, err := os.Stat(c.SetRequestVars)\n\t\tif os.IsNotExist(err) {\n\t\t\tc.SetRequestVars = \"\"\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tb, err := readFile(c.SetRequestVars)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif c.setRequestVars == nil {\n\t\tc.setRequestVars = make(map[string]interface{})\n\t}\n\terr = yaml.Unmarshal(b, &c.setRequestVars)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttempInterface := convert(c.setRequestVars)\n\tswitch t := tempInterface.(type) {\n\tcase map[string]interface{}:\n\t\tc.setRequestVars = t\n\tdefault:\n\t\treturn errors.New(\"unexpected variables file format\")\n\t}\n\tif c.Debug {\n\t\tc.logger.Printf(\"request vars content: %v\", c.setRequestVars)\n\t}\n\treturn 
nil\n}\n\nfunc (c *Config) CreateSetRequestFromFile(targetName string) ([]*gnmi.SetRequest, error) {\n\tif len(c.setRequestTemplate) == 0 {\n\t\treturn nil, errors.New(\"missing set request template\")\n\t}\n\treqs := make([]*gnmi.SetRequest, 0, len(c.setRequestTemplate))\n\tbuf := new(bytes.Buffer)\n\tfor _, srf := range c.setRequestTemplate {\n\t\tbuf.Reset()\n\t\terr := srf.Execute(buf, templateInput{\n\t\t\tTargetName: targetName,\n\t\t\tVars:       c.setRequestVars,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif c.Debug {\n\t\t\tc.logger.Printf(\"target %q template result:\\n%s\", targetName, buf.String())\n\t\t}\n\t\t//\n\t\treqFile := new(SetRequestFile)\n\t\terr = yaml.Unmarshal(buf.Bytes(), reqFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tgnmiOpts := make([]api.GNMIOption, 0)\n\t\tbuf.Reset()\n\t\tfor _, upd := range reqFile.Updates {\n\t\t\tif upd.Path == \"\" {\n\t\t\t\tupd.Path = \"/\"\n\t\t\t}\n\n\t\t\tenc := upd.Encoding\n\t\t\tif enc == \"\" {\n\t\t\t\tenc = c.GlobalFlags.Encoding\n\t\t\t}\n\t\t\tbuf.Reset()\n\t\t\tswitch {\n\t\t\tcase strings.HasPrefix(upd.Path, \"cli:/\"):\n\t\t\t\tval, ok := upd.Value.(string)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"value %v is not a string\", upd.Value)\n\t\t\t\t}\n\t\t\t\tbuf.WriteString(val)\n\t\t\tdefault:\n\t\t\t\terr = json.NewEncoder(buf).Encode(convert(upd.Value))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tgnmiOpts = append(gnmiOpts,\n\t\t\t\tapi.Update(\n\t\t\t\t\tapi.Path(strings.TrimSpace(upd.Path)),\n\t\t\t\t\tapi.Value(strings.TrimSpace(buf.String()), enc),\n\t\t\t\t),\n\t\t\t)\n\t\t}\n\t\tfor _, upd := range reqFile.Replaces {\n\t\t\tif upd.Path == \"\" {\n\t\t\t\tupd.Path = \"/\"\n\t\t\t}\n\t\t\tenc := upd.Encoding\n\t\t\tif enc == \"\" {\n\t\t\t\tenc = c.GlobalFlags.Encoding\n\t\t\t}\n\t\t\tbuf.Reset()\n\t\t\tswitch {\n\t\t\tcase upd.Path == \"cli:/\":\n\t\t\t\tval, ok := upd.Value.(string)\n\t\t\t\tif !ok 
{\n\t\t\t\t\treturn nil, fmt.Errorf(\"value %v is not a string\", upd.Value)\n\t\t\t\t}\n\t\t\t\tbuf.WriteString(val)\n\t\t\tdefault:\n\t\t\t\terr = json.NewEncoder(buf).Encode(convert(upd.Value))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tgnmiOpts = append(gnmiOpts, api.Replace(\n\t\t\t\tapi.Path(strings.TrimSpace(upd.Path)),\n\t\t\t\tapi.Value(strings.TrimSpace(buf.String()), enc),\n\t\t\t),\n\t\t\t)\n\t\t}\n\t\tfor _, upd := range reqFile.UnionReplaces {\n\t\t\tif upd.Path == \"\" {\n\t\t\t\tupd.Path = \"/\"\n\t\t\t}\n\t\t\tenc := upd.Encoding\n\t\t\tif enc == \"\" {\n\t\t\t\tenc = c.GlobalFlags.Encoding\n\t\t\t}\n\t\t\tbuf.Reset()\n\t\t\tswitch {\n\t\t\tcase upd.Path == \"cli:/\":\n\t\t\t\tval, ok := upd.Value.(string)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"value %v is not a string\", upd.Value)\n\t\t\t\t}\n\t\t\t\tbuf.WriteString(val)\n\t\t\tdefault:\n\t\t\t\terr = json.NewEncoder(buf).Encode(convert(upd.Value))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tgnmiOpts = append(gnmiOpts, api.UnionReplace(\n\t\t\t\tapi.Path(strings.TrimSpace(upd.Path)),\n\t\t\t\tapi.Value(strings.TrimSpace(buf.String()), enc),\n\t\t\t),\n\t\t\t)\n\t\t}\n\n\t\tfor _, s := range reqFile.Deletes {\n\t\t\tgnmiOpts = append(gnmiOpts, api.Delete(strings.TrimSpace(s)))\n\t\t}\n\n\t\tif reqFile.CommitID != \"\" {\n\t\t\tswitch reqFile.CommitAction {\n\t\t\tcase commitActionRequest:\n\t\t\t\tgnmiOpts = append(gnmiOpts,\n\t\t\t\t\tapi.Extension_CommitRequest(\n\t\t\t\t\t\tc.LocalFlags.SetCommitId,\n\t\t\t\t\t\tc.LocalFlags.SetCommitRollbackDuration,\n\t\t\t\t\t))\n\t\t\tcase commitActionCancel:\n\t\t\t\tgnmiOpts = append(gnmiOpts,\n\t\t\t\t\tapi.Extension_CommitCancel(\n\t\t\t\t\t\tc.LocalFlags.SetCommitId,\n\t\t\t\t\t))\n\t\t\tcase commitActionConfirm:\n\t\t\t\tgnmiOpts = append(gnmiOpts,\n\t\t\t\t\tapi.Extension_CommitConfirm(\n\t\t\t\t\t\tc.LocalFlags.SetCommitId,\n\t\t\t\t\t))\n\t\t\tcase 
commitActionSetRollbackDuration:\n\t\t\t\tgnmiOpts = append(gnmiOpts,\n\t\t\t\t\tapi.Extension_CommitSetRollbackDuration(\n\t\t\t\t\t\tc.LocalFlags.SetCommitId,\n\t\t\t\t\t\tc.LocalFlags.SetCommitRollbackDuration,\n\t\t\t\t\t))\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"unknown commit action %s\", reqFile.CommitAction)\n\t\t\t}\n\t\t}\n\n\t\tsetReq, err := api.NewSetRequest(gnmiOpts...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treqs = append(reqs, setReq)\n\t}\n\treturn reqs, nil\n}\n\ntype templateInput struct {\n\tTargetName string\n\tVars       map[string]interface{}\n}\n\nfunc (c *Config) CreateSetRequestFromProtoFile() ([]*gnmi.SetRequest, error) {\n\treqs := make([]*gnmi.SetRequest, 0, len(c.SetRequestProtoFile))\n\tfor _, r := range c.SetRequestProtoFile {\n\t\tb, err := os.ReadFile(r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treq := new(gnmi.SetRequest)\n\t\terr = prototext.Unmarshal(b, req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treqs = append(reqs, req)\n\t}\n\treturn reqs, nil\n}\n"
  },
  {
    "path": "pkg/config/set_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\t\"text/template\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/testutils\"\n)\n\nvar createSetRequestFromFileTestSet = map[string]struct {\n\tin         *Config\n\ttargetName string\n\tout        *gnmi.SetRequest\n\terr        error\n}{\n\n\t\"set_update_request_from_file\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t\t[]*template.Template{\n\t\t\t\ttemplate.Must(template.New(\"set-request\").Parse(`{\n\t\t\t\t\"updates\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"path\": \"valid/path\",\n\t\t\t\t\t\t\"value\": \"value\"\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`))},\n\t\t\tnil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_replace_request_from_file\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, 
nil,\n\t\t\t[]*template.Template{\n\t\t\t\ttemplate.Must(template.New(\"set-request\").Parse(`{\n\t\t\t\t\"replaces\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"path\": \"valid/path\",\n\t\t\t\t\t\t\"value\": \"value\"\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`))},\n\t\t\tnil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_delete_request_from_file\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t\t[]*template.Template{\n\t\t\t\ttemplate.Must(template.New(\"set-request\").Parse(`{\n\t\t\t\t\"deletes\": [\n\t\t\t\t\t\"valid/path\"\n\t\t\t\t]\n\t\t\t}`))},\n\t\t\tnil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tDelete: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_multiple_update_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t\t[]*template.Template{\n\t\t\t\ttemplate.Must(template.New(\"set-request\").Parse(`{\n\t\t\t\t\"updates\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"path\": \"valid/path1\",\n\t\t\t\t\t\t\"value\": \"value1\"\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"path\": \"valid/path2\",\n\t\t\t\t\t\t\"value\": \"value2\",\n\t\t\t\t\t\t\"encoding\": \"json_ietf\"\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`))},\n\t\t\tnil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tUpdate: 
[]*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path1\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value1\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\t\t\tJsonIetfVal: []byte(\"\\\"value2\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_multiple_replace_request_from_file\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t\t[]*template.Template{\n\t\t\t\ttemplate.Must(template.New(\"set-request\").Parse(`{\n\t\t\t\t\"replaces\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"path\": \"valid/path1\",\n\t\t\t\t\t\t\"value\": \"value1\"\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"path\": \"valid/path2\",\n\t\t\t\t\t\t\"value\": \"value2\",\n\t\t\t\t\t\t\"encoding\": \"json_ietf\"\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`))},\n\t\t\tnil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path1\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value1\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: 
&gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\t\t\tJsonIetfVal: []byte(\"\\\"value2\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_multiple_delete_request_from_file\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t\t[]*template.Template{\n\t\t\t\ttemplate.Must(template.New(\"set-request\").Parse(`{\n\t\t\t\t\"deletes\": [\n\t\t\t\t\t\"valid/path1\",\n\t\t\t\t\t\"valid/path2\"\n\t\t\t\t]\n\t\t\t}`))},\n\t\t\tnil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tDelete: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_combined_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t\t[]*template.Template{template.Must(template.New(\"set-request\").Parse(`{\n\t\t\t\t\"updates\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"path\": \"/valid/path1\",\n\t\t\t\t\t\t\"value\": \"value1\"\n\t\t\t\t\t}\n\t\t\t\t],\n\t\t\t\t\"replaces\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"path\": \"/valid/path2\",\n\t\t\t\t\t\t\"value\": \"value2\"\n\t\t\t\t\t}\n\t\t\t\t],\n\t\t\t\t\"deletes\": [\n\t\t\t\t\t\"valid/path\"\n\t\t\t\t]\n\t\t\t}`))},\n\t\t\tnil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path1\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: 
[]byte(\"\\\"value1\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t\t{Name: \"path2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\t\tJsonVal: []byte(\"\\\"value2\\\"\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDelete: []*gnmi.Path{\n\t\t\t\t{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"valid\"},\n\t\t\t\t\t\t{Name: \"path\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"template_based_set_request\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"json\",\n\t\t\t},\n\t\t\tLocalFlags{},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t\t[]*template.Template{\n\t\t\t\ttemplate.Must(template.New(\"set-request\").Parse(`replaces:\n{{- range $interface := index .Vars .TargetName \"interfaces\" }}\n  - path: \"/interface[name={{ index $interface \"name\" }}]\"\n    encoding: \"json_ietf\"\n    value: \n      admin-state: {{ index $interface \"admin-state\" }}\n{{- range $index, $subinterface := index $interface \"subinterfaces\" }}\n      subinterface:\n        - index: {{ $index }}\n          admin-state: {{ index $subinterface \"admin-state\"}}\n          ipv4:\n            address:\n              - ip-prefix: {{ index $subinterface \"ipv4-address\"}}\n{{- end }}\n{{- end }}`))},\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"target1\": map[string]interface{}{\n\t\t\t\t\t\"interfaces\": []interface{}{\n\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\"name\":        \"ethernet-1/1\",\n\t\t\t\t\t\t\t\"admin-state\": \"enable\",\n\t\t\t\t\t\t\t\"subinterfaces\": []interface{}{\n\t\t\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\t\t\"admin-state\":  \"enable\",\n\t\t\t\t\t\t\t\t\t\"ipv4-address\": 
\"192.168.88.1/30\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttargetName: \"target1\",\n\t\tout: &gnmi.SetRequest{\n\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonIetfVal{\n\t\t\t\t\t\t\tJsonIetfVal: []byte(`{\"admin-state\":\"enable\",\"subinterface\":[{\"admin-state\":\"enable\",\"index\":0,\"ipv4\":{\"address\":[{\"ip-prefix\":\"192.168.88.1/30\"}]}}]}`),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_replace_origin_cli\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{},\n\t\t\tLocalFlags{},\n\t\t\tnil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,\n\t\t\t[]*template.Template{\n\t\t\t\ttemplate.Must(template.New(\"set-request\").Parse(`{\n\t\t\t\t\"replaces\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"path\": \"cli:/\",\n\t\t\t\t\t\t\"value\": \"set interface ethernet-1/1 admin-state enable\\nset interface ethernet-1/2 admin-state enable\",\n\t\t\t\t\t\t\"encoding\": \"ascii\",\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`))},\n\t\t\tnil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tReplace: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tOrigin: \"cli\",\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{\n\t\t\t\t\t\t\tAsciiVal: \"set interface ethernet-1/1 admin-state enable\\nset interface ethernet-1/2 admin-state enable\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n\t\"set_update_origin_cli\": {\n\t\tin: &Config{\n\t\t\tGlobalFlags{\n\t\t\t\tEncoding: \"ascii\",\n\t\t\t},\n\t\t\tLocalFlags{},\n\t\t\tnil, nil, nil, nil, nil, nil, 
nil, nil, nil, nil, nil, nil, nil,\n\t\t\t[]*template.Template{\n\t\t\t\ttemplate.Must(template.New(\"set-request\").Parse(`{\n\t\t\t\t\"updates\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"path\": \"cli:/\",\n\t\t\t\t\t\t\"value\": \"set interface ethernet-1/1 admin-state enable\"\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}`))},\n\t\t\tnil,\n\t\t},\n\t\tout: &gnmi.SetRequest{\n\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t{\n\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\tOrigin: \"cli\",\n\t\t\t\t\t},\n\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{\n\t\t\t\t\t\t\tAsciiVal: \"set interface ethernet-1/1 admin-state enable\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\terr: nil,\n\t},\n}\n\nfunc TestCreateSetRequestFromFile(t *testing.T) {\n\tfor name, data := range createSetRequestFromFileTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tsetReq, err := data.in.CreateSetRequestFromFile(data.targetName)\n\t\t\tt.Logf(\"exp value: %+v\", data.out)\n\t\t\tt.Logf(\"exp error: %+v\", data.err)\n\t\t\tt.Logf(\"got value: %+v\", setReq)\n\t\t\tt.Logf(\"got error: %+v\", err)\n\t\t\tif err != nil {\n\t\t\t\tif !strings.HasPrefix(err.Error(), data.err.Error()) {\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !testutils.SetRequestsEqual(setReq[0], data.out) {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/config/subscriptions.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/AlekSi/pointer\"\n\t\"github.com/mitchellh/mapstructure\"\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\nconst (\n\tSubscriptionMode_STREAM               = \"STREAM\"\n\tSubscriptionMode_ONCE                 = \"ONCE\"\n\tSubscriptionMode_POLL                 = \"POLL\"\n\tSubscriptionStreamMode_TARGET_DEFINED = \"TARGET_DEFINED\"\n\tSubscriptionStreamMode_ON_CHANGE      = \"ON_CHANGE\"\n\tSubscriptionStreamMode_SAMPLE         = \"SAMPLE\"\n)\nconst (\n\tsubscriptionDefaultMode       = SubscriptionMode_STREAM\n\tsubscriptionDefaultStreamMode = SubscriptionStreamMode_TARGET_DEFINED\n\tsubscriptionDefaultEncoding   = \"JSON\"\n)\n\nvar ErrConfig = errors.New(\"config error\")\n\nfunc (c *Config) GetSubscriptions(cmd *cobra.Command) (map[string]*types.SubscriptionConfig, error) {\n\tif len(c.LocalFlags.SubscribePath) > 0 && len(c.LocalFlags.SubscribeName) > 0 {\n\t\treturn nil, fmt.Errorf(\"flags --path and --name cannot be mixed\")\n\t}\n\t// subscriptions from cli flags\n\tif len(c.LocalFlags.SubscribePath) > 0 {\n\t\treturn c.subscriptionConfigFromFlags(cmd)\n\t}\n\t// subscriptions from file\n\tsubDef := c.FileConfig.GetStringMap(\"subscriptions\")\n\tif c.Debug {\n\t\tc.logger.Printf(\"subscriptions map: %#v\", subDef)\n\t}\n\t// decode subscription config\n\tfor sn, s := range subDef {\n\t\tswitch s := s.(type) {\n\t\tcase 
map[string]any:\n\t\t\tsub, err := c.decodeSubscriptionConfig(sn, s, cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.Subscriptions[sn] = sub\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%w: subscriptions map: unexpected type %T\", ErrConfig, s)\n\t\t}\n\t}\n\n\t// named subscription\n\tif len(c.LocalFlags.SubscribeName) == 0 {\n\t\tif c.Debug {\n\t\t\tc.logger.Printf(\"subscriptions: %s\", c.Subscriptions)\n\t\t}\n\t\terr := validateSubscriptionsConfig(c.Subscriptions)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn c.Subscriptions, nil\n\t}\n\tfilteredSubscriptions := make(map[string]*types.SubscriptionConfig)\n\tnotFound := make([]string, 0)\n\tfor _, name := range c.LocalFlags.SubscribeName {\n\t\tif s, ok := c.Subscriptions[name]; ok {\n\t\t\tfilteredSubscriptions[name] = s\n\t\t} else {\n\t\t\tnotFound = append(notFound, name)\n\t\t}\n\t}\n\tif len(notFound) > 0 {\n\t\treturn nil, fmt.Errorf(\"named subscription(s) not found in config file: %v\", notFound)\n\t}\n\tif c.Debug {\n\t\tc.logger.Printf(\"subscriptions: %s\", filteredSubscriptions)\n\t}\n\terr := validateSubscriptionsConfig(filteredSubscriptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn filteredSubscriptions, nil\n}\n\nfunc (c *Config) subscriptionConfigFromFlags(cmd *cobra.Command) (map[string]*types.SubscriptionConfig, error) {\n\tsub := &types.SubscriptionConfig{\n\t\tName:      fmt.Sprintf(\"default-%d\", time.Now().Unix()),\n\t\tModels:    []string{},\n\t\tPrefix:    c.LocalFlags.SubscribePrefix,\n\t\tTarget:    c.LocalFlags.SubscribeTarget,\n\t\tSetTarget: c.LocalFlags.SubscribeSetTarget,\n\t\tPaths:     c.LocalFlags.SubscribePath,\n\t\tMode:      c.LocalFlags.SubscribeMode,\n\t\tDepth:     c.LocalFlags.SubscribeDepth,\n\t}\n\t// if globalFlagIsSet(cmd, \"encoding\") {\n\t// \tsub.Encoding = &c.Encoding\n\t// }\n\tif flagIsSet(cmd, \"qos\") {\n\t\tsub.Qos = &c.LocalFlags.SubscribeQos\n\t}\n\tsub.StreamMode = 
c.LocalFlags.SubscribeStreamMode\n\tif flagIsSet(cmd, \"heartbeat-interval\") {\n\t\tsub.HeartbeatInterval = &c.LocalFlags.SubscribeHeartbeatInterval\n\t}\n\tif flagIsSet(cmd, \"sample-interval\") {\n\t\tsub.SampleInterval = &c.LocalFlags.SubscribeSampleInterval\n\t}\n\tsub.SuppressRedundant = c.LocalFlags.SubscribeSuppressRedundant\n\tsub.UpdatesOnly = c.LocalFlags.SubscribeUpdatesOnly\n\tsub.Models = c.LocalFlags.SubscribeModel\n\tif flagIsSet(cmd, \"history-snapshot\") {\n\t\tsnapshot, err := time.Parse(time.RFC3339Nano, c.LocalFlags.SubscribeHistorySnapshot)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"history-snapshot: %v\", err)\n\t\t}\n\t\tsub.History = &types.HistoryConfig{\n\t\t\tSnapshot: snapshot,\n\t\t}\n\t}\n\tif flagIsSet(cmd, \"history-start\") && flagIsSet(cmd, \"history-end\") {\n\t\tstart, err := time.Parse(time.RFC3339Nano, c.LocalFlags.SubscribeHistoryStart)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"history-start: %v\", err)\n\t\t}\n\t\tend, err := time.Parse(time.RFC3339Nano, c.LocalFlags.SubscribeHistoryEnd)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"history-end: %v\", err)\n\t\t}\n\t\tsub.History = &types.HistoryConfig{\n\t\t\tStart: start,\n\t\t\tEnd:   end,\n\t\t}\n\t}\n\tc.Subscriptions[sub.Name] = sub\n\tif c.Debug {\n\t\tc.logger.Printf(\"subscriptions: %s\", c.Subscriptions)\n\t}\n\treturn c.Subscriptions, nil\n}\n\nfunc (c *Config) decodeSubscriptionConfig(sn string, s any, cmd *cobra.Command) (*types.SubscriptionConfig, error) {\n\tsub := new(types.SubscriptionConfig)\n\tdecoder, err := mapstructure.NewDecoder(\n\t\t&mapstructure.DecoderConfig{\n\t\t\tDecodeHook: mapstructure.StringToTimeDurationHookFunc(),\n\t\t\tResult:     sub,\n\t\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = decoder.Decode(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsub.Name = sn\n\t// inherit global \"subscribe-*\" option if it's not set\n\tif err := c.setSubscriptionFieldsFromFlags(sub, cmd); err != nil 
{\n\t\treturn nil, err\n\t}\n\texpandSubscriptionEnv(sub)\n\treturn sub, nil\n}\n\nfunc (c *Config) setSubscriptionFieldsFromFlags(sub *types.SubscriptionConfig, cmd *cobra.Command) error {\n\tif sub.SampleInterval == nil && flagIsSet(cmd, \"sample-interval\") {\n\t\tsub.SampleInterval = &c.LocalFlags.SubscribeSampleInterval\n\t}\n\tif sub.HeartbeatInterval == nil && flagIsSet(cmd, \"heartbeat-interval\") {\n\t\tsub.HeartbeatInterval = &c.LocalFlags.SubscribeHeartbeatInterval\n\t}\n\t// if sub.Encoding == nil && globalFlagIsSet(cmd, \"encoding\") {\n\t// \tsub.Encoding = &c.Encoding\n\t// }\n\tif sub.Mode == \"\" {\n\t\tsub.Mode = c.LocalFlags.SubscribeMode\n\t}\n\tif strings.ToUpper(sub.Mode) == SubscriptionMode_STREAM && sub.StreamMode == \"\" {\n\t\tsub.StreamMode = c.LocalFlags.SubscribeStreamMode\n\t}\n\tif sub.Qos == nil && flagIsSet(cmd, \"qos\") {\n\t\tsub.Qos = &c.LocalFlags.SubscribeQos\n\t}\n\tif flagIsSet(cmd, \"depth\") {\n\t\tsub.Depth = c.LocalFlags.SubscribeDepth\n\t}\n\tif sub.History == nil && flagIsSet(cmd, \"history-snapshot\") {\n\t\tsnapshot, err := time.Parse(time.RFC3339Nano, c.LocalFlags.SubscribeHistorySnapshot)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"history-snapshot: %v\", err)\n\t\t}\n\t\tsub.History = &types.HistoryConfig{\n\t\t\tSnapshot: snapshot,\n\t\t}\n\t}\n\tif sub.History == nil && flagIsSet(cmd, \"history-start\") && flagIsSet(cmd, \"history-end\") {\n\t\tstart, err := time.Parse(time.RFC3339Nano, c.LocalFlags.SubscribeHistoryStart)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"history-start: %v\", err)\n\t\t}\n\t\tend, err := time.Parse(time.RFC3339Nano, c.LocalFlags.SubscribeHistoryEnd)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"history-end: %v\", err)\n\t\t}\n\t\tsub.History = &types.HistoryConfig{\n\t\t\tStart: start,\n\t\t\tEnd:   end,\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *Config) GetSubscriptionsFromFile() []*types.SubscriptionConfig {\n\tsubs, err := c.GetSubscriptions(nil)\n\tif err != nil 
{\n\t\treturn nil\n\t}\n\tsubscriptions := make([]*types.SubscriptionConfig, 0)\n\tfor _, sub := range subs {\n\t\tsubscriptions = append(subscriptions, sub)\n\t}\n\tsort.Slice(subscriptions, func(i, j int) bool {\n\t\treturn subscriptions[i].Name < subscriptions[j].Name\n\t})\n\treturn subscriptions\n}\n\nfunc validateSubscriptionsConfig(subs map[string]*types.SubscriptionConfig) error {\n\tvar hasPoll bool\n\tvar hasOnce bool\n\tvar hasStream bool\n\tfor _, sc := range subs {\n\t\tswitch strings.ToUpper(sc.Mode) {\n\t\tcase \"POLL\":\n\t\t\thasPoll = true\n\t\tcase \"ONCE\":\n\t\t\thasOnce = true\n\t\tcase \"STREAM\":\n\t\t\thasStream = true\n\t\t}\n\t}\n\tif hasPoll && hasOnce || hasPoll && hasStream {\n\t\treturn errors.New(\"subscriptions with mode Poll cannot be mixed with Stream or Once\")\n\t}\n\treturn nil\n}\n\nfunc expandSubscriptionEnv(sc *types.SubscriptionConfig) {\n\tsc.Name = os.ExpandEnv(sc.Name)\n\tfor i := range sc.Models {\n\t\tsc.Models[i] = os.ExpandEnv(sc.Models[i])\n\t}\n\tsc.Prefix = os.ExpandEnv(sc.Prefix)\n\tsc.Target = os.ExpandEnv(sc.Target)\n\tfor i := range sc.Paths {\n\t\tsc.Paths[i] = os.ExpandEnv(sc.Paths[i])\n\t}\n\tsc.Mode = os.ExpandEnv(sc.Mode)\n\tsc.StreamMode = os.ExpandEnv(sc.StreamMode)\n\tif sc.Encoding != nil {\n\t\tsc.Encoding = pointer.ToString(os.ExpandEnv(*sc.Encoding))\n\t}\n}\n"
  },
  {
    "path": "pkg/config/subscriptions_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\nfunc mustParseTime(tm string) time.Time {\n\ttmi, err := time.Parse(time.RFC3339Nano, tm)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"cannot parse time: %v\", err))\n\t}\n\n\treturn tmi\n}\n\nvar getSubscriptionsTestSet = map[string]struct {\n\tenvs   []string\n\tin     []byte\n\tout    map[string]*types.SubscriptionConfig\n\toutErr error\n}{\n\t\"no_globals\": {\n\t\tin: []byte(`\nsubscriptions:\n  sub1:\n    paths: \n      - /valid/path\n`),\n\t\tout: map[string]*types.SubscriptionConfig{\n\t\t\t\"sub1\": {\n\t\t\t\tName:  \"sub1\",\n\t\t\t\tPaths: []string{\"/valid/path\"},\n\t\t\t},\n\t\t},\n\t\toutErr: nil,\n\t},\n\t// \t\"with_globals\": {\n\t// \t\tin: []byte(`\n\t// subscribe-sample-interval: 10s\n\t// subscriptions:\n\t//   sub1:\n\t//     paths:\n\t//       - /valid/path\n\t// `),\n\t// \t\tout: map[string]*types.SubscriptionConfig{\n\t// \t\t\t\"sub1\": {\n\t// \t\t\t\tName:           \"sub1\",\n\t// \t\t\t\tPaths:          []string{\"/valid/path\"},\n\t// \t\t\t\tSampleInterval: pointer.ToDuration(10 * time.Second),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\toutErr: nil,\n\t// \t},\n\t\"2_subs\": {\n\t\tin: []byte(`\nsubscriptions:\n  sub1:\n    paths: \n      - /valid/path\n  sub2:\n    paths: \n      - /valid/path2\n    mode: stream\n    stream-mode: on_change\n`),\n\t\tout: 
map[string]*types.SubscriptionConfig{\n\t\t\t\"sub1\": {\n\t\t\t\tName:  \"sub1\",\n\t\t\t\tPaths: []string{\"/valid/path\"},\n\t\t\t},\n\t\t\t\"sub2\": {\n\t\t\t\tName:       \"sub2\",\n\t\t\t\tPaths:      []string{\"/valid/path2\"},\n\t\t\t\tMode:       \"stream\",\n\t\t\t\tStreamMode: \"on_change\",\n\t\t\t},\n\t\t},\n\t\toutErr: nil,\n\t},\n\t// \t\"2_subs_with_globals\": {\n\t// \t\tin: []byte(`\n\t// subscribe-sample-interval: 10s\n\t// subscriptions:\n\t//   sub1:\n\t//     paths:\n\t//       - /valid/path\n\t//   sub2:\n\t//     paths:\n\t//       - /valid/path2\n\t//     mode: stream\n\t//     stream-mode: on_change\n\t// `),\n\t// \t\tout: map[string]*types.SubscriptionConfig{\n\t// \t\t\t\"sub1\": {\n\t// \t\t\t\tName:           \"sub1\",\n\t// \t\t\t\tPaths:          []string{\"/valid/path\"},\n\t// \t\t\t\tSampleInterval: pointer.ToDuration(10 * time.Second),\n\t// \t\t\t},\n\t// \t\t\t\"sub2\": {\n\t// \t\t\t\tName:           \"sub2\",\n\t// \t\t\t\tPaths:          []string{\"/valid/path2\"},\n\t// \t\t\t\tMode:           \"stream\",\n\t// \t\t\t\tStreamMode:     \"on_change\",\n\t// \t\t\t\tSampleInterval: pointer.ToDuration(10 * time.Second),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\toutErr: nil,\n\t// \t},\n\t\"3_subs_with_env\": {\n\t\tenvs: []string{\n\t\t\t\"SUB1_PATH=/valid/path\",\n\t\t\t\"SUB2_PATH=/valid/path2\",\n\t\t},\n\t\tin: []byte(`\nsubscriptions:\n  sub1:\n    paths: \n      - ${SUB1_PATH}\n  sub2:\n    paths: \n      - ${SUB2_PATH}\n    mode: stream\n    stream-mode: on_change\n`),\n\t\tout: map[string]*types.SubscriptionConfig{\n\t\t\t\"sub1\": {\n\t\t\t\tName:  \"sub1\",\n\t\t\t\tPaths: []string{\"/valid/path\"},\n\t\t\t},\n\t\t\t\"sub2\": {\n\t\t\t\tName:       \"sub2\",\n\t\t\t\tPaths:      []string{\"/valid/path2\"},\n\t\t\t\tMode:       \"stream\",\n\t\t\t\tStreamMode: \"on_change\",\n\t\t\t},\n\t\t},\n\t\toutErr: nil,\n\t},\n\t\"history_snapshot\": {\n\t\tin: []byte(`\nsubscriptions:\n  sub1:\n    paths: \n      - /valid/path\n   
 history:\n      snapshot: 2022-07-14T07:30:00.0Z\n`),\n\t\tout: map[string]*types.SubscriptionConfig{\n\t\t\t\"sub1\": {\n\t\t\t\tName:  \"sub1\",\n\t\t\t\tPaths: []string{\"/valid/path\"},\n\t\t\t\tHistory: &types.HistoryConfig{\n\t\t\t\t\tSnapshot: mustParseTime(\"2022-07-14T07:30:00.0Z\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\toutErr: nil,\n\t},\n\t\"history_range\": {\n\t\tin: []byte(`\nsubscriptions:\n  sub1:\n    paths: \n      - /valid/path\n    history:\n      start: 2021-07-14T07:30:00.0Z\n      end: 2022-07-14T07:30:00.0Z\n`),\n\t\tout: map[string]*types.SubscriptionConfig{\n\t\t\t\"sub1\": {\n\t\t\t\tName:  \"sub1\",\n\t\t\t\tPaths: []string{\"/valid/path\"},\n\t\t\t\tHistory: &types.HistoryConfig{\n\t\t\t\t\tStart: mustParseTime(\"2021-07-14T07:30:00.0Z\"),\n\t\t\t\t\tEnd:   mustParseTime(\"2022-07-14T07:30:00.0Z\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\toutErr: nil,\n\t},\n\t\"subscription_list\": {\n\t\tin: []byte(`\nsubscriptions:\n  sub1:\n    stream-subscriptions:\n      - paths:\n        - /valid/path1\n        stream-mode: sample\n      - paths:\n        - /valid/path2\n        stream-mode: on-change\n`),\n\t\tout: map[string]*types.SubscriptionConfig{\n\t\t\t\"sub1\": {\n\t\t\t\tName: \"sub1\",\n\t\t\t\tStreamSubscriptions: []*types.SubscriptionConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tPaths:      []string{\"/valid/path1\"},\n\t\t\t\t\t\tStreamMode: \"sample\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tPaths:      []string{\"/valid/path2\"},\n\t\t\t\t\t\tStreamMode: \"on-change\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\toutErr: nil,\n\t},\n}\n\nfunc TestGetSubscriptions(t *testing.T) {\n\tfor name, data := range getSubscriptionsTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor _, e := range data.envs {\n\t\t\t\tp := strings.SplitN(e, \"=\", 2)\n\t\t\t\tos.Setenv(p[0], p[1])\n\t\t\t}\n\t\t\tcfg := New()\n\t\t\tcfg.Debug = true\n\t\t\tcfg.SetLogger()\n\t\t\tcfg.FileConfig.SetConfigType(\"yaml\")\n\t\t\terr := 
cfg.FileConfig.ReadConfig(bytes.NewBuffer(data.in))\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed reading config: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\terr = cfg.FileConfig.Unmarshal(cfg)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed fileConfig.Unmarshal: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tv := cfg.FileConfig.Get(\"subscriptions\")\n\t\t\tt.Logf(\"raw interface subscriptions: %+v\", v)\n\t\t\touts, err := cfg.GetSubscriptions(nil)\n\t\t\tt.Logf(\"exp value: %+v\", data.out)\n\t\t\tt.Logf(\"got value: %+v\", outs)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed getting subscriptions: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(outs, data.out) {\n\t\t\t\tt.Log(\"maps not equal\")\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// func TestConfig_CreateSubscribeRequest(t *testing.T) {\n// \ttype fields struct {\n// \t\tGlobalFlags        GlobalFlags\n// \t\tLocalFlags         LocalFlags\n// \t\tFileConfig         *viper.Viper\n// \t\tTargets            map[string]*types.TargetConfig\n// \t\tSubscriptions      map[string]*types.SubscriptionConfig\n// \t\tOutputs            map[string]map[string]interface{}\n// \t\tInputs             map[string]map[string]interface{}\n// \t\tProcessors         map[string]map[string]interface{}\n// \t\tClustering         *clustering\n// \t\tGnmiServer         *gnmiServer\n// \t\tAPIServer          *APIServer\n// \t\tLoader             map[string]interface{}\n// \t\tActions            map[string]map[string]interface{}\n// \t\tlogger             *log.Logger\n// \t\tsetRequestTemplate []*template.Template\n// \t\tsetRequestVars     map[string]interface{}\n// \t}\n// \ttype args struct {\n// \t\tsc     *types.SubscriptionConfig\n// \t\ttarget *types.TargetConfig\n// \t}\n// \ttests := []struct {\n// \t\tname    string\n// \t\tfields  fields\n// \t\targs    args\n// \t\twant    *gnmi.SubscribeRequest\n// \t\twantErr bool\n// \t}{\n// \t\t{\n// \t\t\tname: \"once_subscription\",\n// \t\t\targs: 
args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\"interface\",\n// \t\t\t\t\t},\n// \t\t\t\t\tMode:     \"once\",\n// \t\t\t\t\tEncoding: pointer.ToString(\"json_ietf\"),\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n// \t\t\t\t\t\tSubscription: []*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tMode:     gnmi.SubscriptionList_ONCE,\n// \t\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"once_subscription_multiple_paths\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\"interface\",\n// \t\t\t\t\t\t\"network-instance\",\n// \t\t\t\t\t},\n// \t\t\t\t\tMode:     \"once\",\n// \t\t\t\t\tEncoding: pointer.ToString(\"json_ietf\"),\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n// \t\t\t\t\t\tMode: gnmi.SubscriptionList_ONCE,\n// \t\t\t\t\t\tSubscription: []*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"network-instance\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tEncoding: 
gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"poll_subscription\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\"interface\",\n// \t\t\t\t\t},\n// \t\t\t\t\tMode:     \"poll\",\n// \t\t\t\t\tEncoding: pointer.ToString(\"json_ietf\"),\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n// \t\t\t\t\t\tSubscription: []*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tMode:     gnmi.SubscriptionList_POLL,\n// \t\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"poll_subscription_multiple_paths\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\"interface\",\n// \t\t\t\t\t\t\"network-instance\",\n// \t\t\t\t\t},\n// \t\t\t\t\tMode:     \"poll\",\n// \t\t\t\t\tEncoding: pointer.ToString(\"json_ietf\"),\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n// \t\t\t\t\t\tSubscription: []*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"network-instance\",\n// 
\t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tMode:     gnmi.SubscriptionList_POLL,\n// \t\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"stream_subscription\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\"interface\",\n// \t\t\t\t\t},\n// \t\t\t\t\tMode:     \"stream\",\n// \t\t\t\t\tEncoding: pointer.ToString(\"json_ietf\"),\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n// \t\t\t\t\t\tSubscription: []*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tMode:     gnmi.SubscriptionList_STREAM,\n// \t\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"stream_subscription_multiple_paths\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\"interface\",\n// \t\t\t\t\t\t\"network-instance\",\n// \t\t\t\t\t},\n// \t\t\t\t\tMode:     \"stream\",\n// \t\t\t\t\tEncoding: pointer.ToString(\"json_ietf\"),\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n// \t\t\t\t\t\tSubscription: []*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// 
\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"network-instance\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tMode:     gnmi.SubscriptionList_STREAM,\n// \t\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"stream_sample_subscription\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\"interface\",\n// \t\t\t\t\t},\n// \t\t\t\t\tStreamMode:     \"sample\",\n// \t\t\t\t\tEncoding:       pointer.ToString(\"json_ietf\"),\n// \t\t\t\t\tSampleInterval: pointer.ToDuration(5 * time.Second),\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n// \t\t\t\t\t\tSubscription: []*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_SAMPLE,\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"stream_on_change_subscription\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\"interface\",\n// \t\t\t\t\t},\n// \t\t\t\t\tStreamMode: \"on-change\",\n// \t\t\t\t\tEncoding:   pointer.ToString(\"json_ietf\"),\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n// \t\t\t\t\t\tSubscription: 
[]*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_ON_CHANGE,\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"stream_target_defined_subscription\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\"interface\",\n// \t\t\t\t\t},\n// \t\t\t\t\tStreamMode: \"on_change\",\n// \t\t\t\t\tEncoding:   pointer.ToString(\"json_ietf\"),\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n// \t\t\t\t\t\tSubscription: []*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_ON_CHANGE,\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"subscription_with_history_snapshot\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\"interface\",\n// \t\t\t\t\t},\n// \t\t\t\t\tMode:     \"once\",\n// \t\t\t\t\tEncoding: pointer.ToString(\"json_ietf\"),\n// \t\t\t\t\tHistory: &types.HistoryConfig{\n// \t\t\t\t\t\tSnapshot: mustParseTime(\"2022-07-14T07:30:00.0Z\"),\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: 
&gnmi.SubscriptionList{\n// \t\t\t\t\t\tSubscription: []*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t\tMode:     gnmi.SubscriptionList_ONCE,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t\tExtension: []*gnmi_ext.Extension{\n// \t\t\t\t\t{\n// \t\t\t\t\t\tExt: &gnmi_ext.Extension_History{\n// \t\t\t\t\t\t\tHistory: &gnmi_ext.History{\n// \t\t\t\t\t\t\t\tRequest: &gnmi_ext.History_SnapshotTime{\n// \t\t\t\t\t\t\t\t\tSnapshotTime: 1657783800000000,\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"combined_on-change_and_sample\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tEncoding: pointer.ToString(\"json_ietf\"),\n// \t\t\t\t\tStreamSubscriptions: []*types.SubscriptionConfig{\n// \t\t\t\t\t\t{\n// \t\t\t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\t\t\"interface/admin-state\",\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\tStreamMode: \"ON_CHANGE\",\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\t{\n// \t\t\t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\t\t\"interface/statistics\",\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\tStreamMode: \"SAMPLE\",\n// \t\t\t\t\t\t},\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n// \t\t\t\t\t\tSubscription: []*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_ON_CHANGE,\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n// \t\t\t\t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t\t},\n// 
\t\t\t\t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\t\t\t\tName: \"admin-state\",\n// \t\t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_SAMPLE,\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n// \t\t\t\t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\t\t\t\tName: \"statistics\",\n// \t\t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"combined_on-change_and_sample_multiple_paths\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tEncoding: pointer.ToString(\"json_ietf\"),\n// \t\t\t\t\tStreamSubscriptions: []*types.SubscriptionConfig{\n// \t\t\t\t\t\t{\n// \t\t\t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\t\t\"interface/admin-state\",\n// \t\t\t\t\t\t\t\t\"interface/oper-state\",\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\tStreamMode: \"ON_CHANGE\",\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\t{\n// \t\t\t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\t\t\"interface/statistics\",\n// \t\t\t\t\t\t\t\t\"interface/subinterface/statistics\",\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\tStreamMode: \"SAMPLE\",\n// \t\t\t\t\t\t},\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n// \t\t\t\t\t\tSubscription: []*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_ON_CHANGE,\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n// \t\t\t\t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t\t},\n// 
\t\t\t\t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\t\t\t\tName: \"admin-state\",\n// \t\t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_ON_CHANGE,\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n// \t\t\t\t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\t\t\t\tName: \"oper-state\",\n// \t\t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_SAMPLE,\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n// \t\t\t\t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\t\t\t\tName: \"statistics\",\n// \t\t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tMode: gnmi.SubscriptionMode_SAMPLE,\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n// \t\t\t\t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\t\t\t\tName: \"subinterface\",\n// \t\t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\t\t\t\tName: \"statistics\",\n// \t\t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"invalid_combined_paths_and_subscriptions\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tPaths:    []string{\"network-instance\"},\n// \t\t\t\t\tEncoding: pointer.ToString(\"json_ietf\"),\n// \t\t\t\t\tStreamSubscriptions: 
[]*types.SubscriptionConfig{\n// \t\t\t\t\t\t{\n// \t\t\t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\t\t\"interface/admin-state\",\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\tStreamMode: \"ON_CHANGE\",\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\t{\n// \t\t\t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\t\t\"interface/statistics\",\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\tStreamMode: \"SAMPLE\",\n// \t\t\t\t\t\t},\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: true,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"invalid_combined_subscriptions_mode\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tEncoding: pointer.ToString(\"json_ietf\"),\n// \t\t\t\t\tStreamSubscriptions: []*types.SubscriptionConfig{\n// \t\t\t\t\t\t{\n// \t\t\t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\t\t\"interface/admin-state\",\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\tMode: \"ONCE\",\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\t{\n// \t\t\t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\t\t\"interface/statistics\",\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\tStreamMode: \"SAMPLE\",\n// \t\t\t\t\t\t},\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: true,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"invalid_subscription mode\",\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tEncoding: pointer.ToString(\"json_ietf\"),\n// \t\t\t\t\tMode:     \"ONCE\",\n// \t\t\t\t\tStreamSubscriptions: []*types.SubscriptionConfig{\n// \t\t\t\t\t\t{\n// \t\t\t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\t\t\"interface/admin-state\",\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\tMode: \"ON_CHANGE\",\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\t{\n// \t\t\t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\t\t\"interface/statistics\",\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\tStreamMode: \"SAMPLE\",\n// \t\t\t\t\t\t},\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: true,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"encoding_from_target\",\n// \t\t\targs: args{\n// \t\t\t\tsc: 
&types.SubscriptionConfig{\n// \t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\"interface\",\n// \t\t\t\t\t},\n// \t\t\t\t\tMode: \"once\",\n// \t\t\t\t},\n// \t\t\t\ttarget: &types.TargetConfig{\n// \t\t\t\t\tEncoding: pointer.ToString(\"json_ietf\"),\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n// \t\t\t\t\t\tSubscription: []*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tMode:     gnmi.SubscriptionList_ONCE,\n// \t\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t\t{\n// \t\t\tname: \"encoding_from_global\",\n// \t\t\tfields: fields{\n// \t\t\t\tGlobalFlags: GlobalFlags{Encoding: \"json_ietf\"},\n// \t\t\t},\n// \t\t\targs: args{\n// \t\t\t\tsc: &types.SubscriptionConfig{\n// \t\t\t\t\tPaths: []string{\n// \t\t\t\t\t\t\"interface\",\n// \t\t\t\t\t},\n// \t\t\t\t\tMode: \"once\",\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twant: &gnmi.SubscribeRequest{\n// \t\t\t\tRequest: &gnmi.SubscribeRequest_Subscribe{\n// \t\t\t\t\tSubscribe: &gnmi.SubscriptionList{\n// \t\t\t\t\t\tSubscription: []*gnmi.Subscription{\n// \t\t\t\t\t\t\t{\n// \t\t\t\t\t\t\t\tPath: &gnmi.Path{\n// \t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{{\n// \t\t\t\t\t\t\t\t\t\tName: \"interface\",\n// \t\t\t\t\t\t\t\t\t}},\n// \t\t\t\t\t\t\t\t},\n// \t\t\t\t\t\t\t},\n// \t\t\t\t\t\t},\n// \t\t\t\t\t\tMode:     gnmi.SubscriptionList_ONCE,\n// \t\t\t\t\t\tEncoding: gnmi.Encoding_JSON_IETF,\n// \t\t\t\t\t},\n// \t\t\t\t},\n// \t\t\t},\n// \t\t\twantErr: false,\n// \t\t},\n// \t}\n// \tfor _, tt := range tests {\n// \t\tt.Run(tt.name, func(t *testing.T) {\n// \t\t\tc := &Config{\n// 
\t\t\t\tGlobalFlags:        tt.fields.GlobalFlags,\n// \t\t\t\tLocalFlags:         tt.fields.LocalFlags,\n// \t\t\t\tFileConfig:         tt.fields.FileConfig,\n// \t\t\t\tTargets:            tt.fields.Targets,\n// \t\t\t\tSubscriptions:      tt.fields.Subscriptions,\n// \t\t\t\tOutputs:            tt.fields.Outputs,\n// \t\t\t\tInputs:             tt.fields.Inputs,\n// \t\t\t\tProcessors:         tt.fields.Processors,\n// \t\t\t\tClustering:         tt.fields.Clustering,\n// \t\t\t\tGnmiServer:         tt.fields.GnmiServer,\n// \t\t\t\tAPIServer:          tt.fields.APIServer,\n// \t\t\t\tLoader:             tt.fields.Loader,\n// \t\t\t\tActions:            tt.fields.Actions,\n// \t\t\t\tlogger:             tt.fields.logger,\n// \t\t\t\tsetRequestTemplate: tt.fields.setRequestTemplate,\n// \t\t\t\tsetRequestVars:     tt.fields.setRequestVars,\n// \t\t\t}\n// \t\t\tgot, err := c.CreateSubscribeRequest(tt.args.sc, tt.args.target)\n// \t\t\tif err != nil && tt.wantErr {\n// \t\t\t\tt.Logf(\"expected error: %v\", err)\n// \t\t\t\treturn\n// \t\t\t}\n// \t\t\tif (err != nil) != tt.wantErr {\n// \t\t\t\tt.Logf(\"Config.CreateSubscribeRequest() error   = %v\", err)\n// \t\t\t\tt.Logf(\"Config.CreateSubscribeRequest() wantErr = %v\", tt.wantErr)\n// \t\t\t\tt.Fail()\n// \t\t\t\treturn\n// \t\t\t}\n// \t\t\tt.Logf(\"got:\\n%s\", prototext.Format(got))\n// \t\t\tif !testutils.SubscribeRequestsEqual(got, tt.want) {\n// \t\t\t\tt.Logf(\"Config.CreateSubscribeRequest() got  = %v\", got)\n// \t\t\t\tt.Logf(\"Config.CreateSubscribeRequest() want = %v\", tt.want)\n// \t\t\t\tt.Fail()\n// \t\t\t}\n// \t\t})\n// \t}\n// }\n"
  },
  {
    "path": "pkg/config/targets.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"maps\"\n\t\"net\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/mitchellh/mapstructure\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tdefaultTargetBufferSize = 100\n)\n\nvar ErrNoTargetsFound = errors.New(\"no targets found\")\n\nfunc (c *Config) GetTargets() (map[string]*types.TargetConfig, error) {\n\tvar err error\n\t// case address is defined in .Address\n\tif len(c.Address) > 0 {\n\t\tfor _, addr := range c.Address {\n\t\t\ttc := &types.TargetConfig{\n\t\t\t\tName:    addr,\n\t\t\t\tAddress: addr,\n\t\t\t}\n\t\t\terr = c.SetTargetConfigDefaults(tc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.Targets[tc.Name] = tc\n\t\t}\n\t\treturn c.Targets, nil\n\t}\n\t// case targets is defined in config file\n\ttargetsInt := c.FileConfig.Get(\"targets\")\n\ttargetsMap := make(map[string]interface{})\n\tswitch targetsInt := targetsInt.(type) {\n\tcase string:\n\t\tfor _, addr := range strings.Split(targetsInt, \" \") {\n\t\t\ttargetsMap[addr] = nil\n\t\t}\n\tcase map[string]interface{}:\n\t\ttargetsMap = targetsInt\n\tcase nil:\n\t\treturn nil, ErrNoTargetsFound\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unexpected targets format, got: %T\", targetsInt)\n\t}\n\tif len(targetsMap) == 0 {\n\t\treturn nil, ErrNoTargetsFound\n\t}\n\n\tnewTargetsConfig := make(map[string]*types.TargetConfig)\n\tfor name, t := range targetsMap {\n\t\ttc := 
new(types.TargetConfig)\n\t\tswitch t := t.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tdecoder, err := mapstructure.NewDecoder(\n\t\t\t\t&mapstructure.DecoderConfig{\n\t\t\t\t\tDecodeHook: mapstructure.StringToTimeDurationHookFunc(),\n\t\t\t\t\tResult:     tc,\n\t\t\t\t},\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\terr = decoder.Decode(t)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase nil:\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unexpected targets format, got a %T\", t)\n\t\t}\n\t\tif tc.Address == \"\" {\n\t\t\ttc.Address = name\n\t\t}\n\t\tif tc.Name == \"\" {\n\t\t\ttc.Name = name\n\t\t}\n\t\terr = c.SetTargetConfigDefaults(tc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = expandCertPaths(tc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// due to a viper bug that changes env values to lowercase if read\n\t\t// as part of a StringMap or interface{}:\n\t\t// read the target password as a string to maintain its case.\n\t\t// if it's not an empty string set it explicitly\n\t\tpass := c.FileConfig.GetString(fmt.Sprintf(\"targets/%s/password\", name))\n\t\tif pass != \"\" {\n\t\t\t*tc.Password = pass\n\t\t}\n\t\texpandTargetEnv(tc)\n\t\tnewTargetsConfig[name] = tc\n\t}\n\tc.Targets = newTargetsConfig\n\n\tsubNames := c.FileConfig.GetStringSlice(\"subscribe-name\")\n\tif len(subNames) == 0 {\n\t\treturn c.Targets, nil\n\t}\n\tfor n := range c.Targets {\n\t\tc.Targets[n].Subscriptions = subNames\n\t}\n\treturn c.Targets, nil\n}\n\nfunc (c *Config) SetTargetConfigDefaults(tc *types.TargetConfig) error {\n\treturn setTargetConfigDefaultsFromGlobalFlags(tc, &c.GlobalFlags, c.FileConfig.GetString(\"port\"))\n}\n\nfunc setTargetConfigDefaultsFromGlobalFlags(tc *types.TargetConfig, gflags *GlobalFlags, defaultGRPCPort string) error {\n\tif gflags.Port == \"\" {\n\t\tgflags.Port = defaultGRPCPort\n\t}\n\tif !strings.HasPrefix(tc.Address, \"unix://\") {\n\t\taddrList := 
strings.Split(tc.Address, \",\")\n\t\taddrs := make([]string, 0, len(addrList))\n\t\tfor _, addr := range addrList {\n\t\t\taddr = strings.TrimSpace(addr)\n\t\t\tif !gflags.UseTunnelServer {\n\t\t\t\t_, _, err := net.SplitHostPort(addr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif strings.Contains(err.Error(), \"missing port in address\") ||\n\t\t\t\t\t\tstrings.Contains(err.Error(), \"too many colons in address\") {\n\t\t\t\t\t\taddr = net.JoinHostPort(addr, gflags.Port)\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn fmt.Errorf(\"error parsing address '%s': %v\", addr, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\taddrs = append(addrs, addr)\n\t\t}\n\t\ttc.Address = strings.Join(addrs, \",\")\n\t}\n\tif tc.Username == nil {\n\t\ttc.Username = &gflags.Username\n\t}\n\tif tc.Password == nil {\n\t\ttc.Password = &gflags.Password\n\t}\n\tif tc.Token == nil {\n\t\ttc.Token = &gflags.Token\n\t}\n\tif tc.AuthScheme == \"\" {\n\t\ttc.AuthScheme = gflags.AuthScheme\n\t}\n\tif tc.Timeout == 0 {\n\t\ttc.Timeout = gflags.Timeout\n\t}\n\tif tc.Insecure == nil {\n\t\ttc.Insecure = &gflags.Insecure\n\t}\n\tif tc.SkipVerify == nil {\n\t\ttc.SkipVerify = &gflags.SkipVerify\n\t}\n\tif tc.Insecure != nil && !*tc.Insecure {\n\t\tif tc.TLSCA == nil {\n\t\t\tif gflags.TLSCa != \"\" {\n\t\t\t\ttc.TLSCA = &gflags.TLSCa\n\t\t\t}\n\t\t}\n\t\tif tc.TLSCert == nil {\n\t\t\ttc.TLSCert = &gflags.TLSCert\n\t\t}\n\t\tif tc.TLSKey == nil {\n\t\t\ttc.TLSKey = &gflags.TLSKey\n\t\t}\n\t}\n\tif tc.RetryTimer == 0 {\n\t\ttc.RetryTimer = gflags.Retry\n\t}\n\tif tc.TLSVersion == \"\" {\n\t\ttc.TLSVersion = gflags.TLSVersion\n\t}\n\tif tc.TLSMinVersion == \"\" {\n\t\ttc.TLSMinVersion = gflags.TLSMinVersion\n\t}\n\tif tc.TLSMaxVersion == \"\" {\n\t\ttc.TLSMaxVersion = gflags.TLSMaxVersion\n\t}\n\tif tc.TLSServerName == \"\" {\n\t\ttc.TLSServerName = gflags.TLSServerName\n\t}\n\tif tc.LogTLSSecret == nil {\n\t\ttc.LogTLSSecret = &gflags.LogTLSSecret\n\t}\n\tif tc.Gzip == nil {\n\t\ttc.Gzip = &gflags.Gzip\n\t}\n\tif 
tc.BufferSize == 0 {\n\t\ttc.BufferSize = defaultTargetBufferSize\n\t}\n\tif tc.Metadata == nil && gflags.Metadata != nil {\n\t\ttc.Metadata = make(map[string]string)\n\t\tmaps.Copy(tc.Metadata, gflags.Metadata)\n\t}\n\treturn nil\n}\n\nfunc (c *Config) SetTargetConfigDefaultsExpandEnv(tc *types.TargetConfig) error {\n\terr := c.SetTargetConfigDefaults(tc)\n\tif err != nil {\n\t\treturn err\n\t}\n\texpandTargetEnv(tc)\n\n\treturn nil\n}\n\nfunc (c *Config) TargetsList() []*types.TargetConfig {\n\ttargets := make([]*types.TargetConfig, 0, len(c.Targets))\n\tfor _, tc := range c.Targets {\n\t\ttargets = append(targets, tc)\n\t}\n\tsort.Slice(targets, func(i, j int) bool {\n\t\treturn targets[i].Name < targets[j].Name\n\t})\n\treturn targets\n}\n\nfunc expandCertPaths(tc *types.TargetConfig) error {\n\tif tc.Insecure != nil && !*tc.Insecure {\n\t\tvar err error\n\t\tif tc.TLSCA != nil && *tc.TLSCA != \"\" {\n\t\t\t*tc.TLSCA, err = expandOSPath(*tc.TLSCA)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif tc.TLSCert != nil && *tc.TLSCert != \"\" {\n\t\t\t*tc.TLSCert, err = expandOSPath(*tc.TLSCert)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif tc.TLSKey != nil && *tc.TLSKey != \"\" {\n\t\t\t*tc.TLSKey, err = expandOSPath(*tc.TLSKey)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc expandTargetEnv(tc *types.TargetConfig) {\n\ttc.Name = os.ExpandEnv(tc.Name)\n\ttc.Address = os.ExpandEnv(tc.Address)\n\tif tc.Username != nil {\n\t\t*tc.Username = os.ExpandEnv(*tc.Username)\n\t}\n\t// expandEnv for the pasword field only if it starts with $\n\t// https://github.com/karimra/gnmic/issues/496\n\tif tc.Password != nil && strings.HasPrefix(*tc.Password, \"$\") {\n\t\t*tc.Password = os.ExpandEnv(*tc.Password)\n\t}\n\tif tc.Token != nil {\n\t\t*tc.Token = os.ExpandEnv(*tc.Token)\n\t}\n\tif tc.TLSCA != nil {\n\t\t*tc.TLSCA = os.ExpandEnv(*tc.TLSCA)\n\t}\n\tif tc.TLSCert != nil {\n\t\t*tc.TLSCert = 
os.ExpandEnv(*tc.TLSCert)\n\t}\n\tif tc.TLSKey != nil {\n\t\t*tc.TLSKey = os.ExpandEnv(*tc.TLSKey)\n\t}\n\tfor i := range tc.Subscriptions {\n\t\ttc.Subscriptions[i] = os.ExpandEnv(tc.Subscriptions[i])\n\t}\n\tfor i := range tc.Outputs {\n\t\ttc.Outputs[i] = os.ExpandEnv(tc.Outputs[i])\n\t}\n\ttc.TLSMinVersion = os.ExpandEnv(tc.TLSMinVersion)\n\ttc.TLSMaxVersion = os.ExpandEnv(tc.TLSMaxVersion)\n\ttc.TLSVersion = os.ExpandEnv(tc.TLSVersion)\n\tfor i := range tc.ProtoFiles {\n\t\ttc.ProtoFiles[i] = os.ExpandEnv(tc.ProtoFiles[i])\n\t}\n\tfor i := range tc.ProtoDirs {\n\t\ttc.ProtoDirs[i] = os.ExpandEnv(tc.ProtoDirs[i])\n\t}\n\tfor i := range tc.Tags {\n\t\ttc.Tags[i] = os.ExpandEnv(tc.Tags[i])\n\t}\n}\n\nfunc (c *Config) GetDiffTargets() (*types.TargetConfig, map[string]*types.TargetConfig, error) {\n\ttargetsConfig, err := c.GetTargets()\n\tif err != nil {\n\t\tif !errors.Is(err, ErrNoTargetsFound) {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\tvar refConfig *types.TargetConfig\n\tif rc, ok := targetsConfig[c.DiffRef]; ok {\n\t\trefConfig = rc\n\t} else {\n\t\trefConfig = &types.TargetConfig{\n\t\t\tName:    c.DiffRef,\n\t\t\tAddress: c.DiffRef,\n\t\t}\n\t\terr = c.SetTargetConfigDefaults(refConfig)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\tcompareConfigs := make(map[string]*types.TargetConfig)\n\tfor _, cmp := range c.DiffCompare {\n\t\tif cc, ok := targetsConfig[cmp]; ok {\n\t\t\tcompareConfigs[cmp] = cc\n\t\t} else {\n\t\t\tcompConfig := &types.TargetConfig{\n\t\t\t\tName:    cmp,\n\t\t\t\tAddress: cmp,\n\t\t\t}\n\t\t\terr = c.SetTargetConfigDefaults(compConfig)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\t\t\tcompareConfigs[compConfig.Name] = compConfig\n\t\t}\n\t}\n\treturn refConfig, compareConfigs, nil\n}\n\nfunc SetTargetConfigDefaults(s store.Store[any], tc *types.TargetConfig) error {\n\tgf, found, err := s.Get(\"global-flags\", \"global-flags\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !found {\n\t\treturn 
fmt.Errorf(\"global-flags not found\")\n\t}\n\tgflags, ok := gf.(GlobalFlags)\n\tif !ok {\n\t\treturn fmt.Errorf(\"global-flags is not a *GlobalFlags\")\n\t}\n\treturn setTargetConfigDefaultsFromGlobalFlags(tc, &gflags, \"\")\n}\n\nfunc SetTargetConfigDefaultsExpandEnv(s store.Store[any], tc *types.TargetConfig) error {\n\terr := SetTargetConfigDefaults(s, tc)\n\tif err != nil {\n\t\treturn err\n\t}\n\texpandTargetEnv(tc)\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/config/targets_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/AlekSi/pointer\"\n\t\"gopkg.in/yaml.v2\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\nvar getTargetsTestSet = map[string]struct {\n\tenvs   []string\n\tin     []byte\n\tout    map[string]*types.TargetConfig\n\toutErr error\n}{\n\t\"from_address\": {\n\t\tin: []byte(`\nport: 57400\nusername: admin\npassword: admin\naddress: 10.1.1.1\n`),\n\t\tout: map[string]*types.TargetConfig{\n\t\t\t\"10.1.1.1\": {\n\t\t\t\tAddress:      \"10.1.1.1:57400\",\n\t\t\t\tName:         \"10.1.1.1\",\n\t\t\t\tPassword:     pointer.ToString(\"admin\"),\n\t\t\t\tUsername:     pointer.ToString(\"admin\"),\n\t\t\t\tToken:        pointer.ToString(\"\"),\n\t\t\t\tTLSCert:      pointer.ToString(\"\"),\n\t\t\t\tTLSKey:       pointer.ToString(\"\"),\n\t\t\t\tLogTLSSecret: pointer.ToBool(false),\n\t\t\t\tInsecure:     pointer.ToBool(false),\n\t\t\t\tSkipVerify:   pointer.ToBool(false),\n\t\t\t\tGzip:         pointer.ToBool(false),\n\t\t\t\tBufferSize:   uint(100),\n\t\t\t},\n\t\t},\n\t\toutErr: nil,\n\t},\n\t\"from_targets_only\": {\n\t\tin: []byte(`\ntargets:\n  10.1.1.1:57400:  \n    username: admin\n    password: admin\n`),\n\t\tout: map[string]*types.TargetConfig{\n\t\t\t\"10.1.1.1:57400\": {\n\t\t\t\tAddress:      \"10.1.1.1:57400\",\n\t\t\t\tName:         \"10.1.1.1:57400\",\n\t\t\t\tPassword:     pointer.ToString(\"admin\"),\n\t\t\t\tUsername:     pointer.ToString(\"admin\"),\n\t\t\t\tToken:        
pointer.ToString(\"\"),\n\t\t\t\tTLSCert:      pointer.ToString(\"\"),\n\t\t\t\tTLSKey:       pointer.ToString(\"\"),\n\t\t\t\tLogTLSSecret: pointer.ToBool(false),\n\t\t\t\tInsecure:     pointer.ToBool(false),\n\t\t\t\tSkipVerify:   pointer.ToBool(false),\n\t\t\t\tGzip:         pointer.ToBool(false),\n\t\t\t\tBufferSize:   uint(100),\n\t\t\t},\n\t\t},\n\t\toutErr: nil,\n\t},\n\t\"from_both_targets_and_main_section\": {\n\t\tin: []byte(`\nmetadata:\n  key1: val1\n  key2: val2\nusername: admin\npassword: admin\nskip-verify: true\ntargets:\n  10.1.1.1:57400:  \n    metadata:\n      override1: val2\n`),\n\t\tout: map[string]*types.TargetConfig{\n\t\t\t\"10.1.1.1:57400\": {\n\t\t\t\tAddress:      \"10.1.1.1:57400\",\n\t\t\t\tName:         \"10.1.1.1:57400\",\n\t\t\t\tPassword:     pointer.ToString(\"admin\"),\n\t\t\t\tUsername:     pointer.ToString(\"admin\"),\n\t\t\t\tToken:        pointer.ToString(\"\"),\n\t\t\t\tTLSCert:      pointer.ToString(\"\"),\n\t\t\t\tTLSKey:       pointer.ToString(\"\"),\n\t\t\t\tLogTLSSecret: pointer.ToBool(false),\n\t\t\t\tInsecure:     pointer.ToBool(false),\n\t\t\t\tSkipVerify:   pointer.ToBool(true),\n\t\t\t\tGzip:         pointer.ToBool(false),\n\t\t\t\tBufferSize:   uint(100),\n\t\t\t\tMetadata: map[string]string{\n\t\t\t\t\t\"override1\": \"val2\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\toutErr: nil,\n\t},\n\t\"multiple_targets\": {\n\t\tin: []byte(`\nmetadata:\n  key1: val1\n  key2: val2\ntargets:\n  10.1.1.1:57400:\n    username: admin\n    password: admin\n  10.1.1.2:57400:\n    username: admin\n    password: admin\n`),\n\t\tout: map[string]*types.TargetConfig{\n\t\t\t\"10.1.1.1:57400\": {\n\t\t\t\tAddress:      \"10.1.1.1:57400\",\n\t\t\t\tName:         \"10.1.1.1:57400\",\n\t\t\t\tPassword:     pointer.ToString(\"admin\"),\n\t\t\t\tUsername:     pointer.ToString(\"admin\"),\n\t\t\t\tToken:        pointer.ToString(\"\"),\n\t\t\t\tTLSCert:      pointer.ToString(\"\"),\n\t\t\t\tTLSKey:       pointer.ToString(\"\"),\n\t\t\t\tLogTLSSecret: 
pointer.ToBool(false),\n\t\t\t\tInsecure:     pointer.ToBool(false),\n\t\t\t\tSkipVerify:   pointer.ToBool(false),\n\t\t\t\tGzip:         pointer.ToBool(false),\n\t\t\t\tBufferSize:   uint(100),\n\t\t\t\tMetadata: map[string]string{\n\t\t\t\t\t\"key1\": \"val1\",\n\t\t\t\t\t\"key2\": \"val2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"10.1.1.2:57400\": {\n\t\t\t\tAddress:      \"10.1.1.2:57400\",\n\t\t\t\tName:         \"10.1.1.2:57400\",\n\t\t\t\tPassword:     pointer.ToString(\"admin\"),\n\t\t\t\tUsername:     pointer.ToString(\"admin\"),\n\t\t\t\tToken:        pointer.ToString(\"\"),\n\t\t\t\tTLSCert:      pointer.ToString(\"\"),\n\t\t\t\tTLSKey:       pointer.ToString(\"\"),\n\t\t\t\tLogTLSSecret: pointer.ToBool(false),\n\t\t\t\tInsecure:     pointer.ToBool(false),\n\t\t\t\tSkipVerify:   pointer.ToBool(false),\n\t\t\t\tGzip:         pointer.ToBool(false),\n\t\t\t\tBufferSize:   uint(100),\n\t\t\t\tMetadata: map[string]string{\n\t\t\t\t\t\"key1\": \"val1\",\n\t\t\t\t\t\"key2\": \"val2\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\toutErr: nil,\n\t},\n\t\"multiple_targets_from_main_section\": {\n\t\tin: []byte(`\nskip-verify: true\ntargets:\n  10.1.1.1:57400:\n    username: admin\n    password: admin\n  10.1.1.2:57400:\n    username: admin\n    password: admin\n`),\n\t\tout: map[string]*types.TargetConfig{\n\t\t\t\"10.1.1.1:57400\": {\n\t\t\t\tAddress:      \"10.1.1.1:57400\",\n\t\t\t\tName:         \"10.1.1.1:57400\",\n\t\t\t\tPassword:     pointer.ToString(\"admin\"),\n\t\t\t\tUsername:     pointer.ToString(\"admin\"),\n\t\t\t\tToken:        pointer.ToString(\"\"),\n\t\t\t\tTLSCert:      pointer.ToString(\"\"),\n\t\t\t\tTLSKey:       pointer.ToString(\"\"),\n\t\t\t\tLogTLSSecret: pointer.ToBool(false),\n\t\t\t\tInsecure:     pointer.ToBool(false),\n\t\t\t\tSkipVerify:   pointer.ToBool(true),\n\t\t\t\tGzip:         pointer.ToBool(false),\n\t\t\t\tBufferSize:   uint(100),\n\t\t\t},\n\t\t\t\"10.1.1.2:57400\": {\n\t\t\t\tAddress:      \"10.1.1.2:57400\",\n\t\t\t\tName:         
\"10.1.1.2:57400\",\n\t\t\t\tPassword:     pointer.ToString(\"admin\"),\n\t\t\t\tUsername:     pointer.ToString(\"admin\"),\n\t\t\t\tToken:        pointer.ToString(\"\"),\n\t\t\t\tTLSCert:      pointer.ToString(\"\"),\n\t\t\t\tTLSKey:       pointer.ToString(\"\"),\n\t\t\t\tLogTLSSecret: pointer.ToBool(false),\n\t\t\t\tInsecure:     pointer.ToBool(false),\n\t\t\t\tSkipVerify:   pointer.ToBool(true),\n\t\t\t\tGzip:         pointer.ToBool(false),\n\t\t\t\tBufferSize:   uint(100),\n\t\t\t},\n\t\t},\n\t\toutErr: nil,\n\t},\n\t\"multiple_targets_with_gzip\": {\n\t\tin: []byte(`\nskip-verify: true\ntargets:\n  10.1.1.1:57400:\n    username: admin\n    password: admin\n    gzip: true\n  10.1.1.2:57400:\n    username: admin\n    password: admin\n`),\n\t\tout: map[string]*types.TargetConfig{\n\t\t\t\"10.1.1.1:57400\": {\n\t\t\t\tAddress:      \"10.1.1.1:57400\",\n\t\t\t\tName:         \"10.1.1.1:57400\",\n\t\t\t\tPassword:     pointer.ToString(\"admin\"),\n\t\t\t\tUsername:     pointer.ToString(\"admin\"),\n\t\t\t\tToken:        pointer.ToString(\"\"),\n\t\t\t\tTLSCert:      pointer.ToString(\"\"),\n\t\t\t\tTLSKey:       pointer.ToString(\"\"),\n\t\t\t\tLogTLSSecret: pointer.ToBool(false),\n\t\t\t\tInsecure:     pointer.ToBool(false),\n\t\t\t\tSkipVerify:   pointer.ToBool(true),\n\t\t\t\tGzip:         pointer.ToBool(true),\n\t\t\t\tBufferSize:   uint(100),\n\t\t\t},\n\t\t\t\"10.1.1.2:57400\": {\n\t\t\t\tAddress:      \"10.1.1.2:57400\",\n\t\t\t\tName:         \"10.1.1.2:57400\",\n\t\t\t\tPassword:     pointer.ToString(\"admin\"),\n\t\t\t\tUsername:     pointer.ToString(\"admin\"),\n\t\t\t\tToken:        pointer.ToString(\"\"),\n\t\t\t\tTLSCert:      pointer.ToString(\"\"),\n\t\t\t\tTLSKey:       pointer.ToString(\"\"),\n\t\t\t\tLogTLSSecret: pointer.ToBool(false),\n\t\t\t\tInsecure:     pointer.ToBool(false),\n\t\t\t\tSkipVerify:   pointer.ToBool(true),\n\t\t\t\tGzip:         pointer.ToBool(false),\n\t\t\t\tBufferSize:   uint(100),\n\t\t\t},\n\t\t},\n\t\toutErr: 
nil,\n\t},\n\t\"with_envs\": {\n\t\tenvs: []string{\n\t\t\t\"SUB_NAME=sub1\",\n\t\t\t\"OUT_NAME=o1\",\n\t\t},\n\t\tin: []byte(`\nskip-verify: true\ntargets:\n  10.1.1.1:57400:\n    username: admin\n    password: admin\n    outputs:\n      - ${OUT_NAME}\n    subscriptions:\n      - ${SUB_NAME}\n`),\n\t\tout: map[string]*types.TargetConfig{\n\t\t\t\"10.1.1.1:57400\": {\n\t\t\t\tAddress:      \"10.1.1.1:57400\",\n\t\t\t\tName:         \"10.1.1.1:57400\",\n\t\t\t\tPassword:     pointer.ToString(\"admin\"),\n\t\t\t\tUsername:     pointer.ToString(\"admin\"),\n\t\t\t\tToken:        pointer.ToString(\"\"),\n\t\t\t\tTLSCert:      pointer.ToString(\"\"),\n\t\t\t\tTLSKey:       pointer.ToString(\"\"),\n\t\t\t\tLogTLSSecret: pointer.ToBool(false),\n\t\t\t\tInsecure:     pointer.ToBool(false),\n\t\t\t\tSkipVerify:   pointer.ToBool(true),\n\t\t\t\tGzip:         pointer.ToBool(false),\n\t\t\t\tBufferSize:   uint(100),\n\t\t\t\tSubscriptions: []string{\n\t\t\t\t\t\"sub1\",\n\t\t\t\t},\n\t\t\t\tOutputs: []string{\n\t\t\t\t\t\"o1\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\toutErr: nil,\n\t},\n\t\"target_with_multiple_addresses\": {\n\t\tin: []byte(`\nport: 57400\ntargets:\n  target1:\n    username: admin\n    password: admin\n    address: 10.1.1.1,10.1.1.2\n`),\n\t\tout: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {\n\t\t\t\tAddress:      \"10.1.1.1:57400,10.1.1.2:57400\",\n\t\t\t\tName:         \"target1\",\n\t\t\t\tPassword:     pointer.ToString(\"admin\"),\n\t\t\t\tUsername:     pointer.ToString(\"admin\"),\n\t\t\t\tToken:        pointer.ToString(\"\"),\n\t\t\t\tTLSCert:      pointer.ToString(\"\"),\n\t\t\t\tTLSKey:       pointer.ToString(\"\"),\n\t\t\t\tLogTLSSecret: pointer.ToBool(false),\n\t\t\t\tInsecure:     pointer.ToBool(false),\n\t\t\t\tSkipVerify:   pointer.ToBool(false),\n\t\t\t\tGzip:         pointer.ToBool(false),\n\t\t\t\tBufferSize:   uint(100),\n\t\t\t},\n\t\t},\n\t\toutErr: nil,\n\t},\n}\n\nfunc TestGetTargets(t *testing.T) {\n\tfor name, data := range 
getTargetsTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor _, e := range data.envs {\n\t\t\t\tp := strings.SplitN(e, \"=\", 2)\n\t\t\t\tos.Setenv(p[0], p[1])\n\t\t\t}\n\t\t\tcfg := New()\n\t\t\tcfg.Debug = true\n\t\t\tcfg.SetLogger()\n\t\t\tcfg.FileConfig.SetConfigType(\"yaml\")\n\t\t\terr := cfg.FileConfig.ReadConfig(bytes.NewBuffer(data.in))\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed reading config: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\terr = cfg.FileConfig.Unmarshal(cfg)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed fileConfig.Unmarshal: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tv := cfg.FileConfig.Get(\"targets\")\n\t\t\tt.Logf(\"raw interface targets: %+v\", v)\n\t\t\touts, err := cfg.GetTargets()\n\t\t\tt.Logf(\"exp value: %+v\", data.out)\n\t\t\tt.Logf(\"got value: %+v\", outs)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed getting targets: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(outs, data.out) {\n\t\t\t\tt.Log(\"maps not equal\")\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n\nvar setTargetLoaderConfigDefaultsTest = map[string]struct {\n\tenvs   []string\n\tin     []byte\n\tout    *types.TargetConfig\n\toutErr error\n}{\n\t\"from_address\": {\n\t\tenvs: []string{\n\t\t\t\"username=user1\",\n\t\t\t\"pass=pass1\",\n\t\t},\n\t\tin: []byte(`\ntest1:\n    name: test1.123\n    address: test1.123:9339\n    username: ${username}\n    password: ${pass}\n    subscriptions:\n        - drivenets-sample\n`),\n\t\tout: &types.TargetConfig{\n\t\t\tAddress:       \"test1.123:9339\",\n\t\t\tName:          \"test1.123\",\n\t\t\tPassword:      pointer.ToString(\"pass1\"),\n\t\t\tUsername:      pointer.ToString(\"user1\"),\n\t\t\tToken:         pointer.ToString(\"\"),\n\t\t\tTLSCert:       pointer.ToString(\"\"),\n\t\t\tTLSKey:        pointer.ToString(\"\"),\n\t\t\tLogTLSSecret:  pointer.ToBool(false),\n\t\t\tInsecure:      pointer.ToBool(false),\n\t\t\tSkipVerify:    pointer.ToBool(false),\n\t\t\tGzip:          
pointer.ToBool(false),\n\t\t\tBufferSize:    uint(100),\n\t\t\tSubscriptions: []string{\"drivenets-sample\"},\n\t\t},\n\t\toutErr: nil,\n\t},\n}\n\nfunc TestSetTargetLoaderConfigDefaults(t *testing.T) {\n\tfor name, data := range setTargetLoaderConfigDefaultsTest {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor _, e := range data.envs {\n\t\t\t\tp := strings.SplitN(e, \"=\", 2)\n\t\t\t\tos.Setenv(p[0], p[1])\n\t\t\t}\n\t\t\tvar inputMap map[string]*types.TargetConfig\n\t\t\terr := yaml.Unmarshal(data.in, &inputMap)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"failed to unmarshal input: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tvar input *types.TargetConfig\n\t\t\tfor _, v := range inputMap {\n\t\t\t\tinput = v\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcfg := New()\n\t\t\terr = cfg.SetTargetConfigDefaultsExpandEnv(input)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"SetTargetLoaderConfigDefaults error: %v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(input, data.out) {\n\t\t\t\tt.Logf(\"expected: %+v\", data.out)\n\t\t\t\tt.Logf(\"got: %+v\", input)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/config/tunnel_server.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/mitchellh/mapstructure\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n)\n\nconst (\n\tdefaultTargetWaitTime = 2 * time.Second\n)\n\ntype TunnelServer struct {\n\tAddress string `mapstructure:\"address,omitempty\" json:\"address,omitempty\"`\n\t// TLS\n\tTLS *types.TLSConfig `mapstructure:\"tls,omitempty\"`\n\t//\n\tTargetWaitTime time.Duration `mapstructure:\"target-wait-time,omitempty\" json:\"target-wait-time,omitempty\"`\n\t//\n\tEnableMetrics bool `mapstructure:\"enable-metrics,omitempty\" json:\"enable-metrics,omitempty\"`\n\tDebug         bool `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\t// targets\n\tTargets []*TunnelTargetMatch `mapstructure:\"targets,omitempty\" json:\"targets,omitempty\"`\n}\n\ntype TunnelTargetMatch struct {\n\t// target Type as reported by the tunnel.Target to the Tunnel Server\n\tType string `mapstructure:\"type,omitempty\" json:\"type,omitempty\"`\n\t// a Regex pattern to check the target ID as reported by\n\t// the tunnel.Target to the Tunnel Server\n\tID string `mapstructure:\"id,omitempty\" json:\"id,omitempty\"`\n\t// Optional gnmic.Target Configuration that will be assigned to the target with\n\t// an ID matching the above regex\n\tConfig types.TargetConfig `mapstructure:\"config,omitempty\" json:\"config,omitempty\"`\n}\n\nfunc (c *Config) GetTunnelServer() error {\n\tif !c.FileConfig.IsSet(\"tunnel-server\") 
{\n\t\treturn nil\n\t}\n\tc.TunnelServer = new(TunnelServer)\n\tc.TunnelServer.Address = os.ExpandEnv(c.FileConfig.GetString(\"tunnel-server/address\"))\n\n\tif c.FileConfig.IsSet(\"tunnel-server/tls\") {\n\t\tc.TunnelServer.TLS = new(types.TLSConfig)\n\t\tc.TunnelServer.TLS.CaFile = os.ExpandEnv(c.FileConfig.GetString(\"tunnel-server/tls/ca-file\"))\n\t\tc.TunnelServer.TLS.CertFile = os.ExpandEnv(c.FileConfig.GetString(\"tunnel-server/tls/cert-file\"))\n\t\tc.TunnelServer.TLS.KeyFile = os.ExpandEnv(c.FileConfig.GetString(\"tunnel-server/tls/key-file\"))\n\t\tc.TunnelServer.TLS.ClientAuth = os.ExpandEnv(c.FileConfig.GetString(\"tunnel-server/tls/client-auth\"))\n\t\tif err := c.TunnelServer.TLS.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"tunnel-server TLS config error: %w\", err)\n\t\t}\n\t}\n\tc.TunnelServer.TargetWaitTime = c.FileConfig.GetDuration(\"tunnel-server/target-wait-time\")\n\tc.TunnelServer.EnableMetrics = os.ExpandEnv(c.FileConfig.GetString(\"tunnel-server/enable-metrics\")) == trueString\n\tc.TunnelServer.Debug = os.ExpandEnv(c.FileConfig.GetString(\"tunnel-server/debug\")) == trueString\n\n\tvar err error\n\tc.TunnelServer.Targets = make([]*TunnelTargetMatch, 0)\n\ttargetMatches := c.FileConfig.Get(\"tunnel-server/targets\")\n\tswitch targetMatches := targetMatches.(type) {\n\tcase []interface{}:\n\t\tfor _, tmi := range targetMatches {\n\t\t\ttm := new(TunnelTargetMatch)\n\t\t\terr = mapstructure.Decode(utils.Convert(tmi), tm)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tc.TunnelServer.Targets = append(c.TunnelServer.Targets, tm)\n\t\t}\n\tcase nil:\n\tdefault:\n\t\treturn fmt.Errorf(\"tunnel-server has an unexpected target configuration type %T\", targetMatches)\n\t}\n\n\tc.setTunnelServerDefaults()\n\treturn nil\n}\n\nfunc (c *Config) setTunnelServerDefaults() {\n\tif c.TunnelServer.Address == \"\" {\n\t\tc.TunnelServer.Address = defaultAddress\n\t}\n\tif c.TunnelServer.TargetWaitTime <= 0 
{\n\t\tc.TunnelServer.TargetWaitTime = defaultTargetWaitTime\n\t}\n}\n"
  },
  {
    "path": "pkg/file/file.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage file\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"golang.org/x/crypto/ssh\"\n\t\"golang.org/x/crypto/ssh/agent\"\n\t\"golang.org/x/crypto/ssh/knownhosts\"\n\n\t\"github.com/jlaffaye/ftp\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/pkg/sftp\"\n)\n\nconst (\n\tdefaultFTPPort  = 21\n\tdefaultSFTPPort = 22\n)\n\n// ReadFile reads a local or remote file and returns the read bytes,\n// the location of the file is determined based on its prefix,\n// http(s), (s)ftp prefixes are supported.\n// no prefix means the file is local. 
`-` means stdin.\nfunc ReadFile(ctx context.Context, path string) ([]byte, error) {\n\t// read file bytes based on the path prefix\n\tswitch {\n\tcase strings.HasPrefix(path, \"https://\"):\n\t\treturn readHTTPFile(ctx, path)\n\tcase strings.HasPrefix(path, \"http://\"):\n\t\treturn readHTTPFile(ctx, path)\n\tcase strings.HasPrefix(path, \"ftp://\"):\n\t\treturn readFTPFile(ctx, path)\n\tcase strings.HasPrefix(path, \"sftp://\"):\n\t\treturn readSFTPFile(ctx, path, false)\n\tdefault:\n\t\treturn utils.ReadLocalFile(ctx, path)\n\t}\n}\n\n// readHTTPFile fetches a remote from from an HTTP server,\n// the response body can be yaml or json bytes.\n// it then unmarshal the received bytes into a map[string]*types.TargetConfig\n// and returns\nfunc readHTTPFile(ctx context.Context, path string) ([]byte, error) {\n\t_, err := url.Parse(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := new(http.Client)\n\tif strings.HasPrefix(path, \"https://\") {\n\t\tclient.Transport = &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\t}\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, path, new(bytes.Buffer))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif r.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"unexpected HTTP status code %d, GET from %s\", r.StatusCode, path)\n\t}\n\tdefer r.Body.Close()\n\treturn io.ReadAll(r.Body)\n}\n\n// readFTPFile reads a file from a remote FTP server\n// unmarshals the content into a map[string]*types.TargetConfig\n// and returns\nfunc readFTPFile(ctx context.Context, path string) ([]byte, error) {\n\tparsedUrl, err := url.Parse(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse URL: %v\", err)\n\t}\n\n\t// Get user name and pass\n\tuser := parsedUrl.User.Username()\n\tpass, _ := parsedUrl.User.Password()\n\n\t// Parse Host and Port\n\thost := parsedUrl.Host\n\t_, _, err = 
net.SplitHostPort(host)\n\tif err != nil {\n\t\thost = fmt.Sprintf(\"%s:%d\", host, defaultFTPPort)\n\t}\n\t// connect to server\n\n\tconn, err := ftp.Dial(host, ftp.DialWithContext(ctx))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to connect to [%s]: %v\", host, err)\n\t}\n\n\terr = conn.Login(user, pass)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to login to [%s]: %v\", host, err)\n\t}\n\n\tr, err := conn.Retr(parsedUrl.RequestURI())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read remote file %q: %v\", parsedUrl.RequestURI(), err)\n\t}\n\tdefer r.Close()\n\treturn io.ReadAll(r)\n}\n\n// readSFTPFile reads a file from a remote SFTP server\n// unmarshals the content into a map[string]*types.TargetConfig\n// and returns\nfunc readSFTPFile(_ context.Context, path string, checkHostKey bool) ([]byte, error) {\n\tparsedUrl, err := url.Parse(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse URL: %v\", err)\n\t}\n\n\t// Get user name and pass\n\tuser := parsedUrl.User.Username()\n\tpass, _ := parsedUrl.User.Password()\n\n\t// Parse Host and Port\n\thost := parsedUrl.Host\n\t_, _, err = net.SplitHostPort(host)\n\tif err != nil {\n\t\thost = fmt.Sprintf(\"%s:%d\", host, defaultSFTPPort)\n\t}\n\n\tvar auths []ssh.AuthMethod\n\n\t// Try to use $SSH_AUTH_SOCK which contains the path of the unix file socket that the sshd agent uses\n\t// for communication with other processes.\n\tif aconn, err := net.Dial(\"unix\", os.Getenv(\"SSH_AUTH_SOCK\")); err == nil {\n\t\tauths = append(auths, ssh.PublicKeysCallback(agent.NewClient(aconn).Signers))\n\t}\n\n\t// Use password authentication if provided\n\tif pass != \"\" {\n\t\tauths = append(auths, ssh.Password(pass))\n\t}\n\n\t// Initialize client configuration\n\tconfig := ssh.ClientConfig{\n\t\tUser: user,\n\t\tAuth: auths,\n\t}\n\n\t// if checkHostKey is set, try loading the know_hosts file\n\tif checkHostKey {\n\t\tknownHostsFile := filepath.Join(os.Getenv(\"HOME\"), 
\".ssh\", \"known_hosts\")\n\t\t// check ~/.ssh/known_hosts existence\n\t\tif !FileExists(knownHostsFile) {\n\t\t\treturn nil, fmt.Errorf(\"known_hosts file %s does not exist\", knownHostsFile)\n\t\t}\n\n\t\t// load the known_hosts file retrieving an ssh.HostKeyCallback\n\t\tconfig.HostKeyCallback, err = knownhosts.New(knownHostsFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// use the use the InsecureIgnoreHostKey implementation\n\t\tconfig.HostKeyCallback = ssh.InsecureIgnoreHostKey()\n\t}\n\n\t// Connect to server\n\tconn, err := ssh.Dial(\"tcp\", host, &config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to connect to [%s]: %v\", host, err)\n\t}\n\tdefer conn.Close()\n\n\t// Create new SFTP client\n\tsc, err := sftp.NewClient(conn)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to start SFTP subsystem: %v\", err)\n\t}\n\tdefer sc.Close()\n\n\t// open File\n\tfile, err := sc.Open(parsedUrl.RequestURI())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to open the remote file %q: %v\", parsedUrl.RequestURI(), err)\n\t}\n\tdefer file.Close()\n\n\t// stat file to get its size\n\tst, err := file.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif st.IsDir() {\n\t\treturn nil, fmt.Errorf(\"remote file %q is a directory\", parsedUrl.RequestURI())\n\t}\n\t// create a []byte with length equal to the file size\n\tb := make([]byte, st.Size())\n\t// read the file\n\t_, err = file.Read(b)\n\treturn b, err\n}\n\n// FileExists returns true if a file referenced by filename exists & accessible.\nfunc FileExists(filename string) bool {\n\tf, err := os.Stat(filename)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn !f.IsDir()\n}\n"
  },
  {
    "path": "pkg/formatters/all/all.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage all\n\nimport (\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_add_tag\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_allow\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_combine\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_convert\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_data_convert\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_date_string\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_delete\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_drop\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_duration_convert\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_extract_tags\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_group_by\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_ieeefloat32\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_jq\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_merge\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_override_ts\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_rate_limit\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_starlark\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_strings\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_time_epoch\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_to_tag\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_trigger\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_value_tag\"\n\t_ 
\"github.com/openconfig/gnmic/pkg/formatters/event_value_tag_v2\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/event_write\"\n)\n"
  },
  {
    "path": "pkg/formatters/event.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage formatters\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"math\"\n\t\"strings\"\n\t\"sync\"\n\n\tflattener \"github.com/karimra/go-map-flattener\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n)\n\nvar stringBuilderPool = sync.Pool{\n\tNew: func() any {\n\t\treturn new(strings.Builder)\n\t},\n}\n\n// EventMsg represents a gNMI update message,\n// The name is derived from the subscription in case the update was received in a subscribeResponse\n// the tags are derived from the keys in gNMI path as well as some metadata from the subscription.\ntype EventMsg struct {\n\tName      string                 `json:\"name,omitempty\"`\n\tTimestamp int64                  `json:\"timestamp,omitempty\"`\n\tTags      map[string]string      `json:\"tags,omitempty\"`\n\tValues    map[string]interface{} `json:\"values,omitempty\"`\n\tDeletes   []string               `json:\"deletes,omitempty\"`\n}\n\nfunc (e *EventMsg) String() string {\n\tb, _ := json.Marshal(e)\n\treturn string(b)\n}\n\n// ResponseToEventMsgs //\nfunc ResponseToEventMsgs(name string, rsp *gnmi.SubscribeResponse, meta map[string]string, eps ...EventProcessor) ([]*EventMsg, error) {\n\tif rsp == nil {\n\t\treturn nil, nil\n\t}\n\tevs := make([]*EventMsg, 0, len(rsp.GetUpdate().GetUpdate())+len(rsp.GetUpdate().GetDelete()))\n\tswitch rsp := rsp.Response.(type) {\n\tcase *gnmi.SubscribeResponse_Update:\n\t\tnamePrefix, prefixTags := tagsFromGNMIPath(rsp.Update.GetPrefix())\n\t\t// notification updates\n\t\tuevs, err := updatesToEvent(name, 
namePrefix, rsp.Update.GetTimestamp(), rsp.Update.GetUpdate(), prefixTags, meta)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tevs = append(evs, uevs...)\n\t\t// notification deletes\n\t\tfor _, del := range rsp.Update.GetDelete() {\n\t\t\te := deleteToEvent(name, namePrefix, rsp.Update.GetTimestamp(), del, prefixTags)\n\t\t\taddMetaTags(e, meta)\n\t\t\tif (e != nil && e != &EventMsg{}) {\n\t\t\t\tevs = append(evs, e)\n\t\t\t}\n\t\t}\n\n\t\tfor _, ep := range eps {\n\t\t\tevs = ep.Apply(evs...)\n\t\t}\n\t}\n\treturn evs, nil\n}\n\nfunc GetResponseToEventMsgs(rsp *gnmi.GetResponse, meta map[string]string, eps ...EventProcessor) ([]*EventMsg, error) {\n\tif rsp == nil {\n\t\treturn nil, nil\n\t}\n\tevs := make([]*EventMsg, 0, len(rsp.GetNotification()))\n\tfor _, notif := range rsp.GetNotification() {\n\t\tnamePrefix, prefixTags := tagsFromGNMIPath(notif.GetPrefix())\n\t\tuevs, err := updatesToEvent(\"get-request\", namePrefix, notif.GetTimestamp(), notif.GetUpdate(), prefixTags, meta)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tevs = append(evs, uevs...)\n\t}\n\tfor _, ep := range eps {\n\t\tevs = ep.Apply(evs...)\n\t}\n\treturn evs, nil\n}\n\nfunc updatesToEvent(name, prefix string, ts int64, upds []*gnmi.Update, tags, meta map[string]string) ([]*EventMsg, error) {\n\tevs := make([]*EventMsg, 0, len(upds))\n\tfor _, upd := range upds {\n\t\te, err := updateToEvent(name, prefix, ts, upd, tags)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\taddMetaTags(e, meta)\n\t\tif (e != nil && e != &EventMsg{}) {\n\t\t\tevs = append(evs, e)\n\t\t}\n\t}\n\treturn evs, nil\n}\n\nfunc updateToEvent(name, prefix string, ts int64, upd *gnmi.Update, tags map[string]string) (*EventMsg, error) {\n\te := &EventMsg{\n\t\tName:      name,\n\t\tTimestamp: ts,\n\t\tTags:      make(map[string]string),\n\t\tValues:    make(map[string]interface{}),\n\t}\n\tfor k, v := range tags {\n\t\te.Tags[k] = v\n\t}\n\tpathName, pTags := tagsFromGNMIPath(upd.GetPath())\n\tpsb 
:= stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tpsb.Reset()\n\t\tstringBuilderPool.Put(psb)\n\t}()\n\tpsb.WriteString(strings.TrimRight(prefix, \"/\"))\n\tpsb.WriteString(\"/\")\n\tpsb.WriteString(strings.TrimLeft(pathName, \"/\"))\n\tpathName = psb.String()\n\tfor k, v := range pTags {\n\t\tif vv, ok := e.Tags[k]; ok {\n\t\t\tif v != vv {\n\t\t\t\te.Tags[fmt.Sprintf(\"%s_%s\", pathName, k)] = v\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\te.Tags[k] = v\n\t}\n\tvar err error\n\te.Values, err = getValueFlat(pathName, upd.GetVal())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn e, nil\n}\n\nfunc deleteToEvent(name, prefix string, ts int64, del *gnmi.Path, tags map[string]string) *EventMsg {\n\te := &EventMsg{\n\t\tName:      name,\n\t\tTimestamp: ts,\n\t\tTags:      make(map[string]string),\n\t\tDeletes:   make([]string, 0, 1),\n\t}\n\tfor k, v := range tags {\n\t\te.Tags[k] = v\n\t}\n\tpathName, pTags := tagsFromGNMIPath(del)\n\tpsb := stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tpsb.Reset()\n\t\tstringBuilderPool.Put(psb)\n\t}()\n\tpsb.WriteString(strings.TrimRight(prefix, \"/\"))\n\tpsb.WriteString(\"/\")\n\tpsb.WriteString(strings.TrimLeft(pathName, \"/\"))\n\tpathName = psb.String()\n\tfor k, v := range pTags {\n\t\tif vv, ok := e.Tags[k]; ok {\n\t\t\tif v != vv {\n\t\t\t\te.Tags[fmt.Sprintf(\"%s_%s\", pathName, k)] = v\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\te.Tags[k] = v\n\t}\n\te.Deletes = append(e.Deletes, pathName)\n\treturn e\n}\n\n// tagsFromGNMIPath returns a string representation of the gNMI path without keys,\n// as well as a map of the keys in the path.\n// the key map will also contain a target value if present in the gNMI path.\nfunc tagsFromGNMIPath(p *gnmi.Path) (string, map[string]string) {\n\tif p == nil {\n\t\treturn \"\", nil\n\t}\n\ttags := make(map[string]string)\n\tsb := stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tsb.Reset()\n\t\tstringBuilderPool.Put(sb)\n\t}()\n\tif p.Origin != 
\"\" {\n\t\tsb.WriteString(p.Origin)\n\t\tsb.WriteString(\":\")\n\t}\n\tfor _, e := range p.GetElem() {\n\t\tif e.Name != \"\" {\n\t\t\tsb.WriteString(\"/\")\n\t\t\tsb.WriteString(e.Name)\n\t\t}\n\t\tif e.Key != nil {\n\t\t\tksb := stringBuilderPool.Get().(*strings.Builder)\n\t\t\tfor k, v := range e.Key {\n\t\t\t\tif e.Name == \"\" {\n\t\t\t\t\ttags[k] = v\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\telems := strings.Split(e.Name, \":\")\n\t\t\t\tksb.WriteString(elems[len(elems)-1])\n\t\t\t\tksb.WriteString(\"_\")\n\t\t\t\tksb.WriteString(k)\n\t\t\t\ttags[ksb.String()] = v\n\t\t\t\tksb.Reset()\n\t\t\t}\n\t\t\tstringBuilderPool.Put(ksb)\n\t\t}\n\t}\n\tif p.GetTarget() != \"\" {\n\t\ttags[\"target\"] = p.GetTarget()\n\t}\n\treturn sb.String(), tags\n}\n\nfunc normalizeEmptyRFC7951(v any) any {\n\tswitch t := v.(type) {\n\tcase nil:\n\t\t// presence for 'empty'\n\t\treturn true\n\n\tcase []any:\n\t\t// handle single null element\n\t\tif len(t) == 1 && t[0] == nil {\n\t\t\treturn true\n\t\t}\n\t\t// recurse to catch nested cases\n\t\tfor i := range t {\n\t\t\tt[i] = normalizeEmptyRFC7951(t[i])\n\t\t}\n\t\treturn t\n\n\tcase map[string]any:\n\t\t// recurse to catch nested cases\n\t\tfor k, vv := range t {\n\t\t\tt[k] = normalizeEmptyRFC7951(vv)\n\t\t}\n\t\treturn t\n\n\tdefault:\n\t\treturn v\n\t}\n}\n\nfunc getValueFlat(prefix string, updValue *gnmi.TypedValue) (map[string]interface{}, error) {\n\tif updValue == nil {\n\t\treturn nil, nil\n\t}\n\tvar jsondata []byte\n\tvalues := make(map[string]interface{})\n\tswitch updValue.Value.(type) {\n\tcase *gnmi.TypedValue_AsciiVal:\n\t\tvalues[prefix] = updValue.GetAsciiVal()\n\tcase *gnmi.TypedValue_BoolVal:\n\t\tvalues[prefix] = updValue.GetBoolVal()\n\tcase *gnmi.TypedValue_BytesVal:\n\t\tvalues[prefix] = updValue.GetBytesVal()\n\tcase *gnmi.TypedValue_DecimalVal:\n\t\t//lint:ignore SA1019 still need DecimalVal for backward compatibility\n\t\tv := updValue.GetDecimalVal()\n\t\tvalues[prefix] = float64(v.Digits) / 
math.Pow10(int(v.Precision))\n\tcase *gnmi.TypedValue_FloatVal:\n\t\t//lint:ignore SA1019 still need GetFloatVal for backward compatibility\n\t\tvalues[prefix] = updValue.GetFloatVal()\n\tcase *gnmi.TypedValue_DoubleVal:\n\t\tvalues[prefix] = updValue.GetDoubleVal()\n\tcase *gnmi.TypedValue_IntVal:\n\t\tvalues[prefix] = updValue.GetIntVal()\n\tcase *gnmi.TypedValue_StringVal:\n\t\tvalues[prefix] = updValue.GetStringVal()\n\tcase *gnmi.TypedValue_UintVal:\n\t\tvalues[prefix] = updValue.GetUintVal()\n\tcase *gnmi.TypedValue_LeaflistVal:\n\t\tleafListVals := make([]interface{}, 0)\n\t\tfor _, tv := range updValue.GetLeaflistVal().GetElement() {\n\t\t\tv, err := getValue(tv)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tleafListVals = append(leafListVals, v)\n\t\t}\n\t\tvalues[prefix] = leafListVals\n\tcase *gnmi.TypedValue_ProtoBytes:\n\t\tvalues[prefix] = updValue.GetProtoBytes()\n\tcase *gnmi.TypedValue_AnyVal:\n\t\tvalues[prefix] = updValue.GetAnyVal()\n\tcase *gnmi.TypedValue_JsonIetfVal:\n\t\tjsondata = updValue.GetJsonIetfVal()\n\tcase *gnmi.TypedValue_JsonVal:\n\t\tjsondata = updValue.GetJsonVal()\n\t}\n\tif len(jsondata) != 0 {\n\t\tvar value interface{}\n\t\terr := json.Unmarshal(jsondata, &value)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvalue = normalizeEmptyRFC7951(value)\n\n\t\tswitch value := value.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tf := flattener.NewFlattener()\n\t\t\tf.SetPrefix(prefix)\n\t\t\tvalues, err = f.Flatten(value)\n\t\tdefault:\n\t\t\tvalues[prefix] = value\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn values, nil\n}\n\nfunc (e *EventMsg) ToMap() map[string]interface{} {\n\tif e == nil {\n\t\treturn nil\n\t}\n\tm := make(map[string]interface{})\n\tif e.Name != \"\" {\n\t\tm[\"name\"] = e.Name\n\t}\n\tif e.Timestamp != 0 {\n\t\tm[\"timestamp\"] = e.Timestamp\n\t}\n\tif len(e.Tags) > 0 {\n\t\tin := make(map[string]interface{})\n\t\tfor k, v := range e.Tags 
{\n\t\t\tin[k] = v\n\t\t}\n\t\tm[\"tags\"] = in\n\t}\n\tif len(e.Values) > 0 {\n\t\tm[\"values\"] = e.Values\n\t}\n\tif len(e.Deletes) > 0 {\n\t\tm[\"deletes\"] = e.Deletes\n\t}\n\treturn m\n}\n\nfunc EventFromMap(m map[string]interface{}) (*EventMsg, error) {\n\tif m == nil {\n\t\treturn nil, nil\n\t}\n\te := new(EventMsg)\n\n\tif v, ok := m[\"name\"]; ok {\n\t\tswitch v := v.(type) {\n\t\tcase string:\n\t\t\te.Name = v\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"could not convert map to event message, name it not a string\")\n\t\t}\n\t}\n\tif v, ok := m[\"timestamp\"]; ok {\n\t\ti := num64(v)\n\t\tif i == nil {\n\t\t\treturn nil, fmt.Errorf(\"could not convert map to event message, timestamp is not an int64: %T\", v)\n\t\t}\n\t\tswitch i := i.(type) {\n\t\tcase int64:\n\t\t\te.Timestamp = i\n\t\tcase uint64:\n\t\t\te.Timestamp = int64(i)\n\t\t}\n\n\t}\n\tif v, ok := m[\"tags\"]; ok {\n\t\tswitch v := v.(type) {\n\t\tcase map[string]string:\n\t\t\te.Tags = v\n\t\tcase map[string]interface{}:\n\t\t\te.Tags = make(map[string]string)\n\t\t\tfor k, v := range v {\n\t\t\t\te.Tags[k], _ = v.(string)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"could not convert map to event message, tags are not a map[string]string\")\n\t\t}\n\t}\n\tif v, ok := m[\"values\"]; ok {\n\t\tswitch v := v.(type) {\n\t\tcase map[string]interface{}:\n\t\t\te.Values = v\n\t\tcase map[string]string:\n\t\t\te.Values = make(map[string]interface{})\n\t\t\tfor k, v := range v {\n\t\t\t\te.Values[k] = v\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"could not convert map to event message, values are not a map[string]interface{}\")\n\t\t}\n\t}\n\tif v, ok := m[\"deletes\"]; ok {\n\t\tswitch v := v.(type) {\n\t\tcase []string:\n\t\t\te.Deletes = v\n\t\tcase []interface{}:\n\t\t\tfor _, d := range v {\n\t\t\t\tif ds, ok := d.(string); ok {\n\t\t\t\t\te.Deletes = append(e.Deletes, ds)\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"could not convert map to event 
message, name it not a string\")\n\t\t}\n\t}\n\treturn e, nil\n}\n\nfunc num64(n interface{}) interface{} {\n\tswitch n := n.(type) {\n\tcase int:\n\t\treturn int64(n)\n\tcase int8:\n\t\treturn int64(n)\n\tcase int16:\n\t\treturn int64(n)\n\tcase int32:\n\t\treturn int64(n)\n\tcase int64:\n\t\treturn int64(n)\n\tcase uint:\n\t\treturn uint64(n)\n\tcase uintptr:\n\t\treturn uint64(n)\n\tcase uint8:\n\t\treturn uint64(n)\n\tcase uint16:\n\t\treturn uint64(n)\n\tcase uint32:\n\t\treturn uint64(n)\n\tcase uint64:\n\t\treturn uint64(n)\n\tcase float64:\n\t\treturn uint64(n)\n\t}\n\treturn nil\n}\n\nfunc addMetaTags(e *EventMsg, meta map[string]string) {\n\tfor k, v := range meta {\n\t\tif k == \"format\" {\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := e.Tags[k]; ok {\n\t\t\te.Tags[fmt.Sprintf(\"meta_%s\", k)] = v\n\t\t\tcontinue\n\t\t}\n\t\te.Tags[k] = v\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_add_tag/event_add_tag.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_add_tag\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com/itchyny/gojq\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-add-tag\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// addTag adds a set of tags to the event message if certain criteria's are met.\ntype addTag struct {\n\tformatters.BaseProcessor\n\tCondition  string            `mapstructure:\"condition,omitempty\"`\n\tTags       []string          `mapstructure:\"tags,omitempty\" json:\"tags,omitempty\"`\n\tValues     []string          `mapstructure:\"values,omitempty\" json:\"values,omitempty\"`\n\tTagNames   []string          `mapstructure:\"tag-names,omitempty\" json:\"tag-names,omitempty\"`\n\tValueNames []string          `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tDeletes    []string          `mapstructure:\"deletes,omitempty\" json:\"deletes,omitempty\"`\n\tOverwrite  bool              `mapstructure:\"overwrite,omitempty\" json:\"overwrite,omitempty\"`\n\tAdd        map[string]string `mapstructure:\"add,omitempty\" json:\"add,omitempty\"`\n\tDebug      bool              `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\ttags       []*regexp.Regexp\n\tvalues     []*regexp.Regexp\n\ttagNames   []*regexp.Regexp\n\tvalueNames []*regexp.Regexp\n\tdeletes    []*regexp.Regexp\n\tcode       *gojq.Code\n\tlogger     
*log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &addTag{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (p *addTag) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\tif p.Condition != \"\" {\n\t\tp.Condition = strings.TrimSpace(p.Condition)\n\t\tq, err := gojq.Parse(p.Condition)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.code, err = gojq.Compile(q)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// init tags regex\n\tp.tags, err = compileRegex(p.Tags)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// init tag names regex\n\tp.tagNames, err = compileRegex(p.TagNames)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// init values regex\n\tp.values, err = compileRegex(p.Values)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// init value names regex\n\tp.valueNames, err = compileRegex(p.ValueNames)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// init deletes regex\n\tp.deletes, err = compileRegex(p.Deletes)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif p.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(p)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"initialized processor '%s': %+v\", processorType, p)\n\t\t\treturn nil\n\t\t}\n\t\tp.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (p *addTag) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\t// condition is set\n\t\tif p.code != nil && p.Condition != \"\" {\n\t\t\tok, err := formatters.CheckCondition(p.code, e)\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Printf(\"condition check failed: %v\", err)\n\t\t\t}\n\t\t\tif ok {\n\t\t\t\tp.addTags(e)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// no condition, check regexes\n\t\tfor k, v := range e.Values 
{\n\t\t\tfor _, re := range p.valueNames {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\tp.addTags(e)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, re := range p.values {\n\t\t\t\tif vs, ok := v.(string); ok {\n\t\t\t\t\tif re.MatchString(vs) {\n\t\t\t\t\t\tp.addTags(e)\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor k, v := range e.Tags {\n\t\t\tfor _, re := range p.tagNames {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\tp.addTags(e)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, re := range p.tags {\n\t\t\t\tif re.MatchString(v) {\n\t\t\t\t\tp.addTags(e)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor _, k := range e.Deletes {\n\t\t\tfor _, re := range p.deletes {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\tp.addTags(e)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (p *addTag) WithLogger(l *log.Logger) {\n\tif p.Debug && l != nil {\n\t\tp.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if p.Debug {\n\t\tp.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc (p *addTag) addTags(e *formatters.EventMsg) {\n\tif e.Tags == nil {\n\t\te.Tags = make(map[string]string)\n\t}\n\tfor nk, nv := range p.Add {\n\t\tif p.Overwrite {\n\t\t\te.Tags[nk] = nv\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := e.Tags[nk]; !ok {\n\t\t\te.Tags[nk] = nv\n\t\t}\n\t}\n}\n\nfunc compileRegex(expr []string) ([]*regexp.Regexp, error) {\n\tres := make([]*regexp.Regexp, 0, len(expr))\n\tfor _, reg := range expr {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tres = append(res, re)\n\t}\n\treturn res, nil\n}\n"
  },
  {
    "path": "pkg/formatters/event_add_tag/event_add_tag_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_add_tag\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"match_condition\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"condition\": `.values.value == 1`,\n\t\t\t\"add\":       map[string]string{\"tag1\": \"new_tag\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": 
\"new_tag\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_condition_overwrite\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"condition\": `.values.value == 1`,\n\t\t\t\"add\":       map[string]string{\"tag1\": \"new_tag\"},\n\t\t\t\"overwrite\": true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// match value name\n\t\"match_value_name_add\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"value\"},\n\t\t\t\"add\":         map[string]string{\"tag1\": \"new_tag\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   
map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_value_name_overwrite\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"debug\":       true,\n\t\t\t\"value-names\": []string{\"value\"},\n\t\t\t\"overwrite\":   true,\n\t\t\t\"add\": map[string]string{\n\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 2},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 2},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"new_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 2},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": 
\"2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"new_tag\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 2},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"new_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_value_name_add_many\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"value\"},\n\t\t\t\"add\": map[string]string{\n\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 
1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_value_name_add_many_overwrite\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"value\"},\n\t\t\t\"overwrite\":   true,\n\t\t\t\"add\": map[string]string{\n\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// match value\n\t\"match_value_add\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"values\": []string{\"value\"},\n\t\t\t\"add\":    map[string]string{\"tag1\": \"new_tag\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"v\": \"value\"},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": 
\"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"v\": \"value\"},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_value_overwrite\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"value\"},\n\t\t\t\"overwrite\":   true,\n\t\t\t\"add\": map[string]string{\n\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": \"value\"},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": \"value\"},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"new_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_value_add_many\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"values\": []string{\"value\"},\n\t\t\t\"add\": map[string]string{\n\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t\t\"tag2\": 
\"new_tag2\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": \"value\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": \"value\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_value_add_many_overwrite\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"values\":    []string{\"value\"},\n\t\t\t\"overwrite\": true,\n\t\t\t\"add\": map[string]string{\n\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": \"value\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": \"value\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": 
\"new_tag\",\n\t\t\t\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// match tag name\n\t\"match_tag_name_add\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tag-names\": []string{\".\"},\n\t\t\t\"add\":       map[string]string{\"tag1\": \"new_tag\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_tag_name_overwrite\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tag-names\": []string{\".\"},\n\t\t\t\"overwrite\": true,\n\t\t\t\"add\": map[string]string{\n\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"new_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_tag_name_add_many\": {\n\t\tprocessorType: 
processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tag-names\": []string{\".\"},\n\t\t\t\"add\": map[string]string{\n\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_tag_name_add_many_overwrite\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tag-names\": []string{\".\"},\n\t\t\t\"overwrite\": true,\n\t\t\t\"add\": map[string]string{\n\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// match tag\n\t\"match_tag_add\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tags\": []string{\"tag_value\"},\n\t\t\t\"add\":  map[string]string{\"tag1\": \"new_tag\"},\n\t\t},\n\t\ttests: 
[]item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"old_tag\": \"tag_value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"old_tag\": \"tag_value\",\n\t\t\t\t\t\t\t\"tag1\":    \"new_tag\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"old_tag\": \"tag_value\",\n\t\t\t\t\t\t\t\"tag1\":    \"old_value\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"old_tag\": \"tag_value\",\n\t\t\t\t\t\t\t\"tag1\":    \"old_value\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_tag_overwrite\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tags\":      []string{\"tag_value\"},\n\t\t\t\"overwrite\": true,\n\t\t\t\"add\": map[string]string{\n\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"tag_value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"new_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"old_tag\": \"tag_value\",\n\t\t\t\t\t\t\t\"tag1\":    
\"old_value\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"old_tag\": \"tag_value\",\n\t\t\t\t\t\t\t\"tag1\":    \"new_tag\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_tag_add_many\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tags\": []string{\"1\"},\n\t\t\t\"add\": map[string]string{\n\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_tag_add_many_overwrite\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tag-names\": []string{\"1\"},\n\t\t\t\"overwrite\": true,\n\t\t\t\"add\": map[string]string{\n\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t\t\"tag2\": \"new_tag2\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"new_tag\",\n\t\t\t\t\t\t\t\"tag2\": 
\"new_tag2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// match delete\n\t\"match_delete_add\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"deletes\": []string{\"^deleted_path.*\"},\n\t\t\t\"add\":     map[string]string{\"tag1\": \"new_tag\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags:    map[string]string{\"old_tag\": \"tag_value\"},\n\t\t\t\t\t\tDeletes: []string{\"deleted_path\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"old_tag\": \"tag_value\",\n\t\t\t\t\t\t\t\"tag1\":    \"new_tag\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDeletes: []string{\"deleted_path\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"old_tag\": \"tag_value\",\n\t\t\t\t\t\t\t\"tag1\":    \"old_value\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDeletes: []string{\"non_matching_deleted_path\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"old_tag\": \"tag_value\",\n\t\t\t\t\t\t\t\"tag1\":    \"old_value\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDeletes: []string{\"non_matching_deleted_path\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventAddTag(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tt.Log(\"found processor\")\n\t\t\tp := pi()\n\t\t\terr := p.Init(ts.processor)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"processor: %+v\", p)\n\t\t\tfor 
i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Logf(\"failed at %s item %d, index %d, expected: %+v\", name, i, j, item.output[j])\n\t\t\t\t\t\t\tt.Logf(\"failed at %s item %d, index %d,      got: %+v\", name, i, j, outs[j])\n\t\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"event processor %s not found\", ts.processorType)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_allow/event_allow.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_allow\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com/itchyny/gojq\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-allow\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// allow Allows the msg if ANY of the Tags or Values regexes are matched\ntype allow struct {\n\tformatters.BaseProcessor\n\tCondition  string   `mapstructure:\"condition,omitempty\"`\n\tTagNames   []string `mapstructure:\"tag-names,omitempty\" json:\"tag-names,omitempty\"`\n\tValueNames []string `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tTags       []string `mapstructure:\"tags,omitempty\" json:\"tags,omitempty\"`\n\tValues     []string `mapstructure:\"values,omitempty\" json:\"values,omitempty\"`\n\tDebug      bool     `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\ttagNames   []*regexp.Regexp\n\tvalueNames []*regexp.Regexp\n\ttags       []*regexp.Regexp\n\tvalues     []*regexp.Regexp\n\tcode       *gojq.Code\n\tlogger     *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &allow{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (d *allow) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, d)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts 
{\n\t\topt(d)\n\t}\n\td.Condition = strings.TrimSpace(d.Condition)\n\tq, err := gojq.Parse(d.Condition)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.code, err = gojq.Compile(q)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// init tag keys regex\n\td.tagNames = make([]*regexp.Regexp, 0, len(d.TagNames))\n\tfor _, reg := range d.TagNames {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.tagNames = append(d.tagNames, re)\n\t}\n\td.tags = make([]*regexp.Regexp, 0, len(d.Tags))\n\tfor _, reg := range d.Tags {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.tags = append(d.tags, re)\n\t}\n\t//\n\td.valueNames = make([]*regexp.Regexp, 0, len(d.ValueNames))\n\tfor _, reg := range d.ValueNames {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.valueNames = append(d.valueNames, re)\n\t}\n\n\td.values = make([]*regexp.Regexp, 0, len(d.values))\n\tfor _, reg := range d.Values {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.values = append(d.values, re)\n\t}\n\tif d.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(d)\n\t\tif err != nil {\n\t\t\td.logger.Printf(\"initialized processor '%s': %+v\", processorType, d)\n\t\t\treturn nil\n\t\t}\n\t\td.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (d *allow) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\ti := 0\n\tfor _, e := range es {\n\t\tif d.allow(e) {\n\t\t\tes[i] = e\n\t\t\ti++\n\t\t}\n\t}\n\tfor j := i; j < len(es); j++ {\n\t\tes[j] = nil\n\t}\n\tes = es[:i]\n\treturn es\n}\n\nfunc (d *allow) WithLogger(l *log.Logger) {\n\tif d.Debug && l != nil {\n\t\td.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if d.Debug {\n\t\td.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc (d *allow) allow(e *formatters.EventMsg) bool {\n\tif 
d.Condition != \"\" {\n\t\tok, err := formatters.CheckCondition(d.code, e)\n\t\tif err != nil {\n\t\t\td.logger.Printf(\"condition check failed: %v\", err)\n\t\t\treturn false\n\t\t}\n\t\treturn ok\n\t}\n\tfor k, v := range e.Values {\n\t\tfor _, re := range d.valueNames {\n\t\t\tif re.MatchString(k) {\n\t\t\t\td.logger.Printf(\"value name '%s' matched regex '%s'\", k, re.String())\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tfor _, re := range d.values {\n\t\t\tif vs, ok := v.(string); ok {\n\t\t\t\tif re.MatchString(vs) {\n\t\t\t\t\td.logger.Printf(\"value '%s' matched regex '%s'\", v, re.String())\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor k, v := range e.Tags {\n\t\tfor _, re := range d.tagNames {\n\t\t\tif re.MatchString(k) {\n\t\t\t\td.logger.Printf(\"tag name '%s' matched regex '%s'\", k, re.String())\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tfor _, re := range d.tags {\n\t\t\tif re.MatchString(v) {\n\t\t\t\td.logger.Printf(\"tag '%s' matched regex '%s'\", v, re.String())\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "pkg/formatters/event_allow/event_allow_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_allow\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"allow_condition\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"condition\": \".values.value == 1\",\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"allow_value_names\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"^number$\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: 
map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": 1},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"not-number\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": 1},\n\t\t\t\t\t},\n\t\t\t\t\t//{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"allow_tag_names\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tag-names\": []string{\"^name*\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"name\": \"dummy\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"not-name\": \"dummy\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"name\": \"dummy\"},\n\t\t\t\t\t},\n\t\t\t\t\t//{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"allow_tag_values\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tags\": []string{\"router1\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"name\": 
\"router1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"not-name\": \"dummy\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"name\": \"router1\"},\n\t\t\t\t\t},\n\t\t\t\t\t//{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"allow_multiple_value_names\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\n\t\t\t\t\"^number$\",\n\t\t\t\t\"^name$\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": 1},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"not-number\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": 1},\n\t\t\t\t\t},\n\t\t\t\t\t//{},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": \"123\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"not-name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": \"123\"},\n\t\t\t\t\t},\n\t\t\t\t\t//{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"allow_multiple_tag_names\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tag-names\": []string{\n\t\t\t\t\"^id$\",\n\t\t\t\t\"^name$\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"name\": \"dummy\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"not-name\": \"dummy\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"name\": \"dummy\"},\n\t\t\t\t\t},\n\t\t\t\t\t//{},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"name\": \"dummy\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"id\": \"dummy\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"name\": \"dummy\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"id\": \"dummy\"},\n\t\t\t\t\t},\n\t\t\t\t\t//{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventAllow(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tp := pi()\n\t\t\terr := p.Init(ts.processor)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"processor: %+v\", p)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Logf(\"failed at event allow, item %d, index %d\", i, j)\n\t\t\t\t\t\t\tt.Logf(\"expected: %#v\", item.output[j])\n\t\t\t\t\t\t\tt.Logf(\"     got: %#v\", 
outs[j])\n\t\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_combine/event_combine.go",
    "content": "// © 2023 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_combine_test\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/itchyny/gojq\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-combine\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// combine allows running multiple processors together based on conditions\ntype combine struct {\n\tformatters.BaseProcessor\n\tProcessors []*procseq `mapstructure:\"processors,omitempty\"`\n\tDebug      bool       `mapstructure:\"debug,omitempty\"`\n\n\tprocessorsDefinitions map[string]map[string]any\n\ttargetsConfigs        map[string]*types.TargetConfig\n\tactionsDefinitions    map[string]map[string]any\n\n\tlogger *log.Logger\n}\n\ntype procseq struct {\n\tCondition string `mapstructure:\"condition,omitempty\"`\n\tName      string `mapstructure:\"name,omitempty\"`\n\n\tcondition *gojq.Code\n\tproc      formatters.EventProcessor\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &combine{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (p *combine) Init(cfg any, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\tif len(p.Processors) == 0 {\n\t\treturn fmt.Errorf(\"missing processors 
definition\")\n\t}\n\tfor i, proc := range p.Processors {\n\t\tif proc == nil {\n\t\t\treturn fmt.Errorf(\"missing processor(#%d) definition\", i)\n\t\t}\n\t\tif proc.Name == \"\" {\n\t\t\treturn fmt.Errorf(\"invalid processor(#%d) definition: missing name\", i)\n\t\t}\n\t\t// init condition if it's set\n\t\tif proc.Condition != \"\" {\n\t\t\tproc.Condition = strings.TrimSpace(proc.Condition)\n\t\t\tq, err := gojq.Parse(proc.Condition)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tproc.condition, err = gojq.Compile(q)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t// init subprocessors\n\t\tif epCfg, ok := p.processorsDefinitions[proc.Name]; ok {\n\t\t\tepType := \"\"\n\t\t\tfor k := range epCfg {\n\t\t\t\tepType = k\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif in, ok := formatters.EventProcessors[epType]; ok {\n\t\t\t\tproc.proc = in()\n\t\t\t\terr := proc.proc.Init(epCfg[epType],\n\t\t\t\t\tformatters.WithLogger(p.logger),\n\t\t\t\t\tformatters.WithTargets(p.targetsConfigs),\n\t\t\t\t\tformatters.WithActions(p.actionsDefinitions),\n\t\t\t\t\tformatters.WithProcessors(p.processorsDefinitions),\n\t\t\t\t)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed initializing event processor '%s' of type='%s': %v\", proc.Name, epType, err)\n\t\t\t\t}\n\t\t\t\tp.logger.Printf(\"added event processor '%s' of type=%s to combine processor\", proc.Name, epType)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"%q event processor has an unknown type=%q\", proc.Name, epType)\n\t\t}\n\t\treturn fmt.Errorf(\"%q event processor not found\", proc.Name)\n\t}\n\tif p.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(p)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"initialized processor '%s': %+v\", processorType, p)\n\t\t\treturn nil\n\t\t}\n\t\tp.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (p *combine) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tles := 
len(es)\n\n\tin := make([]*formatters.EventMsg, 0, les)\n\tout := make([]*formatters.EventMsg, 0, les)\n\tfor _, proc := range p.Processors {\n\t\tin = in[:0]\n\t\tout = out[:0]\n\n\t\tfor i, e := range es {\n\t\t\tok, err := formatters.CheckCondition(proc.condition, e)\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Printf(\"condition check failed: %v\", err)\n\t\t\t}\n\t\t\tif ok {\n\t\t\t\tif p.Debug {\n\t\t\t\t\tp.logger.Printf(\"processor #%d include: %s\", i, e)\n\t\t\t\t}\n\t\t\t\tin = append(in, e)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif p.Debug {\n\t\t\t\tp.logger.Printf(\"processor #%d exclude: %s\", i, e)\n\t\t\t}\n\t\t\tout = append(out, e)\n\t\t}\n\n\t\tin = proc.proc.Apply(in...)\n\t\tes = es[:0]\n\t\tes = append(es, in...)\n\t\tes = append(es, out...)\n\t\tif len(es) > 1 {\n\t\t\tsort.Slice(es, func(i, j int) bool {\n\t\t\t\treturn es[i].Timestamp < es[j].Timestamp\n\t\t\t})\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (s *combine) WithLogger(l *log.Logger) {\n\tif s.Debug && l != nil {\n\t\ts.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if s.Debug {\n\t\ts.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc (s *combine) WithTargets(tcs map[string]*types.TargetConfig) {\n\ts.targetsConfigs = tcs\n}\n\nfunc (s *combine) WithActions(act map[string]map[string]any) {\n\ts.actionsDefinitions = act\n}\n\nfunc (s *combine) WithProcessors(procs map[string]map[string]any) {\n\ts.processorsDefinitions = procs\n}\n"
  },
  {
    "path": "pkg/formatters/event_combine/event_combine_test/event_combine_test.go",
    "content": "// © 2023 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_sequence\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/all\"\n)\n\nfunc Test_combine_Apply(t *testing.T) {\n\ttype fields struct {\n\t\tprocessorConfig map[string]any\n\t\tprocessorsSet   map[string]map[string]any\n\t}\n\ttype args struct {\n\t\tes []*formatters.EventMsg\n\t}\n\ttests := []struct {\n\t\tname   string\n\t\tfields fields\n\t\targs   args\n\t\twant   []*formatters.EventMsg\n\t}{\n\t\t{\n\t\t\tname: \"simple1\",\n\t\t\tfields: fields{\n\t\t\t\tprocessorConfig: map[string]any{\n\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\"processors\": []any{\n\t\t\t\t\t\tmap[string]any{\n\t\t\t\t\t\t\t\"condition\": \".tags.tag == \\\"t1\\\"\",\n\t\t\t\t\t\t\t\"name\":      \"proc1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tmap[string]any{\n\t\t\t\t\t\t\t\"name\": \"proc2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tprocessorsSet: map[string]map[string]any{\n\t\t\t\t\t\"proc1\": {\n\t\t\t\t\t\t\"event-strings\": map[string]any{\n\t\t\t\t\t\t\t\"value-names\": []string{\"^number$\"},\n\t\t\t\t\t\t\t\"transforms\": []map[string]any{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"replace\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"apply-on\": \"name\",\n\t\t\t\t\t\t\t\t\t\t\"old\":      \"number\",\n\t\t\t\t\t\t\t\t\t\t\"new\":      \"new_number\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"proc2\": 
{\n\t\t\t\t\t\t\"event-strings\": map[string]any{\n\t\t\t\t\t\t\t\"tag-names\": []string{\"^tag$\"},\n\t\t\t\t\t\t\t\"transforms\": []map[string]any{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"replace\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"apply-on\": \"name\",\n\t\t\t\t\t\t\t\t\t\t\"old\":      \"tag\",\n\t\t\t\t\t\t\t\t\t\t\"new\":      \"new_tag\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags:   map[string]string{\"tag\": \"t1\"},\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": \"42\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTags:   map[string]string{\"t\": \"t1\"},\n\t\t\t\t\t\tValues: map[string]interface{}{\"n\": \"42\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tTags:   map[string]string{\"new_tag\": \"t1\"},\n\t\t\t\t\tValues: map[string]interface{}{\"new_number\": \"42\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tTags:   map[string]string{\"t\": \"t1\"},\n\t\t\t\t\tValues: map[string]interface{}{\"n\": \"42\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"simple2\",\n\t\t\tfields: fields{\n\t\t\t\tprocessorConfig: map[string]any{\n\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\"processors\": []any{\n\t\t\t\t\t\tmap[string]any{\n\t\t\t\t\t\t\t\"condition\": \".tags.tag == \\\"t2\\\"\",\n\t\t\t\t\t\t\t\"name\":      \"proc1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tmap[string]any{\n\t\t\t\t\t\t\t\"name\": \"proc2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tprocessorsSet: map[string]map[string]any{\n\t\t\t\t\t\"proc1\": {\n\t\t\t\t\t\t\"event-strings\": map[string]any{\n\t\t\t\t\t\t\t\"value-names\": []string{\"^number$\"},\n\t\t\t\t\t\t\t\"transforms\": []map[string]any{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"replace\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"apply-on\": \"name\",\n\t\t\t\t\t\t\t\t\t\t\"old\":      
\"number\",\n\t\t\t\t\t\t\t\t\t\t\"new\":      \"new_number\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"proc2\": {\n\t\t\t\t\t\t\"event-strings\": map[string]any{\n\t\t\t\t\t\t\t\"tag-names\": []string{\"^tag$\"},\n\t\t\t\t\t\t\t\"transforms\": []map[string]any{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"replace\": map[string]any{\n\t\t\t\t\t\t\t\t\t\t\"apply-on\": \"name\",\n\t\t\t\t\t\t\t\t\t\t\"old\":      \"tag\",\n\t\t\t\t\t\t\t\t\t\t\"new\":      \"new_tag\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags:   map[string]string{\"tag\": \"t1\"},\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": \"42\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTags:   map[string]string{\"t\": \"t1\"},\n\t\t\t\t\t\tValues: map[string]interface{}{\"n\": \"42\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tTags:   map[string]string{\"new_tag\": \"t1\"},\n\t\t\t\t\tValues: map[string]interface{}{\"number\": \"42\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tTags:   map[string]string{\"t\": \"t1\"},\n\t\t\t\t\tValues: map[string]interface{}{\"n\": \"42\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tin := formatters.EventProcessors[\"event-combine\"]\n\t\t\tp := in()\n\t\t\terr := p.Init(tt.fields.processorConfig, formatters.WithProcessors(tt.fields.processorsSet))\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"%s failed to init the processor: %v\", tt.name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif got := p.Apply(tt.args.es...); !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"combine.Apply() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_convert/event_convert.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_convert\n\nimport (\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-convert\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// convert converts the value with key matching one of regexes, to the specified Type\ntype convert struct {\n\tformatters.BaseProcessor\n\tValues []string `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tType   string   `mapstructure:\"type,omitempty\" json:\"type,omitempty\"`\n\tDebug  bool     `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\tvalues []*regexp.Regexp\n\tlogger *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &convert{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (c *convert) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(c)\n\t}\n\tc.values = make([]*regexp.Regexp, 0, len(c.Values))\n\tfor _, reg := range c.Values {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.values = append(c.values, re)\n\t}\n\tif c.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(c)\n\t\tif err != nil {\n\t\t\tc.logger.Printf(\"initialized 
processor '%s': %+v\", processorType, c)\n\t\t\treturn nil\n\t\t}\n\t\tc.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (c *convert) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor k, v := range e.Values {\n\t\t\tfor _, re := range c.values {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\tc.logger.Printf(\"key '%s' matched regex '%s'\", k, re.String())\n\t\t\t\t\tswitch c.Type {\n\t\t\t\t\tcase \"int\":\n\t\t\t\t\t\tiv, err := convertToInt(v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tc.logger.Printf(\"convert error: %v\", err)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tc.logger.Printf(\"key '%s', value %v converted to %s: %d\", k, v, c.Type, iv)\n\t\t\t\t\t\te.Values[k] = iv\n\t\t\t\t\tcase \"uint\":\n\t\t\t\t\t\tiv, err := convertToUint(v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tc.logger.Printf(\"convert error: %v\", err)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tc.logger.Printf(\"key '%s', value %v converted to %s: %d\", k, v, c.Type, iv)\n\t\t\t\t\t\te.Values[k] = iv\n\t\t\t\t\tcase \"string\":\n\t\t\t\t\t\tiv, err := convertToString(v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tc.logger.Printf(\"convert error: %v\", err)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tc.logger.Printf(\"key '%s', value %v converted to %s: %s\", k, v, c.Type, iv)\n\t\t\t\t\t\te.Values[k] = iv\n\t\t\t\t\tcase \"float\":\n\t\t\t\t\t\tiv, err := convertToFloat(v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tc.logger.Printf(\"convert error: %v\", err)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tc.logger.Printf(\"key '%s', value %v converted to %s: %f\", k, v, c.Type, iv)\n\t\t\t\t\t\te.Values[k] = iv\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (c *convert) WithLogger(l *log.Logger) {\n\tif c.Debug && l != nil {\n\t\tc.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} 
else if c.Debug {\n\t\tc.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc convertToInt(i interface{}) (int, error) {\n\tswitch i := i.(type) {\n\tcase string:\n\t\tiv, err := strconv.Atoi(i)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn iv, nil\n\tcase int:\n\t\treturn i, nil\n\tcase int8:\n\t\treturn int(i), nil\n\tcase int16:\n\t\treturn int(i), nil\n\tcase int32:\n\t\treturn int(i), nil\n\tcase int64:\n\t\treturn int(i), nil\n\tcase uint:\n\t\treturn int(i), nil\n\tcase uint8:\n\t\treturn int(i), nil\n\tcase uint16:\n\t\treturn int(i), nil\n\tcase uint32:\n\t\treturn int(i), nil\n\tcase uint64:\n\t\treturn int(i), nil\n\tcase float64:\n\t\treturn int(i), nil\n\tcase float32:\n\t\treturn int(i), nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"cannot convert %v to int, type %T\", i, i)\n\t}\n}\n\nfunc convertToUint(i interface{}) (uint, error) {\n\tswitch i := i.(type) {\n\tcase string:\n\t\tiv, err := strconv.Atoi(i)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn uint(iv), nil\n\tcase int:\n\t\tif i < 0 {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn uint(i), nil\n\tcase int8:\n\t\tif i < 0 {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn uint(i), nil\n\tcase int16:\n\t\tif i < 0 {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn uint(i), nil\n\tcase int32:\n\t\tif i < 0 {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn uint(i), nil\n\tcase int64:\n\t\tif i < 0 {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn uint(i), nil\n\tcase uint:\n\t\treturn i, nil\n\tcase uint8:\n\t\treturn uint(i), nil\n\tcase uint16:\n\t\treturn uint(i), nil\n\tcase uint32:\n\t\treturn uint(i), nil\n\tcase uint64:\n\t\treturn uint(i), nil\n\tcase float32:\n\t\tif i < 0 {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn uint(i), nil\n\tcase float64:\n\t\tif i < 0 {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn uint(i), nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"cannot convert %v to uint, type %T\", i, i)\n\t}\n}\n\nfunc convertToFloat(i interface{}) (float64, error) {\n\tswitch 
i := i.(type) {\n\tcase []uint8:\n\t\tif len(i) == 4 {\n\t\t\treturn float64(math.Float32frombits(binary.BigEndian.Uint32([]byte(i)))), nil\n\t\t} else if len(i) == 8 {\n\t\t\treturn float64(math.Float64frombits(binary.BigEndian.Uint64([]byte(i)))), nil\n\t\t} else {\n\t\t\treturn 0, nil\n\t\t}\n\tcase string:\n\t\tiv, err := strconv.ParseFloat(i, 64)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn iv, nil\n\tcase int:\n\t\treturn float64(i), nil\n\tcase int8:\n\t\treturn float64(i), nil\n\tcase int16:\n\t\treturn float64(i), nil\n\tcase int32:\n\t\treturn float64(i), nil\n\tcase int64:\n\t\treturn float64(i), nil\n\tcase uint:\n\t\treturn float64(i), nil\n\tcase uint8:\n\t\treturn float64(i), nil\n\tcase uint16:\n\t\treturn float64(i), nil\n\tcase uint32:\n\t\treturn float64(i), nil\n\tcase uint64:\n\t\treturn float64(i), nil\n\tcase float64:\n\t\treturn i, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"cannot convert %v to float64, type %T\", i, i)\n\t}\n}\n\nfunc convertToString(i interface{}) (string, error) {\n\tswitch i := i.(type) {\n\tcase string:\n\t\treturn i, nil\n\tcase int:\n\t\treturn strconv.Itoa(i), nil\n\tcase int8:\n\t\treturn strconv.Itoa(int(i)), nil\n\tcase int16:\n\t\treturn strconv.Itoa(int(i)), nil\n\tcase int32:\n\t\treturn strconv.Itoa(int(i)), nil\n\tcase int64:\n\t\treturn strconv.Itoa(int(i)), nil\n\tcase uint:\n\t\treturn strconv.FormatUint(uint64(i), 10), nil\n\tcase uint8:\n\t\treturn strconv.FormatUint(uint64(i), 10), nil\n\tcase uint16:\n\t\treturn strconv.FormatUint(uint64(i), 10), nil\n\tcase uint32:\n\t\treturn strconv.FormatUint(uint64(i), 10), nil\n\tcase uint64:\n\t\treturn strconv.FormatUint(uint64(i), 10), nil\n\tcase float64:\n\t\treturn strconv.FormatFloat(i, 'f', -1, 64), nil\n\tcase bool:\n\t\treturn strconv.FormatBool(i), nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"cannot convert %v to string, type %T\", i, i)\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_convert/event_convert_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_convert\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"string_convert\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\n\t\t\t\t\"^convert-me$\",\n\t\t\t\t\"^number*\",\n\t\t\t},\n\t\t\t\"debug\": true,\n\t\t\t\"type\":  \"string\",\n\t\t},\n\t\ttests: []item{\n\t\t\t// nil msg\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t// empty msg\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t// non matching values\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// matching values and tags\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"convert-me\": 100},\n\t\t\t\t\t\tTags:   map[string]string{\"convert-me\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: 
map[string]interface{}{\"convert-me\": \"100\"},\n\t\t\t\t\t\tTags:   map[string]string{\"convert-me\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 2 msgs, with matching values\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"convert-me\": 100},\n\t\t\t\t\t\tTags:   map[string]string{\"convert-me\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"convert-me\": 200},\n\t\t\t\t\t\tTags:   map[string]string{\"convert-me\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"convert-me\": \"100\"},\n\t\t\t\t\t\tTags:   map[string]string{\"convert-me\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"convert-me\": \"200\"},\n\t\t\t\t\t\tTags:   map[string]string{\"convert-me\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 2 msgs, second with matching values\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"convert-me\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"convert-me\": 200},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"convert-me\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"convert-me\": \"200\"},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// matching value, already a string\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"convert-me\": \"1\"},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: 
map[string]interface{}{\"convert-me\": \"1\"},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// matching value, uint\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"number1\": uint8(100),\n\t\t\t\t\t\t\t\"number2\": uint16(100),\n\t\t\t\t\t\t\t\"number3\": uint32(100),\n\t\t\t\t\t\t\t\"number4\": uint64(100),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"number1\": \"100\",\n\t\t\t\t\t\t\t\"number2\": \"100\",\n\t\t\t\t\t\t\t\"number3\": \"100\",\n\t\t\t\t\t\t\t\"number4\": \"100\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// matching value, float64\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": float64(100.1)},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": \"100.1\"},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// matching value, bool\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": true},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": \"true\"},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"int_convert\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: 
map[string]interface{}{\n\t\t\t\"value-names\": []string{\"^number*\"},\n\t\t\t\"type\":        \"int\",\n\t\t},\n\t\ttests: []item{\n\t\t\t// nil msg\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t// empty msg\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t// non matching values\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// matching values and tags\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": \"100\"},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": int(100)},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 2 msgs, with matching values\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": \"100\"},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": \"200\"},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": int(100)},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": int(200)},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 2 msgs, second with matching 
values\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": \"200\"},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": int(200)},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// matching value, already an int\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": int(100)},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": int(100)},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// matching value, uint\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"number1\": uint8(100),\n\t\t\t\t\t\t\t\"number2\": uint16(100),\n\t\t\t\t\t\t\t\"number3\": uint32(100),\n\t\t\t\t\t\t\t\"number4\": uint64(100),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"number1\": int(100),\n\t\t\t\t\t\t\t\"number2\": int(100),\n\t\t\t\t\t\t\t\"number3\": int(100),\n\t\t\t\t\t\t\t\"number4\": int(100),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// matching value, float64\n\t\t\t{\n\t\t\t\tinput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": float64(100)},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": int(100)},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// matching value, bool\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": true},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": true},\n\t\t\t\t\t\tTags:   map[string]string{\"number\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"uint_convert\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"^name.*\"},\n\t\t\t\"type\":        \"uint\",\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  []*formatters.EventMsg{{Values: map[string]interface{}{}}},\n\t\t\t\toutput: []*formatters.EventMsg{{Values: map[string]interface{}{}}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{{\n\t\t\t\t\tValues: map[string]interface{}{\"name_value_bytes\": \"42\"}}},\n\t\t\t\toutput: []*formatters.EventMsg{{\n\t\t\t\t\tValues: map[string]interface{}{\"name_value_bytes\": uint(42)}}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{{\n\t\t\t\t\tValues: map[string]interface{}{\"name_value_bytes\": uint(42)}}},\n\t\t\t\toutput: []*formatters.EventMsg{{\n\t\t\t\t\tValues: map[string]interface{}{\"name_value_bytes\": uint(42)}}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{{\n\t\t\t\t\tValues: map[string]interface{}{\"name_value_bytes\": -42}}},\n\t\t\t\toutput: 
[]*formatters.EventMsg{{\n\t\t\t\t\tValues: map[string]interface{}{\"name_value_bytes\": uint(0)}}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{{\n\t\t\t\t\tValues: map[string]interface{}{\"name_value_bytes\": true}}},\n\t\t\t\toutput: []*formatters.EventMsg{{\n\t\t\t\t\tValues: map[string]interface{}{\"name_value_bytes\": true}}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{{\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"name_value_bytes1\": int8(74),\n\t\t\t\t\t\t\"name_value_bytes2\": int16(75),\n\t\t\t\t\t\t\"name_value_bytes3\": int32(76),\n\t\t\t\t\t\t\"name_value_bytes4\": int64(77),\n\t\t\t\t\t}}},\n\n\t\t\t\toutput: []*formatters.EventMsg{{\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"name_value_bytes1\": uint(74),\n\t\t\t\t\t\t\"name_value_bytes2\": uint(75),\n\t\t\t\t\t\t\"name_value_bytes3\": uint(76),\n\t\t\t\t\t\t\"name_value_bytes4\": uint(77),\n\t\t\t\t\t}}},\n\t\t\t},\n\t\t},\n\t},\n\t\"float_convert\": {\n\t\tprocessorType: processorType,\n\t\tprocessor:     map[string]interface{}{\"value-names\": []string{\"^number*\"}, \"type\": \"float\"},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": []uint8{62, 192, 0, 0}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": float64(0.375)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": []uint8{64, 9, 33, 251, 84, 68, 45, 
24}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": float64(3.141592653589793)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": []uint8{64, 9, 33, 251, 84, 68, 45, 24, 32}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": float64(0)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": []uint8{62, 192, 0, 0}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": float64(0.375)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": \"1.1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": float64(1.1)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": uint(42)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": float64(42)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": int(42)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": float64(42)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": true},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": true},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventConvertToUint(t *testing.T) {\n\tts := testset[\"uint_convert\"]\n\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\tt.Log(\"found processor\")\n\t\tp := pi()\n\t\terr := p.Init(ts.processor, formatters.WithLogger(nil))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tt.Logf(\"processor: %+v\", p)\n\t\tfor i, item := range ts.tests {\n\t\t\tt.Run(\"uint_convert\", func(t *testing.T) {\n\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\tfor j := range outs {\n\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\tt.Logf(\"failed at uint_convert item %d, index %d\", i, j)\n\t\t\t\t\t\tt.Logf(\"expected: %#v\", item.output[j])\n\t\t\t\t\t\tt.Logf(\"     got: %#v\", outs[j])\n\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestEventConvertToInt(t *testing.T) {\n\tts := testset[\"int_convert\"]\n\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\tt.Log(\"found processor\")\n\t\tp := pi()\n\t\terr := p.Init(ts.processor, formatters.WithLogger(nil))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tfor i, item := range ts.tests {\n\t\t\tt.Run(\"int_convert\", func(t *testing.T) {\n\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\tfor j := range outs {\n\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\tt.Logf(\"failed at int_convert item %d, index %d\", i, j)\n\t\t\t\t\t\tt.Logf(\"expected: %#v\", item.output[j])\n\t\t\t\t\t\tt.Logf(\"     got: %#v\", outs[j])\n\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestEventConvertToString(t *testing.T) {\n\tts := 
testset[\"string_convert\"]\n\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\tt.Log(\"found processor\")\n\t\tp := pi()\n\t\terr := p.Init(ts.processor, formatters.WithLogger(nil))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tfor i, item := range ts.tests {\n\t\t\tt.Run(\"string_convert\", func(t *testing.T) {\n\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\tfor j := range outs {\n\t\t\t\t\tif !cmp.Equal(outs[j], item.output[j]) {\n\t\t\t\t\t\tt.Logf(\"failed at string_convert item %d, index %d\", i, j)\n\t\t\t\t\t\tt.Logf(\"expected: %#v\", item.output[j])\n\t\t\t\t\t\tt.Logf(\"     got: %#v\", outs[j])\n\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestEventConvertToFloat(t *testing.T) {\n\tts := testset[\"float_convert\"]\n\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\tt.Log(\"found processor\")\n\t\tp := pi()\n\t\terr := p.Init(ts.processor, formatters.WithLogger(nil))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tfor i, item := range ts.tests {\n\t\t\tt.Run(\"float_convert\", func(t *testing.T) {\n\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\tfor j := range outs {\n\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\tt.Logf(\"failed at float_convert item %d, index %d\", i, j)\n\t\t\t\t\t\tt.Logf(\"expected: %#v\", item.output[j])\n\t\t\t\t\t\tt.Logf(\"     got: %#v\", outs[j])\n\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_data_convert/event_data_convert.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_data_convert\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\tunits \"github.com/bcicen/go-units\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-data-convert\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\nvar stringUnitRegex = regexp.MustCompile(`([+-]?([0-9]*[.])?[0-9]+)\\s?(\\S+)`)\n\n// dataConvert converts the value with key matching one of regexes, to the specified data unit\ntype dataConvert struct {\n\tformatters.BaseProcessor\n\tValues []string `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tFrom   string   `mapstructure:\"from,omitempty\" json:\"from,omitempty\"`\n\tTo     string   `mapstructure:\"to,omitempty\" json:\"to,omitempty\"`\n\tKeep   bool     `mapstructure:\"keep,omitempty\" json:\"keep,omitempty\"`\n\tOld    string   `mapstructure:\"old,omitempty\" json:\"old,omitempty\"`\n\tNew    string   `mapstructure:\"new,omitempty\" json:\"new,omitempty\"`\n\tDebug  bool     `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\tvalues      []*regexp.Regexp\n\trenameRegex *regexp.Regexp\n\tlogger      *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &dataConvert{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (c *dataConvert) Init(cfg interface{}, opts 
...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(c)\n\t}\n\tc.values = make([]*regexp.Regexp, 0, len(c.Values))\n\tfor _, reg := range c.Values {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.values = append(c.values, re)\n\t}\n\tif c.Old != \"\" {\n\t\tc.renameRegex, err = regexp.Compile(c.Old)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif c.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(c)\n\t\tif err != nil {\n\t\t\tc.logger.Printf(\"initialized processor '%s': %+v\", processorType, c)\n\t\t\treturn nil\n\t\t}\n\t\tc.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\n\treturn nil\n}\n\nfunc (c *dataConvert) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\t// add new Values to a new map to avoid multiple chained regex matches\n\t\tnewValues := make(map[string]interface{})\n\t\tfor k, v := range e.Values {\n\t\t\tfor _, re := range c.values {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\tc.logger.Printf(\"key '%s' matched regex '%s'\", k, re.String())\n\t\t\t\t\tiv, err := c.convertData(k, v, nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tc.logger.Printf(\"data convert error: %v\", err)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tc.logger.Printf(\"key '%s', value %v converted to %s: %f\", k, v, c.To, iv)\n\t\t\t\t\tif c.renameRegex != nil {\n\t\t\t\t\t\tnewValues[c.getNewName(k)] = iv\n\t\t\t\t\t\tif !c.Keep {\n\t\t\t\t\t\t\tdelete(e.Values, k)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif c.Keep {\n\t\t\t\t\t\tnewValues[fmt.Sprintf(\"%s_%s\", k, c.To)] = iv\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tnewValues[k] = iv\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// add new values to the original message\n\t\tfor k, v := range newValues {\n\t\t\te.Values[k] = 
v\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (c *dataConvert) WithLogger(l *log.Logger) {\n\tif c.Debug && l != nil {\n\t\tc.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if c.Debug {\n\t\tc.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc (c *dataConvert) convertData(k string, i interface{}, from *units.Unit) (float64, error) {\n\tif from == nil && c.From == \"\" {\n\t\tfrom = unitFromName(k)\n\t}\n\tif from == nil {\n\t\tfr := sToU(c.From)\n\t\tfrom = &fr\n\t}\n\tswitch i := i.(type) {\n\tcase string:\n\t\tiv, err := strconv.Atoi(i)\n\t\tif err != nil {\n\t\t\tv, unit, err := parseStringUnit(i)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn c.convertData(k, v, &unit)\n\t\t}\n\t\treturn c.convertData(k, iv, nil)\n\tcase int:\n\t\tcv, err := units.ConvertFloat(float64(i), *from, sToU(c.To))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn cv.Float(), nil\n\tcase int8:\n\t\tcv, err := units.ConvertFloat(float64(i), *from, sToU(c.To))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn cv.Float(), nil\n\tcase int16:\n\t\tcv, err := units.ConvertFloat(float64(i), *from, sToU(c.To))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn cv.Float(), nil\n\tcase int32:\n\t\tcv, err := units.ConvertFloat(float64(i), *from, sToU(c.To))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn cv.Float(), nil\n\tcase int64:\n\t\tif from == nil {\n\t\t\t*from = sToU(c.From)\n\t\t}\n\t\tcv, err := units.ConvertFloat(float64(i), *from, sToU(c.To))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn cv.Float(), nil\n\tcase uint:\n\t\tcv, err := units.ConvertFloat(float64(i), *from, sToU(c.To))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn cv.Float(), nil\n\tcase uint8:\n\t\tcv, err := units.ConvertFloat(float64(i), *from, sToU(c.To))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn cv.Float(), nil\n\tcase uint16:\n\t\tcv, err := 
units.ConvertFloat(float64(i), *from, sToU(c.To))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn cv.Float(), nil\n\tcase uint32:\n\t\tcv, err := units.ConvertFloat(float64(i), *from, sToU(c.To))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn cv.Float(), nil\n\tcase uint64:\n\t\tcv, err := units.ConvertFloat(float64(i), *from, sToU(c.To))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn cv.Float(), nil\n\tcase float64:\n\t\tcv, err := units.ConvertFloat(i, *from, sToU(c.To))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn cv.Float(), nil\n\tcase float32:\n\t\tcv, err := units.ConvertFloat(float64(i), *from, sToU(c.To))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn cv.Float(), nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"cannot convert %v, type %T\", i, i)\n\t}\n}\n\nfunc sToU(s string) units.Unit {\n\tswitch s {\n\tcase \"b\":\n\t\treturn units.Bit\n\tcase \"kb\":\n\t\treturn units.KiloBit\n\tcase \"mb\":\n\t\treturn units.MegaBit\n\tcase \"gb\":\n\t\treturn units.GigaBit\n\tcase \"tb\":\n\t\treturn units.TeraBit\n\tcase \"eb\":\n\t\treturn units.ExaBit\n\t//\n\tcase \"B\":\n\t\treturn units.Byte\n\tcase \"KB\":\n\t\treturn units.KiloByte\n\tcase \"MB\":\n\t\treturn units.MegaByte\n\tcase \"GB\":\n\t\treturn units.GigaByte\n\tcase \"TB\":\n\t\treturn units.TeraByte\n\tcase \"EB\":\n\t\treturn units.ExaByte\n\tcase \"ZB\":\n\t\treturn units.ZettaByte\n\tcase \"YB\":\n\t\treturn units.YottaByte\n\t//\n\tcase \"KiB\":\n\t\treturn units.Kibibyte\n\tcase \"MiB\":\n\t\treturn units.Mebibyte\n\tcase \"GiB\":\n\t\treturn units.Gibibyte\n\tcase \"TiB\":\n\t\treturn units.Tebibyte\n\tcase \"EiB\":\n\t\treturn units.Exbibyte\n\tcase \"ZiB\":\n\t\treturn units.Zebibyte\n\tcase \"YiB\":\n\t\treturn units.Yobibyte\n\t//\n\tdefault:\n\t\treturn units.Byte\n\t}\n}\n\nfunc parseStringUnit(s string) (float64, units.Unit, error) {\n\t// derive unit from string\n\tgroups := stringUnitRegex.FindAllSubmatch([]byte(s), 
-1)\n\tif len(groups) == 0 {\n\t\treturn 0, units.Byte, errors.New(\"failed to parse string submatches\")\n\t}\n\tif len(groups[0]) != 4 {\n\t\treturn 0, units.Byte, errors.New(\"failed to parse string, unexpected number of groups\")\n\t}\n\t// check if the first match is equal to the original value\n\tif string(groups[0][0]) != s {\n\t\treturn 0, units.Byte, errors.New(\"failed to parse string, partial match\")\n\t}\n\tf, err := strconv.ParseFloat(string(groups[0][1]), 64)\n\tif err != nil {\n\t\treturn 0, units.Unit{}, err\n\t}\n\treturn f, sToU(string(groups[0][3])), nil\n}\n\nfunc unitFromName(k string) *units.Unit {\n\tswitch {\n\tcase strings.HasSuffix(k, \"_octets\"), strings.HasSuffix(k, \"_bytes\"), strings.HasSuffix(k, \"-octets\"), strings.HasSuffix(k, \"-bytes\"):\n\t\treturn &units.Byte\n\t}\n\treturn nil\n}\n\nfunc (c *dataConvert) getNewName(k string) string {\n\tif c.renameRegex != nil {\n\t\treturn c.renameRegex.ReplaceAllString(k, c.New)\n\t}\n\treturn k\n}\n"
  },
  {
    "path": "pkg/formatters/event_data_convert/event_data_convert_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_data_convert\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nfunc Test_dataConvert_Apply(t *testing.T) {\n\ttype fields map[string]interface{}\n\ttype args struct {\n\t\tes []*formatters.EventMsg\n\t}\n\ttests := []struct {\n\t\tname   string\n\t\tfields fields\n\t\targs   args\n\t\twant   []*formatters.EventMsg\n\t}{\n\t\t{\n\t\t\tname: \"nil_input\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{},\n\t\t\twant: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"one_msg_bytes\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\"_total$\",\n\t\t\t\t},\n\t\t\t\t\"to\":    \"KB\",\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"data_total\": 1024,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"data_total\": float64(1),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"one_msg_bytes_keep\",\n\t\t\tfields: 
map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\"_total$\",\n\t\t\t\t},\n\t\t\t\t\"to\":    \"KB\",\n\t\t\t\t\"keep\":  true,\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"data_total\": 1024,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"data_total\":    1024,\n\t\t\t\t\t\t\"data_total_KB\": float64(1),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"one_msg_bytes_from\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\"_total$\",\n\t\t\t\t},\n\t\t\t\t\"from\":  \"KB\",\n\t\t\t\t\"to\":    \"B\",\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"data_total\": 1,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"data_total\": float64(1024),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"one_msg_multiple_values\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\"_total$\",\n\t\t\t\t},\n\t\t\t\t\"from\":  \"KB\",\n\t\t\t\t\"to\":    \"B\",\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      
\"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"data_total\":  1,\n\t\t\t\t\t\t\t\"bytes_total\": 2,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"data_total\":  float64(1024),\n\t\t\t\t\t\t\"bytes_total\": float64(2048),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"two_messages\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\"_total$\",\n\t\t\t\t},\n\t\t\t\t\"from\":  \"KB\",\n\t\t\t\t\"to\":    \"B\",\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"data_total\":  1,\n\t\t\t\t\t\t\t\"bytes_total\": 2,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub2\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"data_total\":  1,\n\t\t\t\t\t\t\t\"bytes_total\": 2,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"data_total\":  float64(1024),\n\t\t\t\t\t\t\"bytes_total\": float64(2048),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub2\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"data_total\":  float64(1024),\n\t\t\t\t\t\t\"bytes_total\": 
float64(2048),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"string_value_with_unit\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\"_total$\",\n\t\t\t\t},\n\t\t\t\t\"to\":    \"B\",\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"data_total\":  \"1 KB\",\n\t\t\t\t\t\t\t\"bytes_total\": \"2KB\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"data_total\":  float64(1024),\n\t\t\t\t\t\t\"bytes_total\": float64(2048),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"one_msg_rename\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\"_total$\",\n\t\t\t\t},\n\t\t\t\t\"to\":    \"KB\",\n\t\t\t\t\"old\":   `^(bytes)(\\S+)`,\n\t\t\t\t\"new\":   \"kilobytes${2}\",\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"bytes_total\": 1024,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"kilobytes_total\": float64(1),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tc := &dataConvert{}\n\t\t\terr := c.Init(tt.fields, 
formatters.WithLogger(log.New(os.Stderr, \"[event-data-convert-test]\", log.Flags())))\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to init processor in test %q: %v\", tt.name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif got := c.Apply(tt.args.es...); !cmp.Equal(got, tt.want) {\n\t\t\t\tt.Errorf(\"got : %+v\", got)\n\t\t\t\tt.Errorf(\"want: %+v\", tt.want)\n\t\t\t\tt.Errorf(\"dataConvert.Apply() = %+v, want %+v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_date_string/event_date_string.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_date_string\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-date-string\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// dateString converts Tags and/or Values of unix timestamp to a human readable format.\n// Precision specifies the unit of the received timestamp, s, ms, us or ns.\n// DateTimeFormat is the desired datetime format, it defaults to RFC3339\ntype dateString struct {\n\tformatters.BaseProcessor\n\tTags      []string `mapstructure:\"tag-names,omitempty\" json:\"tag-names,omitempty\"`\n\tValues    []string `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tPrecision string   `mapstructure:\"precision,omitempty\" json:\"precision,omitempty\"`\n\tFormat    string   `mapstructure:\"format,omitempty\" json:\"format,omitempty\"`\n\tLocation  string   `mapstructure:\"location,omitempty\" json:\"location,omitempty\"`\n\tDebug     bool     `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\ttags     []*regexp.Regexp\n\tvalues   []*regexp.Regexp\n\tlocation *time.Location\n\tlogger   *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &dateString{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (d *dateString) Init(cfg interface{}, opts 
...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, d)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(d)\n\t}\n\t// init values regex\n\td.values = make([]*regexp.Regexp, 0, len(d.Values))\n\tfor _, reg := range d.Values {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.values = append(d.values, re)\n\t}\n\t// init tags regex\n\td.tags = make([]*regexp.Regexp, 0, len(d.Tags))\n\tfor _, reg := range d.Tags {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.tags = append(d.tags, re)\n\t}\n\t// set tz\n\td.location = time.Local\n\tif d.Location != \"\" {\n\t\tloc, err := time.LoadLocation(d.Location)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.location = loc\n\t}\n\tif d.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(d)\n\t\tif err != nil {\n\t\t\td.logger.Printf(\"initialized processor '%s': %+v\", processorType, d)\n\t\t\treturn nil\n\t\t}\n\t\td.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (d *dateString) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor k, v := range e.Values {\n\t\t\tfor _, re := range d.values {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\td.logger.Printf(\"key '%s' matched regex '%s'\", k, re.String())\n\t\t\t\t\tiv, err := convertToInt(v)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\td.logger.Printf(\"failed to convert '%v' to date string: %v\", v, err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tvar td time.Time\n\t\t\t\t\tswitch d.Precision {\n\t\t\t\t\tcase \"s\", \"sec\", \"second\":\n\t\t\t\t\t\ttd = time.Unix(int64(iv), 0)\n\t\t\t\t\tcase \"ms\", \"millisecond\":\n\t\t\t\t\t\ttd = time.Unix(0, int64(iv)*1000000)\n\t\t\t\t\tcase \"us\", \"microsecond\":\n\t\t\t\t\t\ttd = time.Unix(0, int64(iv)*1000)\n\t\t\t\t\tcase \"ns\", 
\"nanosecond\":\n\t\t\t\t\t\ttd = time.Unix(0, int64(iv))\n\t\t\t\t\t}\n\t\t\t\t\tif d.Format == \"\" {\n\t\t\t\t\t\td.Format = time.RFC3339\n\t\t\t\t\t}\n\t\t\t\t\te.Values[k] = td.In(d.location).Format(d.Format)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor k, v := range e.Tags {\n\t\t\tfor _, re := range d.tags {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\td.logger.Printf(\"key '%s' matched regex '%s'\", k, re.String())\n\t\t\t\t\tiv, err := strconv.Atoi(v)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"failed to convert %s to int: %v\", v, err)\n\t\t\t\t\t}\n\t\t\t\t\tvar td time.Time\n\t\t\t\t\tswitch d.Precision {\n\t\t\t\t\tcase \"s\", \"sec\", \"second\":\n\t\t\t\t\t\ttd = time.Unix(int64(iv), 0)\n\t\t\t\t\tcase \"ms\", \"millisecond\":\n\t\t\t\t\t\ttd = time.Unix(0, int64(iv)*1000000)\n\t\t\t\t\tcase \"us\", \"microsecond\":\n\t\t\t\t\t\ttd = time.Unix(0, int64(iv)*1000)\n\t\t\t\t\tcase \"ns\", \"nanosecond\":\n\t\t\t\t\t\ttd = time.Unix(0, int64(iv))\n\t\t\t\t\t}\n\t\t\t\t\tif d.Format == \"\" {\n\t\t\t\t\t\td.Format = time.RFC3339\n\t\t\t\t\t}\n\t\t\t\t\te.Values[k] = td.Format(d.Format)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (d *dateString) WithLogger(l *log.Logger) {\n\tif d.Debug && l != nil {\n\t\td.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if d.Debug {\n\t\td.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc convertToInt(i interface{}) (int, error) {\n\tswitch i := i.(type) {\n\tcase string:\n\t\tiv, err := strconv.Atoi(i)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\treturn iv, nil\n\tcase int:\n\t\treturn i, nil\n\tcase uint:\n\t\treturn int(i), nil\n\tcase float64:\n\t\treturn int(i), nil\n\tdefault:\n\t\treturn 0, errors.New(\"cannot convert to int\")\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_date_string/event_date_string_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_date_string\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"seconds_date_string\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"timestamp\"},\n\t\t\t\"precision\":   \"s\",\n\t\t\t\"location\":    \"Asia/Taipei\",\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"timestamp\": 1606824673},\n\t\t\t\t\t\tTags:   map[string]string{\"timestamp\": \"0\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"timestamp\": \"2020-12-01T20:11:13+08:00\"},\n\t\t\t\t\t\tTags:   map[string]string{\"timestamp\": \"0\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventDateString(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := 
formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tt.Log(\"found processor\")\n\t\t\tp := pi()\n\t\t\terr := p.Init(ts.processor)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"initialized for test %s: %+v\", name, p)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Logf(\"failed at event date string, item %d, index %d\", i, j)\n\t\t\t\t\t\t\tt.Logf(\"expected: %#v\", item.output[j])\n\t\t\t\t\t\t\tt.Logf(\"     got: %#v\", outs[j])\n\t\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_delete/event_delete.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_delete\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-delete\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// deletep, deletes ALL the tags or values matching one of the regexes\ntype deletep struct {\n\tformatters.BaseProcessor\n\tTags       []string `mapstructure:\"tags,omitempty\" json:\"tags,omitempty\"`\n\tValues     []string `mapstructure:\"values,omitempty\" json:\"values,omitempty\"`\n\tTagNames   []string `mapstructure:\"tag-names,omitempty\" json:\"tag-names,omitempty\"`\n\tValueNames []string `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tDebug      bool     `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\ttags   []*regexp.Regexp\n\tvalues []*regexp.Regexp\n\n\ttagNames   []*regexp.Regexp\n\tvalueNames []*regexp.Regexp\n\n\tlogger *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &deletep{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (d *deletep) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, d)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(d)\n\t}\n\t// init tags regex\n\td.tags = make([]*regexp.Regexp, 0, len(d.Tags))\n\tfor _, reg := range d.Tags {\n\t\tre, err := 
regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.tags = append(d.tags, re)\n\t}\n\t// init tag names regex\n\td.tagNames = make([]*regexp.Regexp, 0, len(d.TagNames))\n\tfor _, reg := range d.TagNames {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.tagNames = append(d.tagNames, re)\n\t}\n\t// init values regex\n\td.values = make([]*regexp.Regexp, 0, len(d.Values))\n\tfor _, reg := range d.Values {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.values = append(d.values, re)\n\t}\n\t// init values names regex\n\td.valueNames = make([]*regexp.Regexp, 0, len(d.ValueNames))\n\tfor _, reg := range d.ValueNames {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.valueNames = append(d.valueNames, re)\n\t}\n\tif d.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(d)\n\t\tif err != nil {\n\t\t\td.logger.Printf(\"initialized processor '%s': %+v\", processorType, d)\n\t\t\treturn nil\n\t\t}\n\t\td.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (d *deletep) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor k, v := range e.Values {\n\t\t\tfor _, re := range d.valueNames {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\td.logger.Printf(\"key '%s' matched regex '%s'\", k, re.String())\n\t\t\t\t\tdelete(e.Values, k)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, re := range d.values {\n\t\t\t\tif vs, ok := v.(string); ok {\n\t\t\t\t\tif re.MatchString(vs) {\n\t\t\t\t\t\td.logger.Printf(\"key '%s' matched regex '%s'\", k, re.String())\n\t\t\t\t\t\tdelete(e.Values, k)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor k, v := range e.Tags {\n\t\t\tfor _, re := range d.tagNames {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\td.logger.Printf(\"key '%s' matched regex '%s'\", k, 
re.String())\n\t\t\t\t\tdelete(e.Tags, k)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, re := range d.tags {\n\t\t\t\tif re.MatchString(v) {\n\t\t\t\t\td.logger.Printf(\"key '%s' matched regex '%s'\", k, re.String())\n\t\t\t\t\tdelete(e.Tags, k)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (d *deletep) WithLogger(l *log.Logger) {\n\tif d.Debug && l != nil {\n\t\td.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if d.Debug {\n\t\td.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_delete/event_delete_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_delete\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"tag-names_delete\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tag-names\": []string{\"^name*\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"name\": \"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   
map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"name\": \"name_tag\", \"name-2\": \"name-2_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"name\": \"name_tag\", \"-name\": \"name-2_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"-name\": \"name-2_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"2_tag-names_delete\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tag-names\": []string{\"^name*\", \"to_delete\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1, \"todelete\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1, \"todelete\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"name\": \"name_tag\", 
\"to_delete\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"name\": \"name_tag\", \"name-2\": \"name-2_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"name\": \"name_tag\", \"-name\": \"name-2_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"-name\": \"name-2_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"value-names_delete\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"deleteme*\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"deleteme\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"-name\": \"name-2_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t\tTags:   
map[string]string{\"-name\": \"name-2_tag\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"deleteme\": 1, \"dont-deleteme\": 1}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"2_value-names_delete\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"deleteme\", \"deleteme-too\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"deleteme\": 1}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"deleteme\": 1, \"deleteme-too\": 1}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"tag-names_and_value-names_delete\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"deleteme-value*\"},\n\t\t\t\"tag-names\":   []string{\"deleteme-tag*\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: 
map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"deleteme-value\": 1}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"deleteme-tag\": \"tag\"}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"deleteme-value\": 1, \"dont-deleteme\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"deleteme-tag\": \"tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"dont-deleteme\": 1},\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"tags_delete\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tags\": []string{\"^name*\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t// 0\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t// 1\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 2\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 3\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"name\": 
\"name_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 4\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"name\": \"name_tag\", \"name-2\": \"name-2_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 5\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"name\": \"name_tag\", \"-name\": \"name-2_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"2_tags_delete\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tags\": []string{\"^name*\", \"to_delete\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t// 0\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t// 1\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 2\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{Values: map[string]interface{}{\"name\": 1, \"todelete\": \"to_delete\"}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1, \"todelete\": \"to_delete\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 3\n\t\t\t{\n\t\t\t\tinput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"name\": \"name_tag\", \"tag_name\": \"to_delete\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 4\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"name\": \"name_tag\", \"name-2\": \"name-2_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 5\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"name\": \"name_tag\", \"name-2\": \"-name-2_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"name-2\": \"-name-2_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"values_delete\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"values\": []string{\"deleteme*\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t// 0\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t// 1\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 2\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"deleteme\": \"deleteme\"},\n\t\t\t\t\t\tTags:   
map[string]string{\"-name\": \"name-2_tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t\tTags:   map[string]string{\"-name\": \"name-2_tag\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 3\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"foo\": \"deleteme\", \"dont-deleteme\": 1}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"dont-deleteme\": 1}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"2_values_delete\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"values\": []string{\"deleteme\", \"deleteme-too\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"foo\": \"deleteme\"}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"foo\": \"deleteme\", \"bar\": \"deleteme-too\"}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"tags_and_values_delete\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"values\": []string{\"deleteme-value*\"},\n\t\t\t\"tags\":   []string{\"deleteme-tag*\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"foo-value\": \"deleteme-value\"}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"foo-tag\": \"deleteme-tag\"}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"foo-value\": \"deleteme-value\", \"dont-deleteme\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"foo-tag\": \"deleteme-tag\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"dont-deleteme\": 1},\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventDelete(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tt.Log(\"found processor\")\n\t\t\tp := pi()\n\t\t\terr := p.Init(ts.processor)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"initialized for test %s: %+v\", name, p)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Logf(\"failed at event delete, item %d, index %d\", i, 
j)\n\t\t\t\t\t\t\tt.Logf(\"expected: %#v\", item.output[j])\n\t\t\t\t\t\t\tt.Logf(\"     got: %#v\", outs[j])\n\t\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"processors type %s not found\", ts.processorType)\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_drop/event_drop.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_drop\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com/itchyny/gojq\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-drop\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// drop Drops the msg if ANY of the Tags or Values regexes are matched\ntype drop struct {\n\tformatters.BaseProcessor\n\tCondition  string   `mapstructure:\"condition,omitempty\"`\n\tTagNames   []string `mapstructure:\"tag-names,omitempty\" json:\"tag-names,omitempty\"`\n\tValueNames []string `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tTags       []string `mapstructure:\"tags,omitempty\" json:\"tags,omitempty\"`\n\tValues     []string `mapstructure:\"values,omitempty\" json:\"values,omitempty\"`\n\tDebug      bool     `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\ttagNames   []*regexp.Regexp\n\tvalueNames []*regexp.Regexp\n\ttags       []*regexp.Regexp\n\tvalues     []*regexp.Regexp\n\tcode       *gojq.Code\n\tlogger     *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &drop{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (d *drop) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, d)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts 
{\n\t\topt(d)\n\t}\n\td.Condition = strings.TrimSpace(d.Condition)\n\tq, err := gojq.Parse(d.Condition)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.code, err = gojq.Compile(q)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// init tag keys regex\n\td.tagNames = make([]*regexp.Regexp, 0, len(d.TagNames))\n\tfor _, reg := range d.TagNames {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.tagNames = append(d.tagNames, re)\n\t}\n\td.tags = make([]*regexp.Regexp, 0, len(d.Tags))\n\tfor _, reg := range d.Tags {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.tags = append(d.tags, re)\n\t}\n\t//\n\td.valueNames = make([]*regexp.Regexp, 0, len(d.ValueNames))\n\tfor _, reg := range d.ValueNames {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.valueNames = append(d.valueNames, re)\n\t}\n\n\td.values = make([]*regexp.Regexp, 0, len(d.Values))\n\tfor _, reg := range d.Values {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.values = append(d.values, re)\n\t}\n\tif d.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(d)\n\t\tif err != nil {\n\t\t\td.logger.Printf(\"initialized processor '%s': %+v\", processorType, d)\n\t\t\treturn nil\n\t\t}\n\t\td.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (d *drop) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\ti := 0\n\tfor _, e := range es {\n\t\tif !d.drop(e) {\n\t\t\tes[i] = e\n\t\t\ti++\n\t\t}\n\t}\n\tfor j := i; j < len(es); j++ {\n\t\tes[j] = nil\n\t}\n\tes = es[:i]\n\treturn es\n}\n\nfunc (d *drop) WithLogger(l *log.Logger) {\n\tif d.Debug && l != nil {\n\t\td.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if d.Debug {\n\t\td.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc (d *drop) drop(e *formatters.EventMsg) bool {\n\tif d.Condition 
!= \"\" {\n\t\tok, err := formatters.CheckCondition(d.code, e)\n\t\tif err != nil {\n\t\t\td.logger.Printf(\"condition check failed: %v\", err)\n\t\t\treturn true\n\t\t}\n\t\treturn ok\n\t}\n\tfor k, v := range e.Values {\n\t\tfor _, re := range d.valueNames {\n\t\t\tif re.MatchString(k) {\n\t\t\t\td.logger.Printf(\"value name '%s' matched regex '%s'\", k, re.String())\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tfor _, re := range d.values {\n\t\t\tif vs, ok := v.(string); ok {\n\t\t\t\tif re.MatchString(vs) {\n\t\t\t\t\td.logger.Printf(\"value '%s' matched regex '%s'\", v, re.String())\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor k, v := range e.Tags {\n\t\tfor _, re := range d.tagNames {\n\t\t\tif re.MatchString(k) {\n\t\t\t\td.logger.Printf(\"tag name '%s' matched regex '%s'\", k, re.String())\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tfor _, re := range d.tags {\n\t\t\tif re.MatchString(v) {\n\t\t\t\td.logger.Printf(\"tag '%s' matched regex '%s'\", v, re.String())\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "pkg/formatters/event_drop/event_drop_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_drop\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"drop_condition\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"condition\": \".values.value == 1\",\n\t\t\t\"debug\":     true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 0},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 2},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 3},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 0},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 2},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 3},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"drop_values\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"^number$\"},\n\t\t\t\"debug\":       true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"number\": 1},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t},\n\t},\n\t\"drop_tags\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tag-names\": []string{\"^name*\"},\n\t\t\t\"debug\":     true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\"name\": \"dummy\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventDrop(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tp := pi()\n\t\t\terr := p.Init(ts.processor)\n\t\t\tif err != nil 
{\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"processor: %+v\", p)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tif len(outs) != len(item.output) {\n\t\t\t\t\t\tt.Logf(\"output length mismatch\")\n\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t}\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Logf(\"failed at event drop, item %d, index %d\", i, j)\n\t\t\t\t\t\t\tt.Logf(\"expected: %#v\", item.output[j])\n\t\t\t\t\t\t\tt.Logf(\"     got: %#v\", outs[j])\n\t\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar input = []*formatters.EventMsg{\n\t{\n\t\tValues: map[string]interface{}{\"value\": 0},\n\t},\n\t{\n\t\tValues: map[string]interface{}{\"value\": 1},\n\t},\n\t{\n\t\tValues: map[string]interface{}{\"value\": 2},\n\t},\n\t{\n\t\tValues: map[string]interface{}{\"value\": 3},\n\t},\n}\n\nfunc BenchmarkApply(b *testing.B) {\n\tpi := formatters.EventProcessors[\"event-drop\"]\n\tp := pi()\n\terr := p.Init(map[string]interface{}{\n\t\t\"condition\": \".values.value >= 1\",\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor i := 0; i < b.N; i++ {\n\t\tp.Apply(input...)\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_duration_convert/event_duration_convert.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_data_convert\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-duration-convert\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\nvar durationRegex = regexp.MustCompile(`((?P<weeks>\\d+)w)?((?P<days>\\d+)d)?((?P<hours>\\d+)h)?((?P<minutes>\\d+)m)?((?P<seconds>\\d+)s)?`)\n\n// durationConvert converts the value with key matching one of regexes, to the specified duration precision\ntype durationConvert struct {\n\tformatters.BaseProcessor\n\tValues []string `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tKeep   bool     `mapstructure:\"keep,omitempty\" json:\"keep,omitempty\"`\n\tDebug  bool     `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\tvalues []*regexp.Regexp\n\tlogger *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &durationConvert{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (c *durationConvert) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(c)\n\t}\n\tc.values = make([]*regexp.Regexp, 0, len(c.Values))\n\tfor _, reg := range c.Values {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\tc.values = append(c.values, re)\n\t}\n\tif c.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(c)\n\t\tif err != nil {\n\t\t\tc.logger.Printf(\"initialized processor '%s': %+v\", processorType, c)\n\t\t\treturn nil\n\t\t}\n\t\tc.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\n\treturn nil\n}\n\nfunc (c *durationConvert) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\t// add new Values to a new map to avoid multiple chained regex matches\n\t\tnewValues := make(map[string]interface{})\n\t\tfor k, v := range e.Values {\n\t\t\tfor _, re := range c.values {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\tc.logger.Printf(\"key '%s' matched regex '%s'\", k, re.String())\n\t\t\t\t\tdur, err := c.convertDuration(k, v)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tc.logger.Printf(\"duration convert error: %v\", err)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tc.logger.Printf(\"key '%s', value %v converted to seconds: %d\", k, v, dur)\n\t\t\t\t\tif c.Keep {\n\t\t\t\t\t\tnewValues[fmt.Sprintf(\"%s_seconds\", k)] = dur\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tnewValues[k] = dur\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// add new values to the original message\n\t\tfor k, v := range newValues {\n\t\t\te.Values[k] = v\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (c *durationConvert) WithLogger(l *log.Logger) {\n\tif c.Debug && l != nil {\n\t\tc.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if c.Debug {\n\t\tc.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc (c *durationConvert) convertDuration(k string, i interface{}) (int64, error) {\n\tswitch i := i.(type) {\n\tcase string:\n\t\tiv, err := strconv.Atoi(i)\n\t\tif err != nil {\n\t\t\treturn parseStringDuration(i)\n\t\t}\n\t\treturn c.convertDuration(k, iv)\n\tcase int:\n\t\treturn int64(i), nil\n\tcase int8:\n\t\treturn int64(i), nil\n\tcase 
int16:\n\t\treturn int64(i), nil\n\tcase int32:\n\t\treturn int64(i), nil\n\tcase int64:\n\t\treturn int64(i), nil\n\tcase uint:\n\t\treturn int64(i), nil\n\tcase uint8:\n\t\treturn int64(i), nil\n\tcase uint16:\n\t\treturn int64(i), nil\n\tcase uint32:\n\t\treturn int64(i), nil\n\tcase uint64:\n\t\treturn int64(i), nil\n\tcase float64:\n\t\treturn int64(i), nil\n\tcase float32:\n\t\treturn int64(i), nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"cannot convert %v, type %T\", i, i)\n\t}\n}\n\nfunc parseStringDuration(s string) (int64, error) {\n\tmatch := durationRegex.FindStringSubmatch(s)\n\tnamedGroups := make(map[string]string)\n\tfor i, name := range durationRegex.SubexpNames() {\n\t\tif i != 0 && name != \"\" {\n\t\t\tnamedGroups[name] = match[i]\n\t\t}\n\t}\n\tr := int64(0)\n\tfor k, v := range namedGroups {\n\t\tif v == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tswitch k {\n\t\tcase \"weeks\":\n\t\t\ti, err := strconv.Atoi(v)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tr += int64(i) * 7 * 24 * 60 * 60\n\t\tcase \"days\":\n\t\t\ti, err := strconv.Atoi(v)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tr += int64(i) * 24 * 60 * 60\n\t\tcase \"hours\":\n\t\t\ti, err := strconv.Atoi(v)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tr += int64(i) * 60 * 60\n\t\tcase \"minutes\":\n\t\t\ti, err := strconv.Atoi(v)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tr += int64(i) * 60\n\t\tcase \"seconds\":\n\t\t\ti, err := strconv.Atoi(v)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tr += int64(i)\n\t\t}\n\t}\n\treturn r, nil\n}\n"
  },
  {
    "path": "pkg/formatters/event_duration_convert/event_duration_convert_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_data_convert\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\toneMins = int64(60)\n\toneHs   = int64(60 * 60)\n\toneDs   = int64(24 * 60 * 60)\n\toneWs   = int64(7 * 24 * 60 * 60)\n)\n\nfunc Test_durationConvert_Apply(t *testing.T) {\n\ttype fields map[string]interface{}\n\ttype args struct {\n\t\tes []*formatters.EventMsg\n\t}\n\ttests := []struct {\n\t\tname   string\n\t\tfields fields\n\t\targs   args\n\t\twant   []*formatters.EventMsg\n\t}{\n\t\t{\n\t\t\tname: \"nil_input\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{},\n\t\t\twant: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"week\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*uptime\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"connection_uptime\": \"1w\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"connection_uptime\": 
oneWs,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"week_day\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*uptime\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"connection_uptime\": \"1w2d\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"connection_uptime\": oneWs + 2*oneDs,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"week_day_hour\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*uptime\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"connection_uptime\": \"1w2d3h\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"connection_uptime\": oneWs + 2*oneDs + 3*oneHs,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"week_day_hour_minute\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*uptime\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:     
 map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"connection_uptime\": \"1w2d3h4m\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"connection_uptime\": oneWs + 2*oneDs + 3*oneHs + 4*oneMins,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"week_day_hour_minute_second\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*uptime\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"connection_uptime\": \"1w2d3h4m5s\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"connection_uptime\": oneWs + 2*oneDs + 3*oneHs + 4*oneMins + 5,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"week_second\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*uptime\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"connection_uptime\": \"1w5s\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: 
map[string]interface{}{\n\t\t\t\t\t\t\"connection_uptime\": oneWs + 5,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tc := &durationConvert{}\n\t\t\terr := c.Init(tt.fields, formatters.WithLogger(log.New(os.Stderr, \"[event-duration-convert-test]\", log.Flags())))\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to init processor in test %q: %v\", tt.name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif got := c.Apply(tt.args.es...); !cmp.Equal(got, tt.want) {\n\t\t\t\tt.Errorf(\"durationConvert.Apply() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_extract_tags/event_extract_tags.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_extract_tags\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-extract-tags\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// extractTags extracts tags from a value, a value name, a tag name or a tag value using regex named groups\ntype extractTags struct {\n\tformatters.BaseProcessor\n\tTags       []string `mapstructure:\"tags,omitempty\" json:\"tags,omitempty\"`\n\tValues     []string `mapstructure:\"values,omitempty\" json:\"values,omitempty\"`\n\tTagNames   []string `mapstructure:\"tag-names,omitempty\" json:\"tag-names,omitempty\"`\n\tValueNames []string `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tOverwrite  bool     `mapstructure:\"overwrite,omitempty\" json:\"overwrite,omitempty\"`\n\tDebug      bool     `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\ttags       []*regexp.Regexp\n\tvalues     []*regexp.Regexp\n\ttagNames   []*regexp.Regexp\n\tvalueNames []*regexp.Regexp\n\tlogger     *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &extractTags{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (p *extractTags) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range 
opts {\n\t\topt(p)\n\t}\n\t// init tags regex\n\tp.tags = make([]*regexp.Regexp, 0, len(p.Tags))\n\tfor _, reg := range p.Tags {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.tags = append(p.tags, re)\n\t}\n\t// init tag names regex\n\tp.tagNames = make([]*regexp.Regexp, 0, len(p.TagNames))\n\tfor _, reg := range p.TagNames {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.tagNames = append(p.tagNames, re)\n\t}\n\t// init values regex\n\tp.values = make([]*regexp.Regexp, 0, len(p.Values))\n\tfor _, reg := range p.Values {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.values = append(p.values, re)\n\t}\n\t// init value names regex\n\tp.valueNames = make([]*regexp.Regexp, 0, len(p.ValueNames))\n\tfor _, reg := range p.ValueNames {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.valueNames = append(p.valueNames, re)\n\t}\n\n\tif p.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(p)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"initialized processor '%s': %+v\", processorType, p)\n\t\t\treturn nil\n\t\t}\n\t\tp.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (p *extractTags) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor k, v := range e.Values {\n\t\t\tfor _, re := range p.valueNames {\n\t\t\t\tp.addTags(e, re, k)\n\t\t\t}\n\t\t\tfor _, re := range p.values {\n\t\t\t\tif vs, ok := v.(string); ok {\n\t\t\t\t\tp.addTags(e, re, vs)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor k, v := range e.Tags {\n\t\t\tfor _, re := range p.tagNames {\n\t\t\t\tp.addTags(e, re, k)\n\t\t\t}\n\t\t\tfor _, re := range p.tags {\n\t\t\t\tp.addTags(e, re, v)\n\t\t\t}\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (p *extractTags) WithLogger(l *log.Logger) {\n\tif p.Debug && l != nil 
{\n\t\tp.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if p.Debug {\n\t\tp.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc (p *extractTags) addTags(e *formatters.EventMsg, re *regexp.Regexp, s string) {\n\tif e.Tags == nil {\n\t\te.Tags = make(map[string]string)\n\t}\n\n\tmatches := re.FindStringSubmatch(s)\n\tif p.Debug {\n\t\tp.logger.Printf(\"matches: %+v\", matches)\n\t}\n\tif len(matches) != len(re.SubexpNames()) {\n\t\treturn\n\t}\n\tfor i, name := range re.SubexpNames() {\n\t\tif i != 0 && name != \"\" {\n\t\t\tif p.Debug {\n\t\t\t\tp.logger.Printf(\"adding: name=%s, value=%s\", name, matches[i])\n\t\t\t}\n\t\t\tif p.Overwrite {\n\t\t\t\te.Tags[name] = matches[i]\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := e.Tags[name]; !ok {\n\t\t\t\te.Tags[name] = matches[i]\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_extract_tags/event_extract_tags_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_extract_tags\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"match_value_names\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"debug\": true,\n\t\t\t\"value-names\": []string{\n\t\t\t\t`/(?P<e1>\\w+)/(?P<e2>\\w+)/(?P<e3>\\w+)`,\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"/elem1/elem2/elem3\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"/elem1/elem2/elem3\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"e1\":   \"elem1\",\n\t\t\t\t\t\t\t\"e2\":   \"elem2\",\n\t\t\t\t\t\t\t\"e3\":   \"elem3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_value_names_partial\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"debug\": true,\n\t\t\t\"value-names\": 
[]string{\n\t\t\t\t`/(?P<e1>\\w+)/(\\w+)/(?P<e3>\\w+)`,\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"/elem1/elem2/elem3\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"/elem1/elem2/elem3\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"e1\":   \"elem1\",\n\t\t\t\t\t\t\t\"e3\":   \"elem3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_tag_names\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"debug\": true,\n\t\t\t\"tag-names\": []string{\n\t\t\t\t`/(?P<e1>\\w+)/(?P<e2>\\w+)/(?P<e3>\\w+)`,\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\":               \"1\",\n\t\t\t\t\t\t\t\"/elem1/elem2/elem3\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"/elem1/elem2/elem3\": \"1\",\n\t\t\t\t\t\t\t\"tag1\":               \"1\",\n\t\t\t\t\t\t\t\"e1\":                 \"elem1\",\n\t\t\t\t\t\t\t\"e2\":                 \"elem2\",\n\t\t\t\t\t\t\t\"e3\":                 \"elem3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_tag_names_partial\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: 
map[string]interface{}{\n\t\t\t\"debug\": true,\n\t\t\t\"tag-names\": []string{\n\t\t\t\t`/(?P<e1>\\w+)/(\\w+)/(?P<e3>\\w+)`,\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\":               \"1\",\n\t\t\t\t\t\t\t\"/elem1/elem2/elem3\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"/elem1/elem2/elem3\": \"1\",\n\t\t\t\t\t\t\t\"tag1\":               \"1\",\n\t\t\t\t\t\t\t\"e1\":                 \"elem1\",\n\t\t\t\t\t\t\t\"e3\":                 \"elem3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventAddTag(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tt.Log(\"found processor\")\n\t\t\tp := pi()\n\t\t\terr := p.Init(ts.processor)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"processor: %+v\", p)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Logf(\"failed at %s item %d, index %d, expected: %+v\", name, i, j, item.output[j])\n\t\t\t\t\t\t\tt.Logf(\"failed at %s item %d, index %d,      got: %+v\", name, i, j, outs[j])\n\t\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"event processor %s not found\", ts.processorType)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_group_by/event_group_by.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_group_by\n\nimport (\n\t\"encoding/json\"\n\t\"hash/fnv\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"slices\"\n\t\"strings\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-group-by\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// groupBy groups values from different event messages in the same event message\n// based on tags values\ntype groupBy struct {\n\tformatters.BaseProcessor\n\tTags   []string `mapstructure:\"tags,omitempty\" json:\"tags,omitempty\"`\n\tByName bool     `mapstructure:\"by-name,omitempty\" json:\"by-name,omitempty\"`\n\tDebug  bool     `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\tlogger *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &groupBy{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (p *groupBy) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\n\tif p.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(p)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"initialized processor '%s': %+v\", processorType, p)\n\t\t\treturn nil\n\t\t}\n\t\tp.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (p *groupBy) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg 
{\n\tresult := make([]*formatters.EventMsg, 0, len(es))\n\tif p.Debug {\n\t\tp.logger.Printf(\"before: %+v\", es)\n\t}\n\tif !p.ByName {\n\t\tresult = p.byTags(es)\n\t\tif p.Debug {\n\t\t\tp.logger.Printf(\"after: %+v\", result)\n\t\t}\n\t\treturn result\n\t}\n\tgroups := make(map[string][]*formatters.EventMsg)\n\tnames := make([]string, 0)\n\tfor _, e := range es {\n\t\t_, ok := groups[e.Name]\n\t\tif !ok {\n\t\t\tgroups[e.Name] = make([]*formatters.EventMsg, 0)\n\t\t\tnames = append(names, e.Name)\n\t\t}\n\t\tgroups[e.Name] = append(groups[e.Name], e)\n\t}\n\tslices.Sort(names)\n\tfor _, n := range names {\n\t\tresult = append(result, p.byTags(groups[n])...)\n\t}\n\tif p.Debug {\n\t\tp.logger.Printf(\"after: %+v\", result)\n\t}\n\treturn result\n}\n\nfunc (p *groupBy) WithLogger(l *log.Logger) {\n\tif p.Debug && l != nil {\n\t\tp.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if p.Debug {\n\t\tp.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc (p *groupBy) byTagsOld(es []*formatters.EventMsg) []*formatters.EventMsg {\n\tif len(p.Tags) == 0 {\n\t\treturn es\n\t}\n\tresult := make([]*formatters.EventMsg, 0, len(es))\n\tgroups := make(map[string]*formatters.EventMsg)\n\tkeys := make([]string, 0)\n\tfor _, e := range es {\n\t\tif e == nil || e.Tags == nil || (e.Values == nil && e.Deletes == nil) {\n\t\t\tcontinue\n\t\t}\n\t\texist := true\n\t\tvar key strings.Builder\n\t\tfor _, t := range p.Tags {\n\t\t\tif v, ok := e.Tags[t]; ok {\n\t\t\t\tkey.WriteString(t)\n\t\t\t\tkey.Write(eqByte)\n\t\t\t\tkey.WriteString(v)\n\t\t\t\tkey.Write(pipeByte)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\texist = false\n\t\t\tbreak\n\t\t}\n\t\tif !exist {\n\t\t\tresult = append(result, e)\n\t\t\tcontinue\n\t\t}\n\n\t\tskey := key.String()\n\t\tgroup, ok := groups[skey]\n\t\tif !ok {\n\t\t\tkeys = append(keys, skey)\n\t\t\tgroup = &formatters.EventMsg{\n\t\t\t\tName:      e.Name,\n\t\t\t\tTimestamp: e.Timestamp,\n\t\t\t\tTags:      
make(map[string]string),\n\t\t\t\tValues:    make(map[string]interface{}),\n\t\t\t}\n\t\t\tgroups[skey] = group\n\t\t}\n\t\tfor k, v := range e.Tags {\n\t\t\tgroup.Tags[k] = v\n\t\t}\n\t\tfor k, v := range e.Values {\n\t\t\tgroup.Values[k] = v\n\t\t}\n\t\tif e.Deletes != nil {\n\t\t\tgroup.Deletes = append(group.Deletes, e.Deletes...)\n\t\t}\n\t}\n\tslices.Sort(keys)\n\tfor _, k := range keys {\n\t\tresult = append(result, groups[k])\n\t}\n\treturn result\n}\n\nfunc (p *groupBy) byTags(es []*formatters.EventMsg) []*formatters.EventMsg {\n\tif len(p.Tags) == 0 {\n\t\treturn es\n\t}\n\n\tresult := make([]*formatters.EventMsg, 0, len(es))\n\tgroups := make(map[uint64]*formatters.EventMsg)\n\n\tfor _, e := range es {\n\t\tif e == nil || e.Tags == nil || (e.Values == nil && e.Deletes == nil) {\n\t\t\tcontinue\n\t\t}\n\n\t\t//grouping key based on tags value\n\t\tskey, match := generateKeyAndCheck(e.Tags, p.Tags)\n\t\tif !match {\n\t\t\tresult = append(result, e)\n\t\t\tcontinue\n\t\t}\n\n\t\tgroup, exists := groups[skey]\n\t\tif !exists {\n\t\t\tgroup = &formatters.EventMsg{\n\t\t\t\tName:      e.Name,\n\t\t\t\tTimestamp: e.Timestamp,\n\t\t\t\tTags:      make(map[string]string, len(e.Tags)),\n\t\t\t\tValues:    make(map[string]interface{}, len(e.Values)),\n\t\t\t\tDeletes:   make([]string, 0, len(e.Deletes)),\n\t\t\t}\n\t\t\tgroups[skey] = group\n\t\t}\n\n\t\t// merge tags, values and deletes into the group\n\t\tfor k, v := range e.Tags {\n\t\t\tgroup.Tags[k] = v\n\t\t}\n\t\tfor k, v := range e.Values {\n\t\t\tgroup.Values[k] = v\n\t\t}\n\t\tif e.Deletes != nil {\n\t\t\tgroup.Deletes = append(group.Deletes, e.Deletes...)\n\t\t}\n\t}\n\n\tfor _, ev := range groups {\n\t\tresult = append(result, ev)\n\t}\n\n\treturn result\n}\n\nfunc generateKeyAndCheck(tags map[string]string, keys []string) (uint64, bool) {\n\th := fnv.New64a()\n\n\tfor _, k := range keys {\n\t\tv, ok := tags[k]\n\t\tif !ok {\n\t\t\treturn 0, 
false\n\t\t}\n\t\th.Write([]byte(k))\n\t\th.Write([]byte(eqByte))\n\t\th.Write([]byte(v))\n\t\th.Write([]byte(pipeByte))\n\t}\n\n\treturn h.Sum64(), true\n}\n\nvar (\n\teqByte   = []byte(\"=\")\n\tpipeByte = []byte(\"|\")\n)\n"
  },
  {
    "path": "pkg/formatters/event_group_by/event_group_by_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_group_by\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"group_by_1_tag\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tags\": []string{\"tag1\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value1\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value2\": 2},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value3\": 3},\n\t\t\t\t\t\tTags:   map[string]string{\"tag2\": \"2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"value3\": 3,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"value1\": 
1,\n\t\t\t\t\t\t\t\"value2\": 2,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletes: []string{\"value1\"},\n\t\t\t\t\t\tTags:    map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletes: []string{\"value2\"},\n\t\t\t\t\t\tTags:    map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletes: []string{\"value3\"},\n\t\t\t\t\t\tTags:    map[string]string{\"tag2\": \"2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletes: []string{\n\t\t\t\t\t\t\t\"value3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: make(map[string]interface{}),\n\t\t\t\t\t\tDeletes: []string{\n\t\t\t\t\t\t\t\"value1\",\n\t\t\t\t\t\t\t\"value2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"group_by_2_tags\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tags\": []string{\"tag1\", \"tag2\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value1\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value2\": 2},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": 
\"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value3\": 3},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value4\": 4},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"value1\": 1,\n\t\t\t\t\t\t\t\"value2\": 2,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"value3\": 3,\n\t\t\t\t\t\t\t\"value4\": 4,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletes: []string{\"value1\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletes: []string{\"value2\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletes: []string{\"value3\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tDeletes: []string{\"value4\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: make(map[string]interface{}),\n\t\t\t\t\t\tDeletes: 
[]string{\n\t\t\t\t\t\t\t\"value1\",\n\t\t\t\t\t\t\t\"value2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: make(map[string]interface{}),\n\t\t\t\t\t\tDeletes: []string{\n\t\t\t\t\t\t\t\"value3\",\n\t\t\t\t\t\t\t\"value4\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value1\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value1\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value2\": 2},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value3\": 3},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value4\": 4},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value1\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"value1\": 1,\n\t\t\t\t\t\t\t\"value2\": 2,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": 
\"1\",\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"value3\": 3,\n\t\t\t\t\t\t\t\"value4\": 4,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"group_by_name\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"by-name\": true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value1\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value2\": 2},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value1\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value2\": 2},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: make(map[string]interface{}),\n\t\t\t\t\t\tDeletes: []string{\n\t\t\t\t\t\t\t\"value1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   
\"sub1\",\n\t\t\t\t\t\tValues: make(map[string]interface{}),\n\t\t\t\t\t\tDeletes: []string{\n\t\t\t\t\t\t\t\"value2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: make(map[string]interface{}),\n\t\t\t\t\t\tDeletes: []string{\n\t\t\t\t\t\t\t\"value1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: make(map[string]interface{}),\n\t\t\t\t\t\tDeletes: []string{\n\t\t\t\t\t\t\t\"value2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value3\": 3},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value1\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value2\": 2},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value1\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value2\": 2},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag2\": 
\"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value3\": 3},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"group_by_name_by_tags\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"by-name\": true,\n\t\t\t\"tags\":    []string{\"tag1\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value1\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value2\": 2},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"value1\": 1,\n\t\t\t\t\t\t\t\"value2\": 2,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:    \"sub1\",\n\t\t\t\t\t\tDeletes: []string{\"value1\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:    \"sub1\",\n\t\t\t\t\t\tDeletes: []string{\"value2\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: 
make(map[string]interface{}),\n\t\t\t\t\t\tDeletes: []string{\n\t\t\t\t\t\t\t\"value1\",\n\t\t\t\t\t\t\t\"value2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value1\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value2\": 2},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"value1\": 1,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"value2\": 2,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:    \"sub1\",\n\t\t\t\t\t\tDeletes: []string{\"value1\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:    \"sub1\",\n\t\t\t\t\t\tDeletes: []string{\"value2\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:    \"sub1\",\n\t\t\t\t\t\tValues:  make(map[string]interface{}),\n\t\t\t\t\t\tDeletes: []string{\"value1\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": 
\"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:    \"sub1\",\n\t\t\t\t\t\tValues:  make(map[string]interface{}),\n\t\t\t\t\t\tDeletes: []string{\"value2\"},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventGroupBy(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tt.Log(\"found processor\")\n\t\t\tp := pi()\n\t\t\terr := p.Init(ts.processor)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"processor: %+v\", p)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tif len(outs) != len(item.output) {\n\t\t\t\t\t\tt.Errorf(\"failed at %s, outputs not of same length\", name)\n\t\t\t\t\t\tt.Errorf(\"expected: %v\", item.output)\n\t\t\t\t\t\tt.Errorf(\"     got: %v\", outs)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif !slicesEqual(outs, item.output) {\n\t\t\t\t\t\tt.Errorf(\"failed at %s, expected: %+v\", name, item.output)\n\t\t\t\t\t\tt.Errorf(\"failed at %s,      got: %+v\", name, outs)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"event processor %s not found\", ts.processorType)\n\t\t}\n\t}\n}\n\nfunc generateMockEvents(numEvents, numTags int) []*formatters.EventMsg {\n\tes := make([]*formatters.EventMsg, numEvents)\n\tfor i := 0; i < numEvents; i++ {\n\t\ttags := make(map[string]string, numTags)\n\t\tvalues := make(map[string]interface{}, numTags)\n\t\tfor j := 0; j < numTags; j++ {\n\t\t\ttags[fmt.Sprintf(\"tag%d\", j)] = fmt.Sprintf(\"value%d\", j)\n\t\t\tvalues[fmt.Sprintf(\"valueKey%d\", j)] = fmt.Sprintf(\"value%d\", j)\n\t\t}\n\t\tes[i] = &formatters.EventMsg{\n\t\t\tName:      fmt.Sprintf(\"event%d\", i%5), // Group some events by 
name\n\t\t\tTimestamp: int64(i),\n\t\t\tTags:      tags,\n\t\t\tValues:    values,\n\t\t}\n\t}\n\treturn es\n}\n\nfunc BenchmarkByTags(b *testing.B) {\n\tp := &groupBy{Tags: []string{\"tag1\", \"tag2\"}}\n\n\t// Generate mock event messages\n\tes := generateMockEvents(100_000, 5)\n\n\tb.Run(\"OldByTags\", func(b *testing.B) {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\t_ = p.byTagsOld(es)\n\t\t}\n\t})\n\n\tb.Run(\"NewByTags\", func(b *testing.B) {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\t_ = p.byTags(es)\n\t\t}\n\t})\n}\n\nfunc slicesEqual(slice1, slice2 []*formatters.EventMsg) bool {\n\tif len(slice1) != len(slice2) {\n\t\treturn false\n\t}\n\n\t// Create a map to track matches in slice2\n\tused := make([]bool, len(slice2))\n\n\t// Check that every item in slice1 has a match in slice2\n\tfor _, e1 := range slice1 {\n\t\tfound := false\n\t\tfor i, e2 := range slice2 {\n\t\t\tif !used[i] && eventMsgEqual(e1, e2) {\n\t\t\t\tused[i] = true\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn false // No match found for this item\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc eventMsgEqual(a, b *formatters.EventMsg) bool {\n\tif a == nil || b == nil {\n\t\treturn a == b\n\t}\n\n\tif a.Name != b.Name || a.Timestamp != b.Timestamp {\n\t\treturn false\n\t}\n\n\tif !reflect.DeepEqual(a.Tags, b.Tags) {\n\t\treturn false\n\t}\n\tif !reflect.DeepEqual(a.Values, b.Values) {\n\t\treturn false\n\t}\n\tif a.Deletes == nil && b.Deletes == nil {\n\t\treturn true\n\t}\n\tif len(a.Deletes) == 0 && len(b.Deletes) == 0 {\n\t\treturn true\n\t}\n\tif !reflect.DeepEqual(a.Deletes, b.Deletes) {\n\t\treturn false\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "pkg/formatters/event_ieeefloat32/event_ieeefloat32.go",
    "content": "// © 2024 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_ieeefloat32\n\nimport (\n\t\"encoding/base64\"\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com/itchyny/gojq\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-ieeefloat32\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// ieeefloat32 converts values from a base64 encoded string into a float32\ntype ieeefloat32 struct {\n\tformatters.BaseProcessor\n\tCondition  string   `mapstructure:\"condition,omitempty\"`\n\tValueNames []string `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tDebug      bool     `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\tvalueNames []*regexp.Regexp\n\tcode       *gojq.Code\n\tlogger     *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &ieeefloat32{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (p *ieeefloat32) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\tif p.Condition != \"\" {\n\t\tp.Condition = strings.TrimSpace(p.Condition)\n\t\tq, err := gojq.Parse(p.Condition)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.code, err = gojq.Compile(q)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t}\n\n\t// init value names regex\n\tp.valueNames, err = compileRegex(p.ValueNames)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif p.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(p)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"initialized processor '%s': %+v\", processorType, p)\n\t\t\treturn nil\n\t\t}\n\t\tp.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (p *ieeefloat32) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\t// condition is set\n\t\tif p.code != nil && p.Condition != \"\" {\n\t\t\tok, err := formatters.CheckCondition(p.code, e)\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Printf(\"condition check failed: %v\", err)\n\t\t\t}\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t// condition passed => check regexes\n\t\tfor k, v := range e.Values {\n\t\t\tfor _, re := range p.valueNames {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\tf, err := p.decodeBase64String(v)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tp.logger.Printf(\"failed to decode base64 string: %v\", err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\te.Values[k] = f\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (p *ieeefloat32) WithLogger(l *log.Logger) {\n\tif p.Debug && l != nil {\n\t\tp.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if p.Debug {\n\t\tp.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc (p *ieeefloat32) decodeBase64String(e any) (float32, error) {\n\tvar err error\n\tvar data []byte\n\tswitch b64 := e.(type) {\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"invalid type: %T\", e)\n\tcase string:\n\t\tdata, err = base64.StdEncoding.DecodeString(b64)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"failed to decode base64: %v\", err)\n\t\t}\n\tcase []byte:\n\t\tdata = b64\n\t}\n\tif len(data) < 4 {\n\t\treturn 0, fmt.Errorf(\"decoded data is 
less than 4 bytes\")\n\t}\n\tbits := binary.BigEndian.Uint32(data[:4])\n\tfloatVal := math.Float32frombits(bits)\n\treturn floatVal, nil\n}\n\nfunc compileRegex(expr []string) ([]*regexp.Regexp, error) {\n\tres := make([]*regexp.Regexp, 0, len(expr))\n\tfor _, reg := range expr {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tres = append(res, re)\n\t}\n\treturn res, nil\n}\n"
  },
  {
    "path": "pkg/formatters/event_ieeefloat32/event_ieeefloat32_test.go",
    "content": "// © 2024 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_ieeefloat32\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"simple\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\n\t\t\t\t\"^components/component/power-supply/state/output-current$\",\n\t\t\t\t\"^components/component/power-supply/state/input-current$\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"components/component/power-supply/state/output-current\": \"QEYAAA==\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"components/component/power-supply/state/output-current\": float32(3.09375)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"components/component/power-supply/state/output-current\": \"QEYAAA==\",\n\t\t\t\t\t\t\t\"components/component/power-supply/state/input-current\":  \"QEYAAA==\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"components/component/power-supply/state/output-current\": float32(3.09375),\n\t\t\t\t\t\t\t\"components/component/power-supply/state/input-current\":  float32(3.09375),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventIEEEFloat32(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tt.Log(\"found processor\")\n\t\t\tp := pi()\n\t\t\terr := p.Init(ts.processor)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"processor: %+v\", p)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Logf(\"failed at %s item %d, index %d, expected: %+v\", name, i, j, item.output[j])\n\t\t\t\t\t\t\tt.Logf(\"failed at %s item %d, index %d,      got: %+v\", name, i, j, outs[j])\n\t\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"event processor %s not found\", ts.processorType)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_jq/event_jq.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_jq\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/itchyny/gojq\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType     = \"event-jq\"\n\tloggingPrefix     = \"[\" + processorType + \"] \"\n\tdefaultCondition  = \"all([true])\"\n\tdefaultExpression = \".\"\n)\n\n// jq runs a jq expression on the received event messages\ntype jq struct {\n\tformatters.BaseProcessor\n\tCondition  string `mapstructure:\"condition,omitempty\"`\n\tExpression string `mapstructure:\"expression,omitempty\"`\n\tDebug      bool   `mapstructure:\"debug,omitempty\"`\n\n\tcond   *gojq.Code\n\texpr   *gojq.Code\n\tlogger *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &jq{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (p *jq) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\tp.setDefaults()\n\tp.Condition = strings.TrimSpace(p.Condition)\n\tq, err := gojq.Parse(p.Condition)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.cond, err = gojq.Compile(q)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.Expression = strings.TrimSpace(p.Expression)\n\tq, err = gojq.Parse(p.Expression)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.expr, err = gojq.Compile(q)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (p *jq) setDefaults() {\n\tif p.Condition == \"\" {\n\t\tp.Condition = defaultCondition\n\t}\n\tif p.Expression == \"\" {\n\t\tp.Expression = defaultExpression\n\t}\n}\n\nfunc (p *jq) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tnuMsgs := len(es)\n\tinputs := make([]interface{}, 0, nuMsgs)\n\tres := make([]*formatters.EventMsg, 0, nuMsgs)\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\tinput := e.ToMap()\n\t\tok, err := p.evaluateCondition(input)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"failed to evaluate condition: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif ok {\n\t\t\tinputs = append(inputs, input)\n\t\t\tcontinue\n\t\t}\n\t\tres = append(res, e)\n\t}\n\tevs, err := p.applyExpression(inputs)\n\tif err != nil {\n\t\tp.logger.Printf(\"failed to apply jq expression: %v\", err)\n\t\treturn nil\n\t}\n\treturn append(res, evs...)\n}\n\nfunc (p *jq) evaluateCondition(input map[string]interface{}) (bool, error) {\n\tvar res interface{}\n\tvar err error\n\tif p.cond != nil {\n\t\titer := p.cond.Run(input)\n\t\tvar ok bool\n\t\tres, ok = iter.Next()\n\t\tif !ok {\n\t\t\t// iterator not done, so the final result won't be a boolean\n\t\t\treturn false, nil\n\t\t}\n\t\tif err, ok = res.(error); ok {\n\t\t\treturn false, err\n\t\t}\n\t\tp.logger.Printf(\"condition jq result: (%T)%v for input %+v\", res, res, input)\n\t}\n\tswitch res := res.(type) {\n\tcase bool:\n\t\treturn res, nil\n\tdefault:\n\t\treturn false, errors.New(\"unexpected condition return type\")\n\t}\n}\n\nfunc (p *jq) applyExpression(input []interface{}) ([]*formatters.EventMsg, error) {\n\tvar res []interface{}\n\tvar err error\n\tvar evs = make([]*formatters.EventMsg, 0)\n\titer := p.expr.Run(input)\n\tfor {\n\t\tr, ok := iter.Next()\n\t\tif !ok {\n\t\t\tp.logger.Printf(\"iter done? 
%v | r=%v\", ok, r)\n\t\t\tbreak\n\t\t}\n\t\tp.logger.Printf(\"iter result: (%T)%+v\\n\", r, r)\n\t\tswitch r := r.(type) {\n\t\tcase error:\n\t\t\treturn nil, err\n\t\tdefault:\n\t\t\tp.logger.Printf(\"adding %+v\\n\", r)\n\t\t\tres = append(res, r)\n\t\t}\n\t}\n\tfor _, e := range res {\n\t\tswitch es := e.(type) {\n\t\tcase []interface{}:\n\t\t\tfor _, ee := range es {\n\t\t\t\tswitch ee := ee.(type) {\n\t\t\t\tcase map[string]interface{}:\n\t\t\t\t\tev, err := formatters.EventFromMap(ee)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tevs = append(evs, ev)\n\t\t\t\tdefault:\n\t\t\t\t\tp.logger.Printf(\"unexpected type (%T)%+v\", ee, ee)\n\t\t\t\t}\n\t\t\t}\n\t\tcase map[string]interface{}:\n\t\t\tev, err := formatters.EventFromMap(es)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tevs = append(evs, ev)\n\t\tdefault:\n\t\t\tp.logger.Printf(\"unexpected type (%T)%+v\", e, e)\n\t\t}\n\t}\n\treturn evs, nil\n}\n\nfunc (p *jq) WithLogger(l *log.Logger) {\n\tif p.Debug && l != nil {\n\t\tp.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if p.Debug {\n\t\tp.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_jq/event_jq_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_jq\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"default_values\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"simple_select_expression\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"expression\": `.[] | select(.name==\"sub1\")`,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: 
nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"double_condition_and_select_expression\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"expression\": `.[] | select(.name==\"sub1\" and .values.counter1 > 90)`,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 
0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t},\n\t},\n\t\"complex_select_expression\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"expression\": `.[] | select((.name==\"sub1\" and .values.counter1 > 90) or (.name==\"sub2\" and .values.counter2 > 80))`,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: 
map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"delete_a_single_value\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"expression\": `.[] | del(.values.counter1)`,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"delete_multiple_values\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"expression\": `.[] | del(.values.[\"counter1\", \"counter2\"])`,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"add_a_tag\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"expression\": `.[] |= (.tags.new = \"TAG1\")`,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: 
map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"new\":  \"TAG1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"new\":  \"TAG1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"new\":  \"TAG1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"new\":  \"TAG1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"add_a_value\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"expression\": `.[] |= (.values.new = \"Value1\")`,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: 
map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t\t\"new\":      \"Value1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"value\": 1,\n\t\t\t\t\t\t\t\"new\":   \"Value1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t\t\"new\":      \"Value1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t\t\"new\":      \"Value1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"add_a_value_with_condition\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"condition\":  `.tags | 
has(\"tag1\")`,\n\t\t\t\"expression\": `.[] |= (.values.new = \"Value1\")`,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t\t\"new\":      \"Value1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:   \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\"value\": 1},\n\t\t\t\t\t\tTags:   map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"value\": 1,\n\t\t\t\t\t\t\t\"new\":   \"Value1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 
91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub2\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t\t\"new\":      \"Value1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"expression_with_$var\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"condition\":  `.values | has(\"a\")`,\n\t\t\t\"expression\": `.[] | .values.a as $x | .values.b=$x+1`,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"a\": 42,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"a\": 
42,\n\t\t\t\t\t\t\t\"b\": 43,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventJQ(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tt.Log(\"found processor\")\n\t\t\tp := pi()\n\t\t\terr := p.Init(ts.processor, formatters.WithLogger(log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)))\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"processor: %+v\", p)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tfor j := range item.input {\n\t\t\t\t\t\tt.Logf(\"%q item %d, index %d, inputs=%+v\", name, i, j, item.input[j])\n\t\t\t\t\t}\n\t\t\t\t\t// compare lengths first\n\t\t\t\t\tif len(outs) != len(item.output) {\n\t\t\t\t\t\tt.Logf(\"expected and gotten outputs are not of the same length\")\n\t\t\t\t\t\tt.Logf(\"expected: %+v\", item.output)\n\t\t\t\t\t\tt.Logf(\"     got: %+v\", outs)\n\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t}\n\t\t\t\t\t//\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tt.Logf(\"%q item %d, index %d, output=%+v\", name, i, j, outs[j])\n\t\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Logf(\"failed at %s item %d, index %d\", name, i, j)\n\t\t\t\t\t\t\tt.Logf(\"expected: %+v\", item.output[j])\n\t\t\t\t\t\t\tt.Logf(\"     got: %+v\", outs[j])\n\t\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"event processor %s not found\", ts.processorType)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_merge/event_merge.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_merge\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-merge\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// merge merges a list of event messages into one or multiple messages based on some criteria\ntype merge struct {\n\tformatters.BaseProcessor\n\tAlways bool `mapstructure:\"always,omitempty\" json:\"always,omitempty\"`\n\tDebug  bool `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\tlogger *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &merge{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (p *merge) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\n\tif p.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(p)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"initialized processor '%s': %+v\", processorType, p)\n\t\t\treturn nil\n\t\t}\n\t\tp.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (p *merge) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tif len(es) == 0 {\n\t\treturn nil\n\t}\n\tif p.Always {\n\t\tfor i, e := range es {\n\t\t\tif e == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif i > 0 
{\n\t\t\t\tmergeEvents(es[0], e)\n\t\t\t}\n\t\t}\n\t\treturn []*formatters.EventMsg{es[0]}\n\t}\n\tresult := make([]*formatters.EventMsg, 0, len(es))\n\ttimestamps := make(map[int64]int)\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif idx, ok := timestamps[e.Timestamp]; ok {\n\t\t\tmergeEvents(result[idx], e)\n\t\t\tcontinue\n\t\t}\n\t\tresult = append(result, e)\n\t\ttimestamps[e.Timestamp] = len(result) - 1\n\t}\n\treturn result\n}\n\nfunc (p *merge) WithLogger(l *log.Logger) {\n\tif p.Debug && l != nil {\n\t\tp.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if p.Debug {\n\t\tp.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc mergeEvents(e1, e2 *formatters.EventMsg) {\n\tif e1.Tags == nil {\n\t\te1.Tags = make(map[string]string)\n\t}\n\tif e1.Values == nil {\n\t\te1.Values = make(map[string]interface{})\n\t}\n\tfor n, t := range e2.Tags {\n\t\te1.Tags[n] = t\n\t}\n\tfor n, v := range e2.Values {\n\t\te1.Values[n] = v\n\t}\n\te1.Deletes = append(e1.Deletes, e2.Deletes...)\n\tif e2.Timestamp > e1.Timestamp {\n\t\te1.Timestamp = e2.Timestamp\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_merge/event_merge_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_merge\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"merge_by_timestamps\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"always\": false,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tValues:    map[string]interface{}{\"value1\": 1},\n\t\t\t\t\t\tTags:      map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tValues:    map[string]interface{}{\"value2\": 2},\n\t\t\t\t\t\tTags:      map[string]string{\"tag2\": \"2\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tValues:    map[string]interface{}{\"value3\": 3},\n\t\t\t\t\t\tTags:      map[string]string{\"tag3\": \"3\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"value1\": 1,\n\t\t\t\t\t\t\t\"value2\": 2,\n\t\t\t\t\t\t\t\"value3\": 3,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: 
map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t\t\"tag3\": \"3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tValues:    map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tValues:    map[string]interface{}{\"name\": \"foo\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tValues:    map[string]interface{}{\"name\": 1},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tValues:    map[string]interface{}{\"name\": \"foo\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"merge_always\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"always\": true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tValues:    map[string]interface{}{\"value1\": 1},\n\t\t\t\t\t\tTags:      map[string]string{\"tag1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tValues:    map[string]interface{}{\"value2\": 2},\n\t\t\t\t\t\tTags:      map[string]string{\"tag2\": \"2\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tValues:    map[string]interface{}{\"value3\": 3},\n\t\t\t\t\t\tTags:      map[string]string{\"tag3\": \"3\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"value1\": 1,\n\t\t\t\t\t\t\t\"value2\": 2,\n\t\t\t\t\t\t\t\"value3\": 3,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": 
\"1\",\n\t\t\t\t\t\t\t\"tag2\": \"2\",\n\t\t\t\t\t\t\t\"tag3\": \"3\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name\": 1,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      make(map[string]string),\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name\":  1,\n\t\t\t\t\t\t\t\"name2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name\": 1,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      make(map[string]string),\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventMerge(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tt.Log(\"found processor\")\n\t\t\tp := pi()\n\t\t\terr := p.Init(ts.processor)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"processor: %+v\", p)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tif 
!reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Errorf(\"failed at %s item %d, index %d, expected %+v, got: %+v\", name, i, j, item.output[j], outs[j])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"event processor %s not found\", ts.processorType)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_override_ts/event_override_ts.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_override_ts\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-override-ts\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// overrideTS Overrides the message timestamp with the local time\ntype overrideTS struct {\n\tformatters.BaseProcessor\n\n\tPrecision string `mapstructure:\"precision,omitempty\" json:\"precision,omitempty\"`\n\tDebug     bool   `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\tlogger *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &overrideTS{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (o *overrideTS) Init(cfg any, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(o)\n\t}\n\tif o.Precision == \"\" {\n\t\to.Precision = \"ns\"\n\t}\n\tif o.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(o)\n\t\tif err != nil {\n\t\t\to.logger.Printf(\"initialized processor '%s': %+v\", processorType, o)\n\t\t\treturn nil\n\t\t}\n\t\to.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (o *overrideTS) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\tnow 
:= time.Now()\n\t\to.logger.Printf(\"setting timestamp to %d with precision %s\", now.UnixNano(), o.Precision)\n\t\tswitch o.Precision {\n\t\tcase \"s\":\n\t\t\te.Timestamp = now.Unix()\n\t\tcase \"ms\":\n\t\t\te.Timestamp = now.UnixNano() / 1000000\n\t\tcase \"us\":\n\t\t\te.Timestamp = now.UnixNano() / 1000\n\t\tcase \"ns\":\n\t\t\te.Timestamp = now.UnixNano()\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (o *overrideTS) WithLogger(l *log.Logger) {\n\tif o.Debug && l != nil {\n\t\to.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if o.Debug {\n\t\to.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_override_ts/event_override_ts_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_override_ts\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar now = time.Now()\n\nvar testset = map[string]struct {\n\tprocessor map[string]interface{}\n\ttests     []item\n}{\n\t\"ms\": {\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"type\":  processorType,\n\t\t\t\"debug\": true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: now.UnixNano() / 1000000,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"ns\": {\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"type\":      processorType,\n\t\t\t\"precision\": \"ns\",\n\t\t\t\"debug\":     true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: -1,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: now.UnixNano(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"us\": {\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"type\":      
processorType,\n\t\t\t\"precision\": \"us\",\n\t\t\t\"debug\":     true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: -1,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: now.UnixNano() / 1000,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"s\": {\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"type\":      processorType,\n\t\t\t\"precision\": \"s\",\n\t\t\t\"debug\":     true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: -1,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: now.Unix(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventDateString(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tt.Log(name)\n\t\tif typ, ok := ts.processor[\"type\"]; ok {\n\t\t\tt.Log(\"found type\")\n\t\t\tif pi, ok := formatters.EventProcessors[typ.(string)]; ok {\n\t\t\t\tt.Log(\"found processor\")\n\t\t\t\tp := pi()\n\t\t\t\terr := p.Init(ts.processor)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tt.Logf(\"initialized for test %s: %+v\", name, p)\n\t\t\t\tfor i, item := range ts.tests {\n\t\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\t\tif outs[j].Timestamp < item.output[j].Timestamp {\n\t\t\t\t\t\t\t\tt.Logf(\"failed at event override_ts, item %d, index %d\", i, j)\n\t\t\t\t\t\t\t\tt.Logf(\"expected: %#v\", 
item.output[j])\n\t\t\t\t\t\t\t\tt.Logf(\"     got: %#v\", outs[j])\n\t\t\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_plugin/plugin.go",
    "content": "package event_plugin\n\nimport (\n\t\"net/rpc\"\n\n\t\"github.com/hashicorp/go-plugin\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype EventProcessorPlugin struct {\n\tImpl formatters.EventProcessor\n}\n\nfunc (p *EventProcessorPlugin) Server(*plugin.MuxBroker) (interface{}, error) {\n\treturn &eventProcessorRPCServer{Impl: p.Impl}, nil\n}\n\nfunc (p *EventProcessorPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {\n\treturn &EventProcessorRPC{client: c}, nil\n}\n"
  },
  {
    "path": "pkg/formatters/event_plugin/rpc.go",
    "content": "package event_plugin\n\nimport (\n\t\"encoding/gob\"\n\t\"log\"\n\t\"net/rpc\"\n\t\"os\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-plugin\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\ntype InitArgs struct {\n\tCfg interface{}\n}\n\ntype ApplyArgs struct {\n\tEvents []*formatters.EventMsg\n}\n\ntype ApplyResponse struct {\n\tEvents []*formatters.EventMsg\n}\n\ntype (\n\tActionresponse     struct{}\n\tInitResponse       struct{}\n\tTargetresponse     struct{}\n\tProccessorresponse struct{}\n)\n\ntype eventProcessorRPCServer struct {\n\tImpl formatters.EventProcessor\n}\n\nfunc init() {\n\tgob.Register(map[string]interface{}{})\n\tgob.Register([]interface{}{})\n}\n\nfunc (s *eventProcessorRPCServer) Init(args *InitArgs, resp *InitResponse) error {\n\treturn s.Impl.Init(args.Cfg)\n}\n\nfunc (s *eventProcessorRPCServer) Apply(args *ApplyArgs, resp *ApplyResponse) error {\n\tresp.Events = s.Impl.Apply(args.Events...)\n\treturn nil\n}\n\nfunc (s *eventProcessorRPCServer) WithActions(args map[string]map[string]interface{}, resp *Actionresponse) error {\n\ts.Impl.WithActions(args)\n\treturn nil\n}\n\nfunc (s *eventProcessorRPCServer) WithTargets(args map[string]*types.TargetConfig, resp *Targetresponse) error {\n\ts.Impl.WithTargets(args)\n\treturn nil\n}\n\nfunc (s *eventProcessorRPCServer) WithProcessors(\n\targs map[string]map[string]interface{},\n\tresp *Proccessorresponse,\n) error {\n\ts.Impl.WithProcessors(args)\n\treturn nil\n}\n\nfunc (s *eventProcessorRPCServer) WithLogger() error {\n\treturn nil\n}\n\ntype EventProcessorRPC struct {\n\tclient *rpc.Client\n\tlogger *log.Logger\n}\n\nfunc (g *EventProcessorRPC) Init(cfg interface{}, opts ...formatters.Option) error {\n\tfor _, opt := range opts {\n\t\topt(g)\n\t}\n\terr := g.client.Call(\"Plugin.Init\", &InitArgs{Cfg: cfg}, 
&InitResponse{})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (g *EventProcessorRPC) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg {\n\tvar resp ApplyResponse\n\terr := g.client.Call(\"Plugin.Apply\", &ApplyArgs{Events: event}, &resp)\n\tif err != nil {\n\t\tg.logger.Print(\"RPC client call error: \", err)\n\t\treturn nil\n\t}\n\treturn resp.Events\n}\n\nfunc (g *EventProcessorRPC) WithActions(act map[string]map[string]interface{}) {\n\terr := g.client.Call(\"Plugin.WithActions\", act, &Actionresponse{})\n\tif err != nil {\n\t\tg.logger.Print(\"RPC client call error: \", err)\n\t}\n}\n\nfunc (g *EventProcessorRPC) WithTargets(tcs map[string]*types.TargetConfig) {\n\terr := g.client.Call(\"Plugin.WithTargets\", tcs, &Targetresponse{})\n\tif err != nil {\n\t\tg.logger.Print(\"RPC client call error: \", err)\n\t}\n}\n\nfunc (g *EventProcessorRPC) WithProcessors(procs map[string]map[string]any) {\n\terr := g.client.Call(\"Plugin.WithProcessors\", procs, &Proccessorresponse{})\n\tif err != nil {\n\t\tg.logger.Print(\"RPC client call error: \", err)\n\t}\n}\n\nfunc (g *EventProcessorRPC) WithLogger(l *log.Logger) {\n\tif l != nil {\n\t\tg.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t\treturn\n\t}\n\tg.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n}\n"
  },
  {
    "path": "pkg/formatters/event_rate_limit/event_rate_limit.go",
    "content": "package event_rate_limit\n\nimport (\n\t\"crypto/sha256\"\n\t\"encoding/hex\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"time\"\n\n\tlru \"github.com/hashicorp/golang-lru/v2\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType          = \"event-rate-limit\"\n\tloggingPrefix          = \"[\" + processorType + \"] \"\n\tdefaultCacheSize       = 1000\n\toneSecond        int64 = int64(time.Second)\n)\n\nvar (\n\teqChar = []byte(\"=\")\n\tlfChar = []byte(\"\\n\")\n)\n\n// rateLimit rate-limits the message to the given rate.\ntype rateLimit struct {\n\tformatters.BaseProcessor\n\n\tPerSecondLimit float64 `mapstructure:\"per-second,omitempty\" json:\"per-second,omitempty\"`\n\tCacheSize      int     `mapstructure:\"cache-size,omitempty\" json:\"cache-size,omitempty\"`\n\tDebug          bool    `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\t// eventIndex is an lru cache used to compare the events hash with known value.\n\t// LRU cache seems like a good choice because we expect the rate-limiter to be\n\t// most useful in burst scenarios.\n\t// We need some form of control over the size of the cache to contain RAM usage\n\t// so LRU is good in that respect also.\n\teventIndex *lru.Cache[string, int64]\n\tlogger     *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &rateLimit{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (o *rateLimit) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(o)\n\t}\n\tif o.CacheSize <= 0 {\n\t\to.logger.Printf(\"using default value for lru size %d\", defaultCacheSize)\n\t\to.CacheSize = defaultCacheSize\n\n\t}\n\tif o.PerSecondLimit <= 0 {\n\t\treturn fmt.Errorf(\"provided limit is 
%f, must be greater than 0\", o.PerSecondLimit)\n\t}\n\to.eventIndex, err = lru.New[string, int64](o.CacheSize)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to initialize cache: %w\", err)\n\t}\n\tif o.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(o)\n\t\tif err != nil {\n\t\t\to.logger.Printf(\"initialized processor '%s': %+v\", processorType, o)\n\t\t\treturn nil\n\t\t}\n\t\to.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (o *rateLimit) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tvalidEs := make([]*formatters.EventMsg, 0, len(es))\n\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\th := hashEvent(e)\n\t\tts, has := o.eventIndex.Get(h)\n\t\t// we check that we have the event hash in the map, if not, it's the first time we see the event\n\t\tif val := float64(e.Timestamp-ts) * o.PerSecondLimit; has && e.Timestamp != ts && int64(val) < oneSecond {\n\t\t\t// reject event\n\t\t\to.logger.Printf(\"dropping event val %.2f lower than configured rate\", val)\n\t\t\tcontinue\n\t\t}\n\t\t// retain the last event that passed through\n\t\to.eventIndex.Add(h, e.Timestamp)\n\t\tvalidEs = append(validEs, e)\n\t}\n\n\treturn validEs\n}\n\nfunc hashEvent(e *formatters.EventMsg) string {\n\th := sha256.New()\n\ttagKeys := make([]string, len(e.Tags))\n\ti := 0\n\tfor tagKey := range e.Tags {\n\t\ttagKeys[i] = tagKey\n\t\ti++\n\t}\n\tsort.Strings(tagKeys)\n\n\tfor _, tagKey := range tagKeys {\n\t\th.Write([]byte(tagKey))\n\t\th.Write(eqChar)\n\t\th.Write([]byte(e.Tags[tagKey]))\n\t\th.Write(lfChar)\n\t}\n\n\treturn hex.EncodeToString(h.Sum(nil))\n}\n\nfunc (o *rateLimit) WithLogger(l *log.Logger) {\n\tif o.Debug && l != nil {\n\t\to.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if o.Debug {\n\t\to.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_rate_limit/event_rate_limit_test.go",
    "content": "package event_rate_limit\n\nimport (\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessor map[string]interface{}\n\ttests     []item\n}{\n\t\"1pps-notags-pass\": {\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"type\":       processorType,\n\t\t\t\"debug\":      true,\n\t\t\t\"per-second\": 1.0,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1e9 + 1,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1e9 + 1,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"1pps-tags-pass\": {\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"type\":       processorType,\n\t\t\t\"per-second\": 1.0,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": \"val-x\",\n\t\t\t\t\t\t\t\"b\": \"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1 + 1e9,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1e9 + 1,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": \"val-x\",\n\t\t\t\t\t\t\t\"b\": \"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": 
\"val-x\",\n\t\t\t\t\t\t\t\"b\": \"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1 + 1e9,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1e9 + 1,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": \"val-x\",\n\t\t\t\t\t\t\t\"b\": \"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"1pps-notags-drop\": {\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"type\":       processorType,\n\t\t\t\"debug\":      true,\n\t\t\t\"per-second\": 1.0,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1e9 - 1,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"1pps-tags-drop\": {\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"type\":       processorType,\n\t\t\t\"per-second\": 1.0,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": \"val-x\",\n\t\t\t\t\t\t\t\"b\": \"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1e9 - 1,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": \"val-x\",\n\t\t\t\t\t\t\t\"b\": \"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": \"val-x\",\n\t\t\t\t\t\t\t\"b\": 
\"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"100pps-tags-pass\": {\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"type\":       processorType,\n\t\t\t\"per-second\": 100.0,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": \"val-x\",\n\t\t\t\t\t\t\t\"b\": \"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1e9 / 100,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": \"val-x\",\n\t\t\t\t\t\t\t\"b\": \"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": \"val-x\",\n\t\t\t\t\t\t\t\"b\": \"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1e9 / 100,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": \"val-x\",\n\t\t\t\t\t\t\t\"b\": \"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"100pps-tags-drop\": {\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"type\":       processorType,\n\t\t\t\"per-second\": 100.0,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": \"val-x\",\n\t\t\t\t\t\t\t\"b\": \"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 
1,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1e9/100 - 1,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": \"val-x\",\n\t\t\t\t\t\t\t\"b\": \"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"a\": \"val-x\",\n\t\t\t\t\t\t\t\"b\": \"val-y\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"same-ts-pass\": {\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"type\":       processorType,\n\t\t\t\"per-second\": 100.0,\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 0,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestRateLimit(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tt.Log(name)\n\t\tif typ, ok := ts.processor[\"type\"]; ok {\n\t\t\tt.Log(\"found type\")\n\t\t\tif pi, ok := formatters.EventProcessors[typ.(string)]; ok {\n\t\t\t\tt.Log(\"found processor\")\n\t\t\t\tp := pi()\n\t\t\t\terr := p.Init(ts.processor, formatters.WithLogger(nil))\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tt.Logf(\"initialized for test %s: %+v\", name, p)\n\t\t\t\tfor i, item := range ts.tests {\n\t\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\t\tif len(outs) != len(item.output) {\n\t\t\t\t\t\t\tt.Logf(\"failed at event rate_limit, 
item %d\", i)\n\t\t\t\t\t\t\tt.Logf(\"different number of events between output=%d and wanted=%d\", len(outs), len(item.output))\n\t\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_starlark/dict.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_starlark\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\n\t\"go.starlark.net/starlark\"\n)\n\ntype isDict interface {\n\tstarlark.HasSetKey\n\tstarlark.IterableMapping\n\tClear() error\n\tDelete(starlark.Value) (starlark.Value, bool, error)\n}\n\ntype dict[K comparable, V any] struct {\n\tname      string\n\tm         map[K]V\n\titerCount int\n\tfrozen    bool\n}\n\nfunc newDict[K comparable, V any](name string, m map[K]V) *dict[K, V] {\n\tif m == nil {\n\t\tm = make(map[K]V)\n\t}\n\treturn &dict[K, V]{name: name, m: m}\n}\n\ntype builtinMethod func(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error)\n\n// https://github.com/google/starlark-go/blob/243c74974e97462c5df21338e182470391748b04/starlark/library.go#L147\nfunc builtinAttr(recv starlark.Value, name string, methods map[string]builtinMethod) (starlark.Value, error) {\n\tmethod := methods[name]\n\tif method == nil {\n\t\treturn starlark.None, fmt.Errorf(\"no such method %q\", name)\n\t}\n\n\t// Allocate a closure over 'method'.\n\timpl := func(thread *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\t\treturn method(b, args, kwargs)\n\t}\n\treturn starlark.NewBuiltin(name, impl).BindReceiver(recv), nil\n}\n\nfunc builtinAttrNames(methods map[string]builtinMethod) []string {\n\tnames := make([]string, 0, len(methods))\n\tfor name := range methods {\n\t\tnames = append(names, 
name)\n\t}\n\tsort.Strings(names)\n\treturn names\n}\n\n// dict implements starlark.Value\nfunc (d *dict[K, V]) String() string {\n\tb, _ := json.Marshal(d.m)\n\treturn string(b)\n}\n\n// dict implements starlark.Value\nfunc (d *dict[K, V]) Type() string {\n\treturn d.name\n}\n\n// dict implements starlark.Value\nfunc (d *dict[K, V]) Freeze() {\n\td.frozen = true\n}\n\n// dict implements starlark.Value\nfunc (d *dict[K, V]) Truth() starlark.Bool {\n\treturn len(d.m) != 0\n}\n\n// dict implements starlark.Value\nfunc (d *dict[K, V]) Hash() (uint32, error) {\n\treturn 0, errors.New(\"dict is not hashable\")\n}\n\n// AttrNames implements the starlark.HasAttrs interface.\nfunc (d *dict[K, V]) AttrNames() []string {\n\treturn builtinAttrNames(dictMethods)\n}\n\n// Attr implements the starlark.HasAttrs interface.\nfunc (d *dict[K, V]) Attr(name string) (starlark.Value, error) {\n\treturn builtinAttr(d, name, dictMethods)\n}\n\nvar dictMethods = map[string]builtinMethod{\n\t\"clear\":      dictClear,\n\t\"get\":        dictGet,\n\t\"items\":      dictItems,\n\t\"keys\":       dictKeys,\n\t\"pop\":        dictPop,\n\t\"setdefault\": dictSetDefault,\n\t\"update\":     dictUpdate,\n\t\"values\":     dictValues,\n}\n\n// Get implements the starlark.Mapping interface.\nfunc (d *dict[K, V]) Get(key starlark.Value) (v starlark.Value, found bool, err error) {\n\tk, err := toGoVal(key)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tif kk, ok := k.(K); ok {\n\t\tgv, found := d.m[kk]\n\t\tif !found {\n\t\t\treturn starlark.None, false, nil\n\t\t}\n\t\tvv, err := toStarlarkValue(gv)\n\t\treturn vv, true, err\n\t}\n\treturn starlark.None, false, errors.New(\"key must be of type 'string'\")\n}\n\n// SetKey implements the starlark.HasSetKey interface to support map update\n// using x[k]=v syntax, like a dictionary.\nfunc (d *dict[K, V]) SetKey(k, v starlark.Value) error {\n\tif d.iterCount > 0 {\n\t\treturn fmt.Errorf(\"cannot insert during iteration\")\n\t}\n\tkk, err := 
toGoVal(k)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkey, ok := kk.(K)\n\tif !ok {\n\t\treturn fmt.Errorf(\"unexpected key type: %T\", kk)\n\t}\n\n\tvv, err := toGoVal(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif val, ok := vv.(V); ok {\n\t\td.m[key] = val\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"unexpected value type: %T\", vv)\n}\n\n// Items implements the starlark.IterableMapping interface.\nfunc (d *dict[K, V]) Items() []starlark.Tuple {\n\titems := make([]starlark.Tuple, 0, len(d.m))\n\tfor k, v := range d.m {\n\t\tvalue, err := toStarlarkValue(v)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tkk, err := toStarlarkValue(k)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tpair := starlark.Tuple{kk, value}\n\t\titems = append(items, pair)\n\t}\n\treturn items\n}\n\nfunc (d *dict[K, V]) Clear() error {\n\tif d.iterCount > 0 {\n\t\treturn fmt.Errorf(\"cannot clear dict during iteration\")\n\t}\n\tfor k := range d.m {\n\t\tdelete(d.m, k)\n\t}\n\treturn nil\n}\n\nfunc (d *dict[K, V]) Delete(k starlark.Value) (v starlark.Value, found bool, err error) {\n\tif d.iterCount > 0 {\n\t\treturn nil, false, fmt.Errorf(\"cannot delete a key during iteration\")\n\t}\n\tgk, err := toGoVal(k)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tgkk, ok := gk.(K)\n\tif !ok {\n\t\treturn nil, false, fmt.Errorf(\"unexpected key type: %T\", gk)\n\t}\n\tvalue, ok := d.m[gkk]\n\tif ok {\n\t\tdelete(d.m, gkk)\n\t\tv, err := toStarlarkValue(value)\n\t\treturn v, ok, err\n\t}\n\treturn starlark.None, false, nil\n}\n\n// Iterate implements the starlark.Iterator interface.\nfunc (d *dict[K, V]) Iterate() starlark.Iterator {\n\td.iterCount++\n\ttags := make([]*tag[K, V], 0, len(d.m))\n\tfor k, v := range d.m {\n\t\ttags = append(tags, &tag[K, V]{key: k, value: v})\n\t}\n\treturn &dictIterator[K, V]{\n\t\tdict: d,\n\t\ttags: tags,\n\t}\n}\n\ntype tag[K, V any] struct {\n\tkey   K\n\tvalue V\n}\n\ntype dictIterator[K comparable, V any] struct {\n\t*dict[K, 
V]\n\ttags []*tag[K, V]\n}\n\n// Next implements the starlark.Iterator interface.\nfunc (i *dictIterator[K, V]) Next(p *starlark.Value) bool {\n\tif len(i.tags) == 0 {\n\t\treturn false\n\t}\n\n\ttag := i.tags[0]\n\ti.tags = i.tags[1:]\n\tsk, err := toStarlarkValue(tag.key)\n\tif err != nil {\n\t\treturn false\n\t}\n\t*p = sk\n\n\treturn true\n}\n\n// Done implements the starlark.Iterator interface.\nfunc (i *dictIterator[K, V]) Done() {\n\ti.iterCount--\n}\n\n// --- dictionary methods ---\n\n// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·clear\nfunc dictClear(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tif err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil {\n\t\treturn starlark.None, fmt.Errorf(\"%s: %v\", b.Name(), err)\n\t}\n\n\treturn starlark.None, b.Receiver().(isDict).Clear()\n}\n\n// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·pop\nfunc dictPop(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tvar k, d starlark.Value\n\tif err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &k, &d); err != nil {\n\t\treturn starlark.None, fmt.Errorf(\"%s: %v\", b.Name(), err)\n\t}\n\n\tv, found, err := b.Receiver().(isDict).Delete(k)\n\tif err != nil {\n\t\treturn starlark.None, fmt.Errorf(\"%s: %v\", b.Name(), err)\n\t}\n\tif found {\n\t\treturn v, nil\n\t}\n\tif d != nil {\n\t\treturn d, nil\n\t}\n\treturn starlark.None, fmt.Errorf(\"%s: missing key\", b.Name())\n}\n\n// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·get\nfunc dictGet(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tvar key, d starlark.Value\n\tif err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &d); err != nil {\n\t\treturn starlark.None, fmt.Errorf(\"%s: %v\", b.Name(), err)\n\t}\n\tv, ok, err := b.Receiver().(isDict).Get(key)\n\tif err != 
nil {\n\t\treturn starlark.None, fmt.Errorf(\"%s: %v\", b.Name(), err)\n\t}\n\tif ok {\n\t\treturn v, nil\n\t}\n\tif d != nil {\n\t\treturn d, nil\n\t}\n\treturn starlark.None, nil\n}\n\n// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·setdefault\nfunc dictSetDefault(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tvar key starlark.Value\n\tvar d = starlark.None\n\tif err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &d); err != nil {\n\t\treturn starlark.None, fmt.Errorf(\"%s: %v\", b.Name(), err)\n\t}\n\n\trecv := b.Receiver().(isDict)\n\tv, found, err := recv.Get(key)\n\tif err != nil {\n\t\treturn starlark.None, fmt.Errorf(\"%s: %v\", b.Name(), err)\n\t}\n\tif !found {\n\t\tv = d\n\t\tif err := recv.SetKey(key, d); err != nil {\n\t\t\treturn starlark.None, fmt.Errorf(\"%s: %v\", b.Name(), err)\n\t\t}\n\t}\n\treturn v, nil\n}\n\n// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update\nfunc dictUpdate(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\t// Unpack the arguments\n\tif len(args) > 1 {\n\t\treturn nil, fmt.Errorf(\"update: got %d arguments, want at most 1\", len(args))\n\t}\n\n\t// Get the target\n\trecv := b.Receiver().(isDict)\n\n\tif len(args) == 1 {\n\t\tswitch updates := args[0].(type) {\n\t\tcase starlark.IterableMapping:\n\t\t\t// Iterate over dict's key/value pairs, not just keys.\n\t\t\tfor _, item := range updates.Items() {\n\t\t\t\tif err := recv.SetKey(item[0], item[1]); err != nil {\n\t\t\t\t\treturn nil, err // dict is frozen\n\t\t\t\t}\n\t\t\t}\n\t\tcase starlark.Iterable:\n\t\t\t// all other sequences\n\t\t\titer := starlark.Iterate(updates)\n\t\t\tif iter == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"got %s, want iterable\", updates.Type())\n\t\t\t}\n\t\t\tdefer iter.Done()\n\t\t\tvar pair starlark.Value\n\t\t\tfor i := 0; iter.Next(&pair); i++ {\n\t\t\t\titer2 := 
starlark.Iterate(pair)\n\t\t\t\tif iter2 == nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"dictionary update sequence element #%d is not iterable (%s)\", i, pair.Type())\n\t\t\t\t}\n\t\t\t\tdefer iter2.Done()\n\t\t\t\tlength := starlark.Len(pair)\n\t\t\t\tif length < 0 {\n\t\t\t\t\treturn nil, fmt.Errorf(\"dictionary update sequence element #%d has unknown length (%s)\", i, pair.Type())\n\t\t\t\t}\n\t\t\t\tif length != 2 {\n\t\t\t\t\treturn nil, fmt.Errorf(\"dictionary update sequence element #%d has length %d, want 2\", i, length)\n\t\t\t\t}\n\t\t\t\tvar k, v starlark.Value\n\t\t\t\titer2.Next(&k)\n\t\t\t\titer2.Next(&v)\n\t\t\t\terr := recv.SetKey(k, v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"cannot update dict: update values are not iterable\")\n\t\t}\n\t}\n\n\t// Then add the kwargs.\n\tbefore := starlark.Len(recv)\n\tfor _, pair := range kwargs {\n\t\tif err := recv.SetKey(pair[0], pair[1]); err != nil {\n\t\t\treturn nil, err // dict is frozen\n\t\t}\n\t}\n\t// In the common case, each kwarg will add another dict entry.\n\t// If that's not so, check whether it is because there was a duplicate kwarg.\n\tif starlark.Len(recv) < before+len(kwargs) {\n\t\tkeys := make(map[starlark.String]bool, len(kwargs))\n\t\tfor _, kv := range kwargs {\n\t\t\tk := kv[0].(starlark.String)\n\t\t\tif keys[k] {\n\t\t\t\treturn nil, fmt.Errorf(\"duplicate keyword arg: %v\", k)\n\t\t\t}\n\t\t\tkeys[k] = true\n\t\t}\n\t}\n\n\treturn starlark.None, nil\n}\n\n// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·items\nfunc dictItems(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tif err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil {\n\t\treturn starlark.None, fmt.Errorf(\"%s: %v\", b.Name(), err)\n\t}\n\titems := b.Receiver().(isDict).Items()\n\tres := make([]starlark.Value, len(items))\n\tfor i, item := range items 
{\n\t\tres[i] = item // convert [2]starlark.Value to starlark.Value\n\t}\n\treturn starlark.NewList(res), nil\n}\n\n// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·keys\nfunc dictKeys(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tif err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil {\n\t\treturn starlark.None, fmt.Errorf(\"%s: %v\", b.Name(), err)\n\t}\n\n\titems := b.Receiver().(isDict).Items()\n\tres := make([]starlark.Value, len(items))\n\tfor i, item := range items {\n\t\tres[i] = item[0]\n\t}\n\treturn starlark.NewList(res), nil\n}\n\n// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·values\nfunc dictValues(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tif err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil {\n\t\treturn starlark.None, fmt.Errorf(\"%s: %v\", b.Name(), err)\n\t}\n\titems := b.Receiver().(isDict).Items()\n\tres := make([]starlark.Value, len(items))\n\tfor i, item := range items {\n\t\tres[i] = item[1]\n\t}\n\treturn starlark.NewList(res), nil\n}\n"
  },
  {
    "path": "pkg/formatters/event_starlark/event.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_starlark\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"go.starlark.net/starlark\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype event struct {\n\tev     *formatters.EventMsg\n\tfrozen bool\n}\n\nfunc fromEvent(ev *formatters.EventMsg) *event {\n\treturn &event{\n\t\tev: ev,\n\t}\n}\n\nfunc toEvent(sev *event) *formatters.EventMsg {\n\tif sev == nil {\n\t\treturn nil\n\t}\n\treturn sev.ev\n}\n\n// *event implements starlark.Value\nfunc (s *event) String() string {\n\tb, _ := json.Marshal(s.ev)\n\treturn string(b)\n}\n\n// *event implements starlark.Value\nfunc (s *event) Type() string { return \"Event\" }\n\n// *event implements starlark.Value\nfunc (s *event) Freeze() { s.frozen = true }\n\n// *event implements starlark.Value\nfunc (s *event) Truth() starlark.Bool { return starlark.True }\n\n// *event implements starlark.Value\nfunc (s *event) Hash() (uint32, error) { return 0, errors.New(\"not hashable\") }\n\n// *event implements the starlark.HasAttrs interface.\nfunc (s *event) AttrNames() []string {\n\treturn []string{\"name\", \"timestamp\", \"tags\", \"values\", \"deletes\"}\n}\n\n// *event implements the starlark.HasAttrs interface.\nfunc (s *event) Attr(name string) (starlark.Value, error) {\n\tswitch name {\n\tcase \"name\":\n\t\treturn starlark.String(s.ev.Name), nil\n\tcase \"timestamp\":\n\t\treturn starlark.MakeInt64(s.ev.Timestamp), nil\n\tcase \"tags\":\n\t\treturn s.Tags(), nil\n\tcase \"values\":\n\t\treturn s.Values(), 
nil\n\tcase \"deletes\":\n\t\treturn s.Deletes(), nil\n\tdefault:\n\t\t// Returning nil, nil indicates \"no such field or method\"\n\t\treturn nil, nil\n\t}\n}\n\n// *event implements the starlark.HasSetField interface.\nfunc (s *event) SetField(name string, value starlark.Value) error {\n\tif s.frozen {\n\t\treturn fmt.Errorf(\"cannot modify frozen event struct\")\n\t}\n\n\tswitch name {\n\tcase \"name\":\n\t\treturn s.SetName(value)\n\tcase \"timestamp\":\n\t\treturn s.SetTimestamp(value)\n\tcase \"tags\":\n\t\treturn s.SetTags(value)\n\tcase \"values\":\n\t\treturn s.SetValues(value)\n\tcase \"deletes\":\n\t\treturn s.SetDeletes(value)\n\tdefault:\n\t\treturn starlark.NoSuchAttrError(\n\t\t\tfmt.Sprintf(\"cannot assign to field %q\", name))\n\t}\n}\n\nfunc (s *event) SetName(name starlark.Value) error {\n\tif name, ok := name.(starlark.String); ok {\n\t\ts.ev.Name = name.GoString()\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"name not a string, %T\", name)\n}\n\nfunc (s *event) Tags() starlark.Value {\n\treturn newDict(\"Tags\", s.ev.Tags)\n}\n\nfunc (s *event) Values() starlark.Value {\n\treturn newDict(\"Values\", s.ev.Values)\n}\n\nfunc (s *event) Deletes() starlark.Value {\n\tif len(s.ev.Deletes) == 0 {\n\t\treturn &starlark.List{}\n\t}\n\tresult := &starlark.List{}\n\tfor _, s := range s.ev.Deletes {\n\t\tv, _ := toStarlarkValue(s)\n\t\tresult.Append(v)\n\t}\n\treturn result\n}\n\nfunc (s *event) Timestamp() starlark.Int {\n\treturn starlark.MakeInt64(s.ev.Timestamp)\n}\n\nfunc (s *event) SetTimestamp(value starlark.Value) error {\n\tswitch v := value.(type) {\n\tcase starlark.Int:\n\t\tns, ok := v.Int64()\n\t\tif !ok {\n\t\t\treturn errors.New(\"type error: expected int64 timestamp\")\n\t\t}\n\t\ts.ev.Timestamp = ns\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"type error: got %T\", v)\n\t}\n}\n\nfunc (s *event) SetTags(value starlark.Value) error {\n\ttags, err := toTags(value)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.ev.Tags = tags\n\treturn 
nil\n}\n\nfunc (s *event) SetValues(value starlark.Value) error {\n\tvals, err := toValues(value)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.ev.Values = vals\n\treturn nil\n}\n\nfunc (s *event) SetDeletes(value starlark.Value) error {\n\tdels, err := toDeletes(value)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.ev.Deletes = dels\n\treturn nil\n}\n\nfunc newEvent(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tvar name starlark.String\n\tvar ts starlark.Int\n\tvar tags starlark.Value\n\tvar values starlark.Value\n\tvar deletes starlark.Value\n\n\terr := starlark.UnpackArgs(\"Event\", args, kwargs,\n\t\t\"name\", &name,\n\t\t\"timestamp?\", &ts,\n\t\t\"tags?\", &tags,\n\t\t\"values?\", &values,\n\t\t\"deletes?\", &deletes,\n\t)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvs, err := toValues(values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttgs, err := toTags(tags)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdels, err := toDeletes(deletes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttimestamp, ok := ts.Int64()\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"failed to represent %v as int64\", ts)\n\t}\n\tev := &formatters.EventMsg{\n\t\tName:      string(name),\n\t\tTimestamp: timestamp,\n\t\tTags:      tgs,\n\t\tValues:    vs,\n\t\tDeletes:   dels,\n\t}\n\treturn &event{\n\t\tev: ev,\n\t}, nil\n}\n\nfunc toValues(value starlark.Value) (map[string]any, error) {\n\tif value == nil {\n\t\treturn make(map[string]any), nil\n\t}\n\tif value, ok := value.(starlark.IterableMapping); ok {\n\t\tresult := make(map[string]any)\n\t\tvar err error\n\t\tfor _, item := range value.Items() {\n\t\t\tk, ok := item[0].(starlark.String)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to represent value name %v as string\", item[0])\n\t\t\t}\n\t\t\tresult[k.GoString()], err = toGoVal(item[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treturn result, nil\n\t}\n\treturn nil, 
errors.New(\"unexpected iterable type in values field\")\n}\n\nfunc toTags(value starlark.Value) (map[string]string, error) {\n\tif value == nil {\n\t\treturn make(map[string]string), nil\n\t}\n\tif value, ok := value.(starlark.IterableMapping); ok {\n\t\tresult := make(map[string]string)\n\t\tfor _, item := range value.Items() {\n\t\t\tk, ok := item[0].(starlark.String)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to represent value name %v as string\", item[0])\n\t\t\t}\n\t\t\tv, ok := item[1].(starlark.String)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to represent value name %v as string\", item[1])\n\t\t\t}\n\t\t\tresult[k.GoString()] = v.GoString()\n\t\t}\n\t\treturn result, nil\n\t}\n\treturn nil, errors.New(\"unexpected iterable type in tags field\")\n}\n\nfunc toDeletes(value starlark.Value) ([]string, error) {\n\tif value == nil {\n\t\treturn []string{}, nil\n\t}\n\tif value, ok := value.(starlark.Sequence); ok {\n\t\titer := value.Iterate()\n\t\tdefer iter.Done()\n\t\tresult := make([]string, 0, value.Len())\n\t\tfor {\n\t\t\tvar item starlark.Value\n\t\t\tif iter.Next(&item) {\n\t\t\t\tif s, ok := item.(starlark.String); ok {\n\t\t\t\t\tresult = append(result, s.GoString())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn nil, errors.New(\"sequence item is not a 'string\")\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\treturn result, nil\n\t}\n\treturn nil, errors.New(\"unexpected iterable type in deletes field\")\n}\n\n// toStarlarkValue converts a value to a starlark.Value.\nfunc toStarlarkValue(value any) (starlark.Value, error) {\n\tv := reflect.ValueOf(value)\n\tswitch v.Kind() {\n\tcase reflect.Slice:\n\t\tlength := v.Len()\n\t\tarray := make([]starlark.Value, 0, length)\n\t\tfor i := 0; i < length; i++ {\n\t\t\tsVal, err := toStarlarkValue(v.Index(i).Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn starlark.None, err\n\t\t\t}\n\t\t\tarray = append(array, sVal)\n\t\t}\n\t\treturn starlark.NewList(array), nil\n\tcase 
reflect.Map:\n\t\tdict := starlark.NewDict(v.Len())\n\t\titer := v.MapRange()\n\t\tfor iter.Next() {\n\t\t\tsKey, err := toStarlarkValue(iter.Key().Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn starlark.None, err\n\t\t\t}\n\t\t\tsValue, err := toStarlarkValue(iter.Value().Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn starlark.None, err\n\t\t\t}\n\t\t\tdict.SetKey(sKey, sValue)\n\t\t}\n\t\treturn dict, nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn starlark.Float(v.Float()), nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn starlark.MakeInt64(v.Int()), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn starlark.MakeUint64(v.Uint()), nil\n\tcase reflect.String:\n\t\treturn starlark.String(v.String()), nil\n\tcase reflect.Bool:\n\t\treturn starlark.Bool(v.Bool()), nil\n\t}\n\n\treturn starlark.None, errors.New(\"invalid type\")\n}\n\nfunc toGoVal(value starlark.Value) (any, error) {\n\tswitch v := value.(type) {\n\tcase starlark.Float:\n\t\treturn float64(v), nil\n\tcase starlark.Int:\n\t\tn, ok := v.Int64()\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"cannot represent integer as int64\")\n\t\t}\n\t\treturn n, nil\n\tcase starlark.String:\n\t\treturn string(v), nil\n\tcase starlark.Bool:\n\t\treturn bool(v), nil\n\t}\n\n\treturn nil, errors.New(\"invalid starlark type\")\n}\n\nfunc copyEvent(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tvar sm *event\n\tif err := starlark.UnpackPositionalArgs(\"copy_event\", args, kwargs, 1, &sm); err != nil {\n\t\treturn nil, err\n\t}\n\ttags := make(map[string]string)\n\tvalues := make(map[string]any)\n\tfor k, v := range sm.ev.Tags {\n\t\ttags[k] = v\n\t}\n\tfor k, v := range sm.ev.Values {\n\t\tvalues[k] = v\n\t}\n\tdup := &event{\n\t\tev: &formatters.EventMsg{\n\t\t\tName:      sm.ev.Name,\n\t\t\tTimestamp: sm.ev.Timestamp,\n\t\t\tTags:      
tags,\n\t\t\tValues:    values,\n\t\t},\n\t}\n\n\treturn dup, nil\n}\n"
  },
  {
    "path": "pkg/formatters/event_starlark/event_starlark.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_starlark\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\n\t\"go.starlark.net/lib/math\"\n\t\"go.starlark.net/lib/time\"\n\t\"go.starlark.net/starlark\"\n\t\"go.starlark.net/syntax\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-starlark\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// starlarkProc runs a starlark script on the received events\ntype starlarkProc struct {\n\tformatters.BaseProcessor\n\n\tScript string `mapstructure:\"script,omitempty\" json:\"script,omitempty\"`\n\tSource string `mapstructure:\"source,omitempty\" json:\"source,omitempty\"`\n\tDebug  bool   `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\t// this mutex ensures batches of events are processed in sequence\n\tm       sync.Mutex\n\tthread  *starlark.Thread\n\tapplyFn starlark.Value\n\tlogger  *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &starlarkProc{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (p *starlarkProc) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\n\terr = p.validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.thread = &starlark.Thread{\n\t\tPrint: func(_ *starlark.Thread, msg string) 
{\n\t\t\tp.logger.Printf(\"print(): %v\", msg)\n\t\t},\n\t\tLoad: func(_ *starlark.Thread, module string) (starlark.StringDict, error) {\n\t\t\treturn loadModule(module)\n\t\t},\n\t}\n\t// sourceProgram\n\tbuiltins := starlark.StringDict{}\n\tbuiltins[\"Event\"] = starlark.NewBuiltin(\"Event\", newEvent)\n\tbuiltins[\"copy_event\"] = starlark.NewBuiltin(\"copy_event\", copyEvent)\n\tprog, err := p.sourceProgram(builtins)\n\tif err != nil {\n\t\treturn err\n\t}\n\tglobals, err := prog.Init(p.thread, builtins)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !globals.Has(\"apply\") {\n\t\treturn errors.New(\"missing global function apply\")\n\t}\n\tp.applyFn = globals[\"apply\"]\n\n\tglobals[\"cache\"] = starlark.NewDict(0)\n\n\tglobals.Freeze()\n\tif p.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(p)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"initialized processor '%s': %+v\", processorType, p)\n\t\t\treturn nil\n\t\t}\n\t\tp.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (p *starlarkProc) validate() error {\n\tif p.Source == \"\" && p.Script == \"\" {\n\t\treturn errors.New(\"one of 'script' or 'source' must be set\")\n\t}\n\tif p.Source != \"\" && p.Script != \"\" {\n\t\treturn errors.New(\"only one of 'script' or 'source' can be set\")\n\t}\n\treturn nil\n}\n\nfunc (p *starlarkProc) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tp.m.Lock()\n\tdefer p.m.Unlock()\n\tnumMsgs := len(es)\n\tif numMsgs == 0 {\n\t\treturn es\n\t}\n\tsevs := make([]starlark.Value, 0, numMsgs)\n\tfor _, ev := range es {\n\t\tif ev.Tags == nil {\n\t\t\tev.Tags = make(map[string]string)\n\t\t}\n\t\tif ev.Values == nil {\n\t\t\tev.Values = make(map[string]any)\n\t\t}\n\t\tif ev.Deletes == nil {\n\t\t\tev.Deletes = make([]string, 0)\n\t\t}\n\t\tsevs = append(sevs, fromEvent(ev))\n\t}\n\tif len(sevs) == 0 {\n\t\treturn es\n\t}\n\tif p.Debug {\n\t\tp.logger.Printf(\"events input: %v\", sevs)\n\t}\n\tr, err 
:= starlark.Call(p.thread, p.applyFn, sevs, nil)\n\tif err != nil {\n\t\tif p.Debug {\n\t\t\tp.logger.Printf(\"failed to run script with input %v: %v\", sevs, err)\n\t\t} else {\n\t\t\tp.logger.Printf(\"failed to run script: %v\", err)\n\t\t}\n\t\treturn es\n\t}\n\tif p.Debug {\n\t\tp.logger.Printf(\"script output: %+v\", r)\n\t}\n\t// r must implement .Iterate() and .Len()\n\tif r, ok := r.(starlark.Sequence); ok {\n\t\tres := make([]*formatters.EventMsg, 0, r.Len())\n\t\titer := r.Iterate()\n\t\tdefer r.Iterate().Done()\n\t\tfor {\n\t\t\tvar v starlark.Value\n\t\t\tok := iter.Next(&v)\n\t\t\tif !ok {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tswitch v := v.(type) {\n\t\t\tcase *event:\n\t\t\t\tres = append(res, toEvent(v))\n\t\t\tdefault:\n\t\t\t\tp.logger.Printf(\"unexpected return type: %T\", v)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif p.Debug {\n\t\t\tp.logger.Printf(\"resulting events: %v\", res)\n\t\t}\n\t\treturn res\n\t}\n\tp.logger.Printf(\"unexpected script output format, expecting a Sequence of Event, got %T\", r)\n\treturn es\n}\n\nfunc (p *starlarkProc) WithLogger(l *log.Logger) {\n\tif l != nil {\n\t\tp.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if p.Debug {\n\t\tp.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc (p *starlarkProc) sourceProgram(builtins starlark.StringDict) (*starlark.Program, error) {\n\tvar src any\n\tif p.Source != \"\" {\n\t\tsrc = p.Source\n\t}\n\toptions := &syntax.FileOptions{\n\t\tSet:            true,\n\t\tGlobalReassign: true,\n\t\tRecursion:      true,\n\t}\n\t_, program, err := starlark.SourceProgramOptions(options, p.Script, src, builtins.Has)\n\treturn program, err\n}\n\nfunc loadModule(module string) (starlark.StringDict, error) {\n\tswitch module {\n\tcase \"math.star\":\n\t\treturn starlark.StringDict{\n\t\t\t\"math\": math.Module,\n\t\t}, nil\n\tcase \"time.star\":\n\t\treturn starlark.StringDict{\n\t\t\t\"time\": time.Module,\n\t\t}, nil\n\tdefault:\n\t\treturn nil, 
fmt.Errorf(\"module %q unknown\", module)\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_starlark/event_starlark_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_starlark\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nfunc Test_starlarkProc_Apply(t *testing.T) {\n\ttype fields struct {\n\t\tcfg map[string]interface{}\n\t}\n\ttype args struct {\n\t\tes []*formatters.EventMsg\n\t}\n\ttests := []struct {\n\t\tname   string\n\t\tfields fields\n\t\targs   args\n\t\twant   []*formatters.EventMsg\n\t}{\n\t\t{\n\t\t\tname: \"print\",\n\t\t\tfields: fields{\n\t\t\t\tcfg: map[string]interface{}{\n\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\"source\": `\ndef apply(*events):\n  for e in events:\n    print(e)\n  return events\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\tTimestamp: 
42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"add_tag\",\n\t\t\tfields: fields{\n\t\t\t\tcfg: map[string]interface{}{\n\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\"source\": `\ndef apply(*events):\n  for e in events:\n    e.tags[\"new_tag\"] = \"new_tag\"\n  return events\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\":    \"v1\",\n\t\t\t\t\t\t\"tag2\":    \"v2\",\n\t\t\t\t\t\t\"new_tag\": \"new_tag\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: 
     \"ev2\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\":    \"v1\",\n\t\t\t\t\t\t\"tag2\":    \"v2\",\n\t\t\t\t\t\t\"new_tag\": \"new_tag\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"delete_tag\",\n\t\t\tfields: fields{\n\t\t\t\tcfg: map[string]interface{}{\n\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\"source\": `\ndef apply(*events):\n  for e in events:\n    e.tags.pop(\"tag1\")\n  return events\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: 
\"add_value\",\n\t\t\tfields: fields{\n\t\t\t\tcfg: map[string]interface{}{\n\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\"source\": `\ndef apply(*events):\n  for e in events:\n    e.values[\"new_val\"] = \"val\"\n  return events\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\":    42,\n\t\t\t\t\t\t\"val2\":    \"foo\",\n\t\t\t\t\t\t\"new_val\": \"val\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\":    42,\n\t\t\t\t\t\t\"val2\":    \"foo\",\n\t\t\t\t\t\t\"new_val\": \"val\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"delete_val\",\n\t\t\tfields: fields{\n\t\t\t\tcfg: map[string]interface{}{\n\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\"source\": `\ndef apply(*events):\n  for e in events:\n    e.values.pop(\"val1\")\n  return 
events\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"insert_event1\",\n\t\t\tfields: fields{\n\t\t\t\tcfg: map[string]interface{}{\n\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\"source\": `\ndef apply(*events):\n  ne = Event(\"new_event\")\n  ne.tags[\"tag1\"] = \"tag1\"\n  evs = list(events)\n  evs.append(ne)\n  return evs\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 
42,\n\t\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"new_event\",\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"tag1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"insert_event2\",\n\t\t\tfields: fields{\n\t\t\t\tcfg: map[string]interface{}{\n\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\"source\": `\ndef apply(*events):\n  ne = Event(\"new_event\", 42, {\"a\": \"b\"}, {\"foo\": \"bar\"})\n  print(ne)\n  evs = list(events)\n  evs.append(ne)\n  return evs`,\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\t\"val2\": 
\"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"tag1\": \"v1\",\n\t\t\t\t\t\t\"tag2\": \"v2\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t\"val2\": \"foo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"new_event\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"a\": \"b\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"foo\": \"bar\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"use_cache\",\n\t\t\tfields: fields{\n\t\t\t\tcfg: map[string]interface{}{\n\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\"source\": `\ncache = {}\ndef apply(*events):\n  evs = []\n  for e in events:\n\ttarget_if = e.tags[\"target\"] + \"_\" + e.tags[\"interface_name\"]\n\tif e.values.get(\"description\"):\n\t  cache[target_if] = e.values[\"description\"]\n  for e in events:\n    if e.values.get(\"description\"):\t\n      continue\n    e.tags[\"description\"] = cache[target_if]\n    evs.append(e)\n  return evs\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\t\tTimestamp: 
42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"target\":         \"router1\",\n\t\t\t\t\t\t\t\"interface_name\": \"if1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"description\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"target\":         \"router1\",\n\t\t\t\t\t\t\t\"interface_name\": \"if1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev2\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"target\":         \"router1\",\n\t\t\t\t\t\t\"interface_name\": \"if1\",\n\t\t\t\t\t\t\"description\":    \"foo\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"set_tags\",\n\t\t\tfields: fields{\n\t\t\t\tcfg: map[string]interface{}{\n\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\"source\": `\ndef apply(*events):\n  for e in events:\n\te.tags = {\"t1\": \"v1\"}\n  return events\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"target\":         \"router1\",\n\t\t\t\t\t\t\t\"interface_name\": \"if1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"t1\": \"v1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: 
\"set_values\",\n\t\t\tfields: fields{\n\t\t\t\tcfg: map[string]interface{}{\n\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\"source\": `\ndef apply(*events):\n  for e in events:\n\te.values = {\"t1\": \"v1\"}\n  return events\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"target\":         \"router1\",\n\t\t\t\t\t\t\t\"interface_name\": \"if1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"target\":         \"router1\",\n\t\t\t\t\t\t\"interface_name\": \"if1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"t1\": \"v1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"set_deletes\",\n\t\t\tfields: fields{\n\t\t\t\tcfg: map[string]interface{}{\n\t\t\t\t\t\"debug\": true,\n\t\t\t\t\t\"source\": `\ncache = {}\ndef apply(*events):\n  for e in events:\n\te.deletes = [\"path1\", \"path2\"]\n  return events\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"target\":         \"router1\",\n\t\t\t\t\t\t\t\"interface_name\": \"if1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"ev1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"target\":         \"router1\",\n\t\t\t\t\t\t\"interface_name\": \"if1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: 
map[string]interface{}{\n\t\t\t\t\t\t\"val1\": 42,\n\t\t\t\t\t},\n\t\t\t\t\tDeletes: []string{\"path1\", \"path2\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tp := &starlarkProc{}\n\t\t\terr := p.Init(tt.fields.cfg, formatters.WithLogger(log.New(os.Stderr, \"test\", log.Default().Flags())))\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%q failed to init processor: %v\", tt.name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tgot := p.Apply(tt.args.es...)\n\t\t\tt.Logf(\"got : %v\", got)\n\t\t\tt.Logf(\"want: %v\", tt.want)\n\t\t\t// compare lengths first\n\t\t\tif len(got) != len(tt.want) {\n\t\t\t\tt.Logf(\"expected and gotten outputs are not of the same length\")\n\t\t\t\tt.Logf(\"expected: %+v\", tt.want)\n\t\t\t\tt.Logf(\"     got: %+v\", got)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\t//\n\t\t\tfor j := range got {\n\t\t\t\tt.Logf(\"%q index %d, output=%+v\", tt.name, j, got[j])\n\t\t\t\tif !reflect.DeepEqual(got[j].Values, tt.want[j].Values) {\n\t\t\t\t\tt.Logf(\"failed at %s index %d, values are different\", tt.name, j)\n\t\t\t\t\tt.Logf(\"expected: %+v\", tt.want[j])\n\t\t\t\t\tt.Logf(\"     got: %+v\", got[j])\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t\tif !reflect.DeepEqual(got[j].Tags, tt.want[j].Tags) {\n\t\t\t\t\tt.Logf(\"failed at %s index %d, tags are different\", tt.name, j)\n\t\t\t\t\tt.Logf(\"expected: %+v\", tt.want[j])\n\t\t\t\t\tt.Logf(\"     got: %+v\", got[j])\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t\tif !reflect.DeepEqual(got[j].Name, tt.want[j].Name) {\n\t\t\t\t\tt.Logf(\"failed at %s index %d, names are different\", tt.name, j)\n\t\t\t\t\tt.Logf(\"expected: %+v\", tt.want[j])\n\t\t\t\t\tt.Logf(\"     got: %+v\", got[j])\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t\tif !reflect.DeepEqual(got[j].Timestamp, tt.want[j].Timestamp) {\n\t\t\t\t\tt.Logf(\"failed at %s index %d, timestamps are different\", tt.name, j)\n\t\t\t\t\tt.Logf(\"expected: %+v\", tt.want[j])\n\t\t\t\t\tt.Logf(\"     got: %+v\", 
got[j])\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_strings/event_strings.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_strings\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"golang.org/x/text/cases\"\n\t\"golang.org/x/text/language\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-strings\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n\tnameField     = \"name\"\n\tvalueField    = \"value\"\n)\n\n// stringsp provides some of Golang's strings functions to transform: tags, tag names, values and value names\ntype stringsp struct {\n\tformatters.BaseProcessor\n\n\tTags       []string                `mapstructure:\"tags,omitempty\" json:\"tags,omitempty\"`\n\tValues     []string                `mapstructure:\"values,omitempty\" json:\"values,omitempty\"`\n\tTagNames   []string                `mapstructure:\"tag-names,omitempty\" json:\"tag-names,omitempty\"`\n\tValueNames []string                `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tDebug      bool                    `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\tTransforms []map[string]*transform `mapstructure:\"transforms,omitempty\" json:\"transforms,omitempty\"`\n\n\ttags      []*regexp.Regexp\n\tvalues    []*regexp.Regexp\n\ttagKeys   []*regexp.Regexp\n\tvalueKeys []*regexp.Regexp\n\n\tlogger *log.Logger\n}\n\ntype transform struct {\n\top string\n\t// apply the transformation on name or value\n\tApplyOn string 
`mapstructure:\"apply-on,omitempty\" json:\"apply-on,omitempty\"`\n\t// Keep the old value or not if the name changed\n\tKeep bool `mapstructure:\"keep,omitempty\" json:\"keep,omitempty\"`\n\t// string to be replaced\n\tOld string `mapstructure:\"old,omitempty\" json:\"old,omitempty\"`\n\t// replacement string of Old\n\tNew string `mapstructure:\"new,omitempty\" json:\"new,omitempty\"`\n\t// Prefix to be trimmed\n\tPrefix string `mapstructure:\"prefix,omitempty\" json:\"prefix,omitempty\"`\n\t// Suffix to be trimmed\n\tSuffix string `mapstructure:\"suffix,omitempty\" json:\"suffix,omitempty\"`\n\t// character to split on\n\tSplitOn string `mapstructure:\"split-on,omitempty\" json:\"split-on,omitempty\"`\n\t// character to join with\n\tJoinWith string `mapstructure:\"join-with,omitempty\" json:\"join-with,omitempty\"`\n\t// number of first items to ignore when joining\n\tIgnoreFirst int `mapstructure:\"ignore-first,omitempty\" json:\"ignore-first,omitempty\"`\n\t// number of last items to ignore when joining\n\tIgnoreLast int `mapstructure:\"ignore-last,omitempty\" json:\"ignore-last,omitempty\"`\n\n\t//\n\treplaceRegexp *regexp.Regexp\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &stringsp{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (s *stringsp) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, s)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(s)\n\t}\n\tfor i := range s.Transforms {\n\t\tfor k := range s.Transforms[i] {\n\t\t\ts.Transforms[i][k].op = k\n\t\t\tswitch k {\n\t\t\tcase \"replace\":\n\t\t\t\ts.Transforms[i][k].replaceRegexp, err = regexp.Compile(s.Transforms[i][k].Old)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// init tags regex\n\ts.tags = make([]*regexp.Regexp, 0, len(s.Tags))\n\tfor _, reg := range s.Tags {\n\t\tre, err := 
regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.tags = append(s.tags, re)\n\t}\n\t// init tag names regex\n\ts.tagKeys = make([]*regexp.Regexp, 0, len(s.TagNames))\n\tfor _, reg := range s.TagNames {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.tagKeys = append(s.tagKeys, re)\n\t}\n\t// init values regex\n\ts.values = make([]*regexp.Regexp, 0, len(s.Values))\n\tfor _, reg := range s.Values {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.values = append(s.values, re)\n\t}\n\t// init value Keys regex\n\ts.valueKeys = make([]*regexp.Regexp, 0, len(s.ValueNames))\n\tfor _, reg := range s.ValueNames {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.valueKeys = append(s.valueKeys, re)\n\t}\n\tif s.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(s)\n\t\tif err != nil {\n\t\t\ts.logger.Printf(\"initialized processor '%s': %+v\", processorType, s)\n\t\t\treturn nil\n\t\t}\n\t\ts.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (s *stringsp) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor k, v := range e.Values {\n\t\t\tfor _, re := range s.valueKeys {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\ts.logger.Printf(\"value name '%s' matched regex '%s'\", k, re.String())\n\t\t\t\t\ts.applyValueTransformations(e, k, v)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, re := range s.values {\n\t\t\t\tif vs, ok := v.(string); ok {\n\t\t\t\t\tif re.MatchString(vs) {\n\t\t\t\t\t\ts.logger.Printf(\"value '%s' matched regex '%s'\", vs, re.String())\n\t\t\t\t\t\ts.applyValueTransformations(e, k, vs)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor k, v := range e.Tags {\n\t\t\tfor _, re := range s.tagKeys {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\ts.logger.Printf(\"tag name '%s' matched regex 
'%s'\", k, re.String())\n\t\t\t\t\ts.applyTagTransformations(e, k, v)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, re := range s.tags {\n\t\t\t\tif re.MatchString(v) {\n\t\t\t\t\ts.logger.Printf(\"tag '%s' matched regex '%s'\", k, re.String())\n\t\t\t\t\ts.applyTagTransformations(e, k, v)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (s *stringsp) WithLogger(l *log.Logger) {\n\tif s.Debug && l != nil {\n\t\ts.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if s.Debug {\n\t\ts.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc (s *stringsp) applyValueTransformations(e *formatters.EventMsg, k string, v interface{}) {\n\tfor _, trans := range s.Transforms {\n\t\tfor _, t := range trans {\n\t\t\tif !t.Keep {\n\t\t\t\tdelete(e.Values, k)\n\t\t\t}\n\t\t\tk, v = t.apply(k, v)\n\t\t\te.Values[k] = v\n\t\t}\n\t}\n}\n\nfunc (s *stringsp) applyTagTransformations(e *formatters.EventMsg, k, v string) {\n\tfor _, trans := range s.Transforms {\n\t\tfor _, t := range trans {\n\t\t\tif !t.Keep {\n\t\t\t\tdelete(e.Tags, k)\n\t\t\t}\n\t\t\tvar vi interface{}\n\t\t\tk, vi = t.apply(k, v)\n\t\t\tif vs, ok := vi.(string); ok {\n\t\t\t\te.Tags[k] = vs\n\t\t\t\tv = vs // change the original value in case it's used in the next transform\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ts.logger.Printf(\"failed to assert %v type as string\", vi)\n\t\t}\n\t}\n}\n\nfunc (t *transform) apply(k string, v interface{}) (string, interface{}) {\n\tswitch t.op {\n\tcase \"replace\":\n\t\treturn t.replace(k, v)\n\tcase \"trim-prefix\":\n\t\treturn t.trimPrefix(k, v)\n\tcase \"trim-suffix\":\n\t\treturn t.trimSuffix(k, v)\n\tcase \"title\":\n\t\treturn t.toTitle(k, v)\n\tcase \"to-lower\":\n\t\treturn t.toLower(k, v)\n\tcase \"to-upper\":\n\t\treturn t.toUpper(k, v)\n\tcase \"split\":\n\t\treturn t.split(k, v)\n\tcase \"path-base\":\n\t\treturn t.pathBase(k, v)\n\t}\n\treturn k, v\n}\n\nfunc (t *transform) replace(k string, v interface{}) (string, interface{}) 
{\n\tswitch t.ApplyOn {\n\tcase nameField:\n\t\tk = t.replaceRegexp.ReplaceAllString(k, t.New)\n\tcase valueField:\n\t\tif vs, ok := v.(string); ok {\n\t\t\tv = t.replaceRegexp.ReplaceAllString(vs, t.New)\n\t\t}\n\t}\n\treturn k, v\n}\n\nfunc (t *transform) trimPrefix(k string, v interface{}) (string, interface{}) {\n\tswitch t.ApplyOn {\n\tcase nameField:\n\t\tk = strings.TrimPrefix(k, t.Prefix)\n\tcase valueField:\n\t\tif vs, ok := v.(string); ok {\n\t\t\tv = strings.TrimPrefix(vs, t.Prefix)\n\t\t}\n\t}\n\treturn k, v\n}\n\nfunc (t *transform) trimSuffix(k string, v interface{}) (string, interface{}) {\n\tswitch t.ApplyOn {\n\tcase nameField:\n\t\tk = strings.TrimSuffix(k, t.Suffix)\n\tcase valueField:\n\t\tif vs, ok := v.(string); ok {\n\t\t\tv = strings.TrimSuffix(vs, t.Suffix)\n\t\t}\n\t}\n\treturn k, v\n}\n\nfunc (t *transform) toTitle(k string, v interface{}) (string, interface{}) {\n\tswitch t.ApplyOn {\n\tcase nameField:\n\t\tk = cases.Title(language.English).String(k)\n\tcase valueField:\n\t\tif vs, ok := v.(string); ok {\n\t\t\tv = cases.Title(language.English).String(vs)\n\t\t}\n\t}\n\treturn k, v\n}\n\nfunc (t *transform) toLower(k string, v interface{}) (string, interface{}) {\n\tswitch t.ApplyOn {\n\tcase nameField:\n\t\tk = strings.ToLower(k)\n\tcase valueField:\n\t\tif vs, ok := v.(string); ok {\n\t\t\tv = strings.ToLower(vs)\n\t\t}\n\t}\n\treturn k, v\n}\n\nfunc (t *transform) toUpper(k string, v interface{}) (string, interface{}) {\n\tswitch t.ApplyOn {\n\tcase nameField:\n\t\tk = strings.ToUpper(k)\n\tcase valueField:\n\t\tif vs, ok := v.(string); ok {\n\t\t\tv = strings.ToUpper(vs)\n\t\t}\n\t}\n\treturn k, v\n}\n\nfunc (t *transform) split(k string, v interface{}) (string, interface{}) {\n\tswitch t.ApplyOn {\n\tcase nameField:\n\t\titems := strings.Split(k, t.SplitOn)\n\t\tnumItems := len(items)\n\t\tif numItems <= t.IgnoreFirst || numItems <= t.IgnoreLast || t.IgnoreFirst >= numItems-t.IgnoreLast {\n\t\t\treturn \"\", v\n\t\t}\n\t\tk = 
strings.Join(items[t.IgnoreFirst:numItems-t.IgnoreLast], t.JoinWith)\n\tcase valueField:\n\t\tif vs, ok := v.(string); ok {\n\t\t\titems := strings.Split(vs, t.SplitOn)\n\t\t\tnumItems := len(items)\n\t\t\tif numItems <= t.IgnoreFirst || numItems <= t.IgnoreLast || t.IgnoreFirst >= numItems-t.IgnoreLast {\n\t\t\t\treturn k, \"\"\n\t\t\t}\n\t\t\tv = strings.Join(items[t.IgnoreFirst:numItems-t.IgnoreLast], t.JoinWith)\n\t\t}\n\t}\n\treturn k, v\n}\n\nfunc (t *transform) pathBase(k string, v interface{}) (string, interface{}) {\n\tswitch t.ApplyOn {\n\tcase nameField:\n\t\tk = filepath.Base(k)\n\tcase valueField:\n\t\tif vs, ok := v.(string); ok {\n\t\t\tv = filepath.Base(vs)\n\t\t}\n\t}\n\treturn k, v\n}\n"
  },
  {
    "path": "pkg/formatters/event_strings/event_strings_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_strings\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"replace\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"^name$\"},\n\t\t\t\"tag-names\":   []string{\"^tag$\"},\n\t\t\t\"debug\":       true,\n\t\t\t\"transforms\": []map[string]*transform{\n\t\t\t\t{\n\t\t\t\t\t\"replace\": &transform{\n\t\t\t\t\t\tApplyOn: \"name\",\n\t\t\t\t\t\tOld:     \"name\",\n\t\t\t\t\t\tNew:     \"new_name\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"replace\": &transform{\n\t\t\t\t\t\tApplyOn: \"name\",\n\t\t\t\t\t\tOld:     \"tag\",\n\t\t\t\t\t\tNew:     \"new_tag\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\toutput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"new_name\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"new_tag\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"trim_prefix\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"^prefix_\"},\n\t\t\t\"transforms\": []map[string]*transform{\n\t\t\t\t{\n\t\t\t\t\t\"trim-prefix\": &transform{\n\t\t\t\t\t\tApplyOn: \"name\",\n\t\t\t\t\t\tPrefix:  \"prefix_\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"prefix_name\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"prefix_name\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"prefix_name\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"trim-suffix\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"_suffix$\"},\n\t\t\t\"transforms\": []map[string]*transform{\n\t\t\t\t{\n\t\t\t\t\t\"trim-suffix\": 
&transform{\n\t\t\t\t\t\tApplyOn: \"name\",\n\t\t\t\t\t\tSuffix:  \"_suffix\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"name_suffix\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name_suffix\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"name_suffix\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"title\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"title\"},\n\t\t\t\"transforms\": []map[string]*transform{\n\t\t\t\t{\n\t\t\t\t\t\"title\": &transform{\n\t\t\t\t\t\tApplyOn: \"name\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"title\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"title\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: 
map[string]string{\n\t\t\t\t\t\t\t\"title\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"Title\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"to_upper\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"to_be_capitalized\"},\n\t\t\t\"transforms\": []map[string]*transform{\n\t\t\t\t{\n\t\t\t\t\t\"to-upper\": &transform{\n\t\t\t\t\t\tApplyOn: \"name\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"to_be_capitalized\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"to_be_capitalized\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"to_be_capitalized\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"TO_BE_CAPITALIZED\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"to_lower\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"TO_BE_LOWERED\"},\n\t\t\t\"transforms\": []map[string]*transform{\n\t\t\t\t{\n\t\t\t\t\t\"to-lower\": &transform{\n\t\t\t\t\t\tApplyOn: \"name\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"TO_BE_LOWERED\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"TO_BE_LOWERED\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"TO_BE_LOWERED\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"to_be_lowered\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"split\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"path/to/a/resource\"},\n\t\t\t\"transforms\": []map[string]*transform{\n\t\t\t\t{\n\t\t\t\t\t\"split\": &transform{\n\t\t\t\t\t\tApplyOn:     \"name\",\n\t\t\t\t\t\tSplitOn:     \"/\",\n\t\t\t\t\t\tJoinWith:    \"_\",\n\t\t\t\t\t\tIgnoreFirst: 2,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"path/to/a/resource\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"path/to/a/resource\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"path/to/a/resource\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"a_resource\": 
\"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"path_base\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"path/to/a/resource\"},\n\t\t\t\"transforms\": []map[string]*transform{\n\t\t\t\t{\n\t\t\t\t\t\"path-base\": &transform{\n\t\t\t\t\t\tApplyOn: \"name\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"path/to/a/resource\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"path/to/a/resource\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"path/to/a/resource\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"resource\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"path/to/a/resource\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"path/to/a/resource\": 0,\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"path/to/a/resource\": \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"resource\": 0,\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"replace_regex\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": 
[]string{\".\"},\n\t\t\t\"tag-names\":   []string{\".\"},\n\t\t\t\"debug\":       true,\n\t\t\t\"transforms\": []map[string]*transform{\n\t\t\t\t{\n\t\t\t\t\t\"replace\": &transform{\n\t\t\t\t\t\tApplyOn: \"name\",\n\t\t\t\t\t\tOld:     \"-state$\",\n\t\t\t\t\t\tNew:     \"-state-code\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"replace\": &transform{\n\t\t\t\t\t\tApplyOn: \"name\",\n\t\t\t\t\t\tOld:     \"-tag$\",\n\t\t\t\t\t\tNew:     \"-better-tag\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"interface-oper-state\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"interface-oper-state-code\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"my-tag\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"my-better-tag\": \"foo\",\n\t\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventStrings(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tt.Log(\"found processor\")\n\t\t\tp := pi()\n\t\t\terr := p.Init(ts.processor)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"initialized for test %s: %+v\", name, p)\n\t\t\tfor i, item := 
range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Logf(\"failed at event strings, item %d, index %d\", i, j)\n\t\t\t\t\t\t\tt.Logf(\"expected: %#v\", item.output[j])\n\t\t\t\t\t\t\tt.Logf(\"     got: %#v\", outs[j])\n\t\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage formatters\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n)\n\ntype item struct {\n\tev *EventMsg\n\tm  map[string]interface{}\n}\n\nvar eventMsgtestSet = map[string][]item{\n\t\"nil\": {\n\t\t{\n\t\t\tev: nil,\n\t\t\tm:  nil,\n\t\t},\n\t\t{\n\t\t\tev: new(EventMsg),\n\t\t\tm:  make(map[string]interface{}),\n\t\t},\n\t},\n\t\"filled\": {\n\t\t{\n\t\t\tev: &EventMsg{\n\t\t\t\tTimestamp: 100,\n\t\t\t\tValues:    map[string]interface{}{\"value1\": int64(1)},\n\t\t\t\tTags:      map[string]string{\"tag1\": \"1\"},\n\t\t\t},\n\t\t\tm: map[string]interface{}{\n\t\t\t\t\"timestamp\": int64(100),\n\t\t\t\t\"values\": map[string]interface{}{\n\t\t\t\t\t\"value1\": int64(1),\n\t\t\t\t},\n\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tev: &EventMsg{\n\t\t\t\tName:      \"sub1\",\n\t\t\t\tTimestamp: 100,\n\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\"tag2\": \"1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tm: map[string]interface{}{\n\t\t\t\t\"name\":      \"sub1\",\n\t\t\t\t\"timestamp\": int64(100),\n\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\"tag2\": \"1\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tev: &EventMsg{\n\t\t\t\tName:      \"sub1\",\n\t\t\t\tTimestamp: 100,\n\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\"value1\": int64(1),\n\t\t\t\t\t\"value2\": 
int64(1),\n\t\t\t\t},\n\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\"tag2\": \"1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tm: map[string]interface{}{\n\t\t\t\t\"name\":      \"sub1\",\n\t\t\t\t\"timestamp\": int64(100),\n\t\t\t\t\"values\": map[string]interface{}{\n\t\t\t\t\t\"value1\": int64(1),\n\t\t\t\t\t\"value2\": int64(1),\n\t\t\t\t},\n\t\t\t\t\"tags\": map[string]interface{}{\n\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\"tag2\": \"1\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestToMap(t *testing.T) {\n\tfor name, items := range eventMsgtestSet {\n\t\tfor i, item := range items {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\tout := item.ev.ToMap()\n\t\t\t\tif !reflect.DeepEqual(out, item.m) {\n\t\t\t\t\tt.Logf(\"failed at %q item %d\", name, i)\n\t\t\t\t\tt.Logf(\"expected: (%T)%+v\", item.m, item.m)\n\t\t\t\t\tt.Logf(\"     got: (%T)%+v\", out, out)\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestFromMap(t *testing.T) {\n\tfor name, items := range eventMsgtestSet {\n\t\tfor i, item := range items {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\tout, err := EventFromMap(item.m)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Logf(\"failed at %q: %v\", name, err)\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t\tif !reflect.DeepEqual(out, item.ev) {\n\t\t\t\t\tt.Logf(\"failed at %q item %d\", name, i)\n\t\t\t\t\tt.Logf(\"expected: (%T)%+v\", item.m, item.m)\n\t\t\t\t\tt.Logf(\"     got: (%T)%+v\", out, out)\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestTagsFromGNMIPath(t *testing.T) {\n\ttype args struct {\n\t\tp *gnmi.Path\n\t}\n\ttests := []struct {\n\t\tname  string\n\t\targs  args\n\t\twant  string\n\t\twant1 map[string]string\n\t}{\n\t\t{\n\t\t\tname:  \"nil\",\n\t\t\targs:  args{p: nil},\n\t\t\twant:  \"\",\n\t\t\twant1: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"path_no_keys\",\n\t\t\targs: args{p: &gnmi.Path{\n\t\t\t\tElem: 
[]*gnmi.PathElem{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"statistics\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t\twant:  \"/interface/statistics\",\n\t\t\twant1: make(map[string]string),\n\t\t},\n\t\t{\n\t\t\tname: \"path_with_keys\",\n\t\t\targs: args{p: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"statistics\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t\twant: \"/interface/statistics\",\n\t\t\twant1: map[string]string{\n\t\t\t\t\"interface_name\": \"ethernet-1/1\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"path_with_multiple_keys\",\n\t\t\targs: args{p: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"elem1\",\n\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\"bar\": \"bar_val\",\n\t\t\t\t\t\t\t\"foo\": \"foo_val\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"elem2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t\twant: \"/elem1/elem2\",\n\t\t\twant1: map[string]string{\n\t\t\t\t\"elem1_bar\": \"bar_val\",\n\t\t\t\t\"elem1_foo\": \"foo_val\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"path_with_multiple_keys_and_target\",\n\t\t\targs: args{p: &gnmi.Path{\n\t\t\t\tTarget: \"target1\",\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"elem1\",\n\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\"bar\": \"bar_val\",\n\t\t\t\t\t\t\t\"foo\": \"foo_val\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"elem2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t\twant: \"/elem1/elem2\",\n\t\t\twant1: map[string]string{\n\t\t\t\t\"elem1_bar\": \"bar_val\",\n\t\t\t\t\"elem1_foo\": \"foo_val\",\n\t\t\t\t\"target\":    \"target1\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"path_with_multiple_keys_target_and_origin\",\n\t\t\targs: args{p: 
&gnmi.Path{\n\t\t\t\tOrigin: \"origin1\",\n\t\t\t\tTarget: \"target1\",\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"elem1\",\n\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\"bar\": \"bar_val\",\n\t\t\t\t\t\t\t\"foo\": \"foo_val\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"elem2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t\twant: \"origin1:/elem1/elem2\",\n\t\t\twant1: map[string]string{\n\t\t\t\t\"elem1_bar\": \"bar_val\",\n\t\t\t\t\"elem1_foo\": \"foo_val\",\n\t\t\t\t\"target\":    \"target1\",\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, got1 := tagsFromGNMIPath(tt.args.p)\n\t\t\tif got != tt.want {\n\t\t\t\tt.Errorf(\"TagsFromGNMIPath() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t\tif !cmp.Equal(got1, tt.want1) {\n\t\t\t\tt.Errorf(\"TagsFromGNMIPath() got1 = %v, want %v\", got1, tt.want1)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_getValueFlat(t *testing.T) {\n\ttype args struct {\n\t\tprefix   string\n\t\tupdValue *gnmi.TypedValue\n\t}\n\ttests := []struct {\n\t\tname    string\n\t\targs    args\n\t\twant    map[string]interface{}\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"simple_json_value\",\n\t\t\targs: args{\n\t\t\t\tprefix: \"/configure/router/interface\",\n\t\t\t\tupdValue: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\tJsonVal: []byte(`{\n\t\t\t\t\t\t\t\"admin-state\": \"enable\",\n\t\t\t\t\t\t\t\"ipv4\": {\n\t\t\t\t\t\t\t\t\"primary\": {\n\t\t\t\t\t\t\t\t\t\"address\": \"1.1.1.1\",\n\t\t\t\t\t\t\t\t\t\"prefix-length\": 32\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}`),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[string]interface{}{\n\t\t\t\t\"/configure/router/interface/admin-state\":                \"enable\",\n\t\t\t\t\"/configure/router/interface/ipv4/primary/address\":       \"1.1.1.1\",\n\t\t\t\t\"/configure/router/interface/ipv4/primary/prefix-length\": float64(32),\n\t\t\t},\n\t\t\twantErr: 
false,\n\t\t},\n\t\t{\n\t\t\tname: \"json_value_with_list\",\n\t\t\targs: args{\n\t\t\t\tprefix: \"/network-instance\",\n\t\t\t\tupdValue: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{\n\t\t\t\t\t\tJsonVal: []byte(`{\n\t\t\t\t\t\t\t\"interface\": [\n\t\t\t\t\t\t\t\t\"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\"ethernet-1/2\",\n\t\t\t\t\t\t\t\t\"ethernet-1/3\",\n\t\t\t\t\t\t\t\t\"ethernet-1/4\"\n\t\t\t\t\t\t\t]\n\t\t\t\t\t\t}`),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: map[string]interface{}{\n\t\t\t\t\"/network-instance/interface.0\": \"ethernet-1/1\",\n\t\t\t\t\"/network-instance/interface.1\": \"ethernet-1/2\",\n\t\t\t\t\"/network-instance/interface.2\": \"ethernet-1/3\",\n\t\t\t\t\"/network-instance/interface.3\": \"ethernet-1/4\",\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := getValueFlat(tt.args.prefix, tt.args.updValue)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"getValueFlat() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !cmp.Equal(got, tt.want) {\n\t\t\t\tfor k, v := range got {\n\t\t\t\t\tfmt.Printf(\"%s: %v: %T\\n\", k, v, v)\n\t\t\t\t}\n\t\t\t\tt.Errorf(\"got:  %+v\", got)\n\t\t\t\tt.Errorf(\"want: %+v\", tt.want)\n\t\t\t\tt.Errorf(\"getValueFlat() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestResponseToEventMsgs(t *testing.T) {\n\ttype args struct {\n\t\tname string\n\t\trsp  *gnmi.SubscribeResponse\n\t\tmeta map[string]string\n\t\teps  []EventProcessor\n\t}\n\ttests := []struct {\n\t\tname    string\n\t\targs    args\n\t\twant    []*EventMsg\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"sync_response\",\n\t\t\targs: args{\n\t\t\t\tname: \"sub1\",\n\t\t\t\trsp: &gnmi.SubscribeResponse{\n\t\t\t\t\tResponse: &gnmi.SubscribeResponse_SyncResponse{\n\t\t\t\t\t\tSyncResponse: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant:    []*EventMsg{},\n\t\t\twantErr: 
false,\n\t\t},\n\t\t{\n\t\t\tname: \"single_update_ascii_value\",\n\t\t\targs: args{\n\t\t\t\tname: \"sub1\",\n\t\t\t\trsp: &gnmi.SubscribeResponse{\n\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t{Name: \"oper-state\"},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: \"up\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"interface_name\": \"ethernet-1/1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"/interface/oper-state\": \"up\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"single_update_string_json_value\",\n\t\t\targs: args{\n\t\t\t\tname: \"sub1\",\n\t\t\t\trsp: &gnmi.SubscribeResponse{\n\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t{Name: 
\"oper-state\"},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{JsonVal: []byte(\"\\\"up\\\"\")},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"interface_name\": \"ethernet-1/1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"/interface/oper-state\": \"up\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"single_update_object_json_value\",\n\t\t\targs: args{\n\t\t\t\tname: \"sub1\",\n\t\t\t\trsp: &gnmi.SubscribeResponse{\n\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t{Name: \"statistics\"},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_JsonVal{JsonVal: []byte(`{\"in-octets\":\"10\",\"out-octets\":\"11\"}`)},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"interface_name\": \"ethernet-1/1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"/interface/statistics/in-octets\":  
\"10\",\n\t\t\t\t\t\t\"/interface/statistics/out-octets\": \"11\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"multiple_updates_single_ascii_values\",\n\t\t\targs: args{\n\t\t\t\tname: \"sub1\",\n\t\t\t\trsp: &gnmi.SubscribeResponse{\n\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\t\tUpdate: []*gnmi.Update{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t{Name: \"admin-state\"},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: \"enable\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t{Name: \"oper-state\"},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\t\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: \"up\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"interface_name\": \"ethernet-1/1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"/interface/admin-state\": 
\"enable\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"interface_name\": \"ethernet-1/1\",\n\t\t\t\t\t},\n\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\"/interface/oper-state\": \"up\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"with_single_delete\",\n\t\t\targs: args{\n\t\t\t\tname: \"sub1\",\n\t\t\t\trsp: &gnmi.SubscribeResponse{\n\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\t\tDelete: []*gnmi.Path{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"interface_name\": \"ethernet-1/1\",\n\t\t\t\t\t},\n\t\t\t\t\tDeletes: []string{\n\t\t\t\t\t\t\"/interface\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"with_2_deletes\",\n\t\t\targs: args{\n\t\t\t\tname: \"sub1\",\n\t\t\t\trsp: &gnmi.SubscribeResponse{\n\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\t\tDelete: []*gnmi.Path{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\"name\": 
\"ethernet-1/1\",\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tName: \"interface\",\n\t\t\t\t\t\t\t\t\t\t\tKey: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\t\"name\": \"ethernet-1/2\",\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"interface_name\": \"ethernet-1/1\",\n\t\t\t\t\t},\n\t\t\t\t\tDeletes: []string{\n\t\t\t\t\t\t\"/interface\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\"interface_name\": \"ethernet-1/2\",\n\t\t\t\t\t},\n\t\t\t\t\tDeletes: []string{\n\t\t\t\t\t\t\"/interface\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := ResponseToEventMsgs(tt.args.name, tt.args.rsp, tt.args.meta, tt.args.eps...)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"ResponseToEventMsgs() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"ResponseToEventMsgs() got = %v\", got)\n\t\t\t\tt.Errorf(\"ResponseToEventMsgs() want= %v\", tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_time_epoch/event_time_epoch.go",
    "content": "// © 2025 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_time_epoch\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-time-epoch\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// epoch converts a time string to epoch time\ntype epoch struct {\n\tformatters.BaseProcessor\n\n\tValues    []string `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tPrecision string   `mapstructure:\"precision,omitempty\" json:\"precision,omitempty\"`\n\tFormat    string   `mapstructure:\"format,omitempty\" json:\"format,omitempty\"`\n\tDebug     bool     `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\tvalues []*regexp.Regexp\n\tlogger *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &epoch{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (d *epoch) Init(cfg any, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, d)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(d)\n\t}\n\tif d.Format == \"\" {\n\t\td.Format = time.RFC3339\n\t}\n\t// init values regex\n\td.values = make([]*regexp.Regexp, 0, len(d.Values))\n\tfor _, reg := range d.Values {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.values = append(d.values, re)\n\t}\n\tif d.logger.Writer() != io.Discard 
{\n\t\tb, err := json.Marshal(d)\n\t\tif err != nil {\n\t\t\td.logger.Printf(\"initialized processor '%s': %+v\", processorType, d)\n\t\t\treturn nil\n\t\t}\n\t\td.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (d *epoch) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor k, v := range e.Values {\n\t\t\tfor _, re := range d.values {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\td.logger.Printf(\"key '%s' matched regex '%s'\", k, re.String())\n\t\t\t\t\tswitch v := v.(type) {\n\t\t\t\t\tcase string:\n\t\t\t\t\t\ttd, err := time.Parse(d.Format, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\td.logger.Printf(\"failed to convert '%v' to time: %v\", v, err)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tvar ts int64\n\t\t\t\t\t\tswitch d.Precision {\n\t\t\t\t\t\tcase \"s\", \"sec\", \"second\":\n\t\t\t\t\t\t\tts = td.Unix()\n\t\t\t\t\t\tcase \"ms\", \"millisecond\":\n\t\t\t\t\t\t\tts = td.UnixMilli()\n\t\t\t\t\t\tcase \"us\", \"microsecond\":\n\t\t\t\t\t\t\tts = td.UnixMicro()\n\t\t\t\t\t\tcase \"ns\", \"nanosecond\":\n\t\t\t\t\t\t\tts = td.UnixNano()\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tts = td.UnixNano()\n\t\t\t\t\t\t}\n\t\t\t\t\t\te.Values[k] = ts\n\t\t\t\t\tdefault:\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (d *epoch) WithLogger(l *log.Logger) {\n\tif d.Debug && l != nil {\n\t\td.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if d.Debug {\n\t\td.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_time_epoch/event_time_epoch_test.go",
    "content": "// © 2025 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_time_epoch\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nfunc Test_epoch_Apply(t *testing.T) {\n\ttype fields map[string]any\n\ttype args struct {\n\t\tes []*formatters.EventMsg\n\t}\n\ttests := []struct {\n\t\tname   string\n\t\tfields fields\n\t\targs   args\n\t\twant   []*formatters.EventMsg\n\t}{\n\t\t{\n\t\t\tname: \"nil_input\",\n\t\t\tfields: map[string]interface{}{\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{},\n\t\t\twant: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"simple\",\n\t\t\tfields: map[string]any{\n\t\t\t\t\"precision\": \"s\",\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*last-change\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]any{\n\t\t\t\t\t\t\t\"interface/last-change\": \"2024-06-19T15:11:24.601Z\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]any{\n\t\t\t\t\t\t\"interface/last-change\": int64(1718809884),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ms\",\n\t\t\tfields: map[string]any{\n\t\t\t\t\"precision\": 
\"ms\",\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*last-change\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]any{\n\t\t\t\t\t\t\t\"interface/last-change\": \"2024-06-19T15:11:24.601Z\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]any{\n\t\t\t\t\t\t\"interface/last-change\": int64(1718809884601),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"us\",\n\t\t\tfields: map[string]any{\n\t\t\t\t\"precision\": \"us\",\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*last-change\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]any{\n\t\t\t\t\t\t\t\"interface/last-change\": \"2024-06-19T15:11:24.601Z\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]any{\n\t\t\t\t\t\t\"interface/last-change\": int64(1718809884601000),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ns\",\n\t\t\tfields: map[string]any{\n\t\t\t\t\"precision\": \"ns\",\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*last-change\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: 
map[string]any{\n\t\t\t\t\t\t\t\"interface/last-change\": \"2024-06-19T15:11:24.601Z\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]any{\n\t\t\t\t\t\t\"interface/last-change\": int64(1718809884601000000),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"no_match\",\n\t\t\tfields: map[string]any{\n\t\t\t\t\"precision\": \"ns\",\n\t\t\t\t\"value-names\": []string{\n\t\t\t\t\t\".*no_match.*\",\n\t\t\t\t},\n\t\t\t\t\"debug\": true,\n\t\t\t},\n\t\t\targs: args{\n\t\t\t\tes: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\t\tValues: map[string]any{\n\t\t\t\t\t\t\t\"interface/last-change\": \"2024-06-19T15:11:24.601Z\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []*formatters.EventMsg{\n\t\t\t\t{\n\t\t\t\t\tName:      \"sub1\",\n\t\t\t\t\tTimestamp: 42,\n\t\t\t\t\tTags:      map[string]string{},\n\t\t\t\t\tValues: map[string]any{\n\t\t\t\t\t\t\"interface/last-change\": \"2024-06-19T15:11:24.601Z\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tc := &epoch{}\n\t\t\terr := c.Init(tt.fields, formatters.WithLogger(log.New(os.Stderr, \"[event-epoch-test]\", log.Flags())))\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to init processor in test %q: %v\", tt.name, err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif got := c.Apply(tt.args.es...); !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"epoch.Apply() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_to_tag/event_to_tag.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_to_tag\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-to-tag\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\n// toTag moves ALL values matching any of the regex in .Values to the EventMsg.Tags map.\n// if .Keep is true, the matching values are not deleted from EventMsg.Tags\ntype toTag struct {\n\tformatters.BaseProcessor\n\n\tValues     []string `mapstructure:\"values,omitempty\" json:\"values,omitempty\"`\n\tValueNames []string `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tKeep       bool     `mapstructure:\"keep,omitempty\" json:\"keep,omitempty\"`\n\tDebug      bool     `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\tvalueNames []*regexp.Regexp\n\tvalues     []*regexp.Regexp\n\n\tlogger *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &toTag{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (t *toTag) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(t)\n\t}\n\tt.valueNames = make([]*regexp.Regexp, 0, len(t.ValueNames))\n\tfor _, reg := range t.ValueNames {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\tt.valueNames = append(t.valueNames, re)\n\t}\n\tt.values = make([]*regexp.Regexp, 0, len(t.Values))\n\tfor _, reg := range t.Values {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tt.values = append(t.values, re)\n\t}\n\tif t.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(t)\n\t\tif err != nil {\n\t\t\tt.logger.Printf(\"initialized processor '%s': %+v\", processorType, t)\n\t\t\treturn nil\n\t\t}\n\t\tt.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\nfunc (t *toTag) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif e.Tags == nil {\n\t\t\te.Tags = make(map[string]string)\n\t\t}\n\t\tfor k, v := range e.Values {\n\t\t\tfor _, re := range t.valueNames {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\tswitch v := v.(type) {\n\t\t\t\t\tcase string:\n\t\t\t\t\t\te.Tags[k] = v\n\t\t\t\t\tdefault:\n\t\t\t\t\t\te.Tags[k] = fmt.Sprint(v)\n\t\t\t\t\t}\n\t\t\t\t\tif !t.Keep {\n\t\t\t\t\t\tdelete(e.Values, k)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, re := range t.values {\n\t\t\t\tif vs, ok := v.(string); ok {\n\t\t\t\t\tif re.MatchString(vs) {\n\t\t\t\t\t\te.Tags[k] = vs\n\t\t\t\t\t\tif !t.Keep {\n\t\t\t\t\t\t\tdelete(e.Values, k)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (t *toTag) Apply2(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif e.Tags == nil {\n\t\t\te.Tags = make(map[string]string)\n\t\t}\n\t\tfor k, v := range e.Values {\n\t\t\tfor _, re := range t.valueNames {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\te.Tags[k] = fmt.Sprint(v) // always cast v results on extra allocations: Apply > Apply2\n\t\t\t\t\tif !t.Keep {\n\t\t\t\t\t\tdelete(e.Values, k)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, re := range t.values {\n\t\t\t\tif vs, ok := 
v.(string); ok {\n\t\t\t\t\tif re.MatchString(vs) {\n\t\t\t\t\t\te.Tags[k] = vs\n\t\t\t\t\t\tif !t.Keep {\n\t\t\t\t\t\t\tdelete(e.Values, k)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (t *toTag) WithLogger(l *log.Logger) {\n\tif t.Debug && l != nil {\n\t\tt.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if t.Debug {\n\t\tt.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_to_tag/event_to_tag_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_to_tag\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"1_value_match\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\".*name$\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": \"dummy\"}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags:   map[string]string{\"name\": \"dummy\"},\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"1_value_match_with_keep\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\".*name$\"},\n\t\t\t\"keep\":        true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": \"dummy\"}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags:   map[string]string{\"name\": \"dummy\"},\n\t\t\t\t\t\tValues: map[string]interface{}{\"name\": \"dummy\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"2_value_match\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\".*name$\"},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name\":        \"dummy\",\n\t\t\t\t\t\t\t\"second_name\": \"dummy2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"name\":        \"dummy\",\n\t\t\t\t\t\t\t\"second_name\": \"dummy2\"},\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"2_value_match_with_keep\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\".*name$\"},\n\t\t\t\"keep\":        true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: 
map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name\":        \"dummy\",\n\t\t\t\t\t\t\t\"second_name\": \"dummy2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"name\":        \"dummy\",\n\t\t\t\t\t\t\t\"second_name\": \"dummy2\"},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name\":        \"dummy\",\n\t\t\t\t\t\t\t\"second_name\": \"dummy2\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"match_integer_value\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\".*peer-as$\"},\n\t\t\t\"keep\":        true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name\":    \"dummy\",\n\t\t\t\t\t\t\t\"peer-as\": 65000,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"peer-as\": \"65000\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"name\":    \"dummy\",\n\t\t\t\t\t\t\t\"peer-as\": 65000,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventToTag(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := 
formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tt.Log(\"found processor\")\n\t\t\tp := pi()\n\t\t\terr := p.Init(ts.processor)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"processor: %+v\", p)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Logf(\"failed at event to_tag %s, item %d, index %d\", name, i, j)\n\t\t\t\t\t\t\tt.Logf(\"expected: %#v\", item.output[j])\n\t\t\t\t\t\t\tt.Logf(\"     got: %#v\", outs[j])\n\t\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"event processor %s not found\", ts.processorType)\n\t\t}\n\t}\n}\n\n// Helper function to generate test messages\nfunc generateTestMessages(count int) []*formatters.EventMsg {\n\tmessages := make([]*formatters.EventMsg, count)\n\tfor i := 0; i < count; i++ {\n\t\tmessages[i] = &formatters.EventMsg{\n\t\t\tName:      fmt.Sprintf(\"event%d\", i),\n\t\t\tTimestamp: int64(i),\n\t\t\tValues: map[string]interface{}{\n\t\t\t\tfmt.Sprintf(\"key%d\", i):  fmt.Sprintf(\"value%d\", i),\n\t\t\t\t\"staticKey\":              \"staticValue\",\n\t\t\t\tfmt.Sprintf(\"tagw%d\", i): fmt.Sprintf(\"value%d\", i),\n\t\t\t},\n\t\t}\n\t}\n\treturn messages\n}\n\n// Benchmark test for the Apply function\nfunc BenchmarkApply(b *testing.B) {\n\t// Create a toTag instance with sample regex patterns\n\ttoTagInstance := &toTag{\n\t\tvalueNames: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(`^key\\d+$`), // Matches keys like \"key1\", \"key2\", etc.\n\t\t},\n\t\tvalues: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(`^value\\d+$`), // Matches values like \"value1\", \"value2\", etc.\n\t\t},\n\t\tKeep: false,\n\t}\n\n\t// Generate a sample EventMsg array\n\teventMessages 
:= generateTestMessages(10000)\n\t// Benchmark the Apply function\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\ttoTagInstance.Apply(eventMessages...)\n\t}\n}\n\nfunc BenchmarkApply2(b *testing.B) {\n\t// Create a toTag instance with sample regex patterns\n\ttoTagInstance := &toTag{\n\t\tvalueNames: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(`^key\\d+$`), // Matches keys like \"key1\", \"key2\", etc.\n\t\t},\n\t\tvalues: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(`^value\\d+$`), // Matches values like \"value1\", \"value2\", etc.\n\t\t},\n\t\tKeep: false,\n\t}\n\n\t// Generate a sample EventMsg array\n\teventMessages := generateTestMessages(10000)\n\t// Benchmark the Apply function\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\ttoTagInstance.Apply2(eventMessages...)\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_trigger/event_trigger.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_trigger\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in/yaml.v2\"\n\n\t\"github.com/itchyny/gojq\"\n\n\t\"github.com/openconfig/gnmic/pkg/actions\"\n\t_ \"github.com/openconfig/gnmic/pkg/actions/all\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\tgfile \"github.com/openconfig/gnmic/pkg/file\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType    = \"event-trigger\"\n\tloggingPrefix    = \"[\" + processorType + \"] \"\n\tdefaultCondition = \"any([true])\"\n)\n\n// trigger triggers an action when certain conditions are met\ntype trigger struct {\n\tformatters.BaseProcessor\n\n\tCondition      string                 `mapstructure:\"condition,omitempty\"`\n\tMinOccurrences int                    `mapstructure:\"min-occurrences,omitempty\"`\n\tMaxOccurrences int                    `mapstructure:\"max-occurrences,omitempty\"`\n\tWindow         time.Duration          `mapstructure:\"window,omitempty\"`\n\tActions        []string               `mapstructure:\"actions,omitempty\"`\n\tVars           map[string]interface{} `mapstructure:\"vars,omitempty\"`\n\tVarsFile       string                 `mapstructure:\"vars-file,omitempty\"`\n\tDebug          bool                   `mapstructure:\"debug,omitempty\"`\n\tAsync          bool                   `mapstructure:\"async,omitempty\"`\n\n\toccurrencesTimes 
[]time.Time\n\tlastTrigger      time.Time\n\tcode             *gojq.Code\n\tactions          []actions.Action\n\tvars             map[string]interface{}\n\n\ttargets map[string]*types.TargetConfig\n\tacts    map[string]map[string]interface{}\n\tlogger  *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &trigger{\n\t\t\tlogger: log.New(io.Discard, \"\", 0),\n\t\t}\n\t})\n}\n\nfunc (p *trigger) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\n\terr = p.setDefaults()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.Condition = strings.TrimSpace(p.Condition)\n\tq, err := gojq.Parse(p.Condition)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.code, err = gojq.Compile(q)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, name := range p.Actions {\n\t\tif actCfg, ok := p.acts[name]; ok {\n\t\t\terr = p.initializeAction(actCfg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\treturn fmt.Errorf(\"failed to initialize action %q: config not found\", name)\n\t}\n\terr = p.readVars()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.logger.Printf(\"%q initialized: %+v\", processorType, p)\n\n\treturn nil\n}\n\nfunc (p *trigger) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg {\n\tnow := time.Now()\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tcontinue\n\t\t}\n\t\tres, err := formatters.CheckCondition(p.code, e)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"failed evaluating condition %q: %v\", p.Condition, err)\n\t\t\tcontinue\n\t\t}\n\t\tif p.Debug {\n\t\t\tp.logger.Printf(\"msg=%+v, condition %q result: (%T)%v\", e, p.Condition, res, res)\n\t\t}\n\t\tif res {\n\t\t\tif p.evalOccurrencesWithinWindow(now) {\n\t\t\t\tif p.Async {\n\t\t\t\t\tgo p.triggerActions(e)\n\t\t\t\t} else 
{\n\t\t\t\t\tp.triggerActions(e)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (p *trigger) WithLogger(l *log.Logger) {\n\tif p.Debug && l != nil {\n\t\tp.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if p.Debug {\n\t\tp.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc (p *trigger) WithTargets(tcs map[string]*types.TargetConfig) {\n\tp.targets = tcs\n}\n\nfunc (p *trigger) WithActions(acts map[string]map[string]interface{}) {\n\tif p.Debug {\n\t\tp.logger.Printf(\"with actions: %+v\", acts)\n\t}\n\tp.acts = acts\n}\n\nfunc (p *trigger) initializeAction(cfg map[string]interface{}) error {\n\tif len(cfg) == 0 {\n\t\treturn errors.New(\"missing action definition\")\n\t}\n\tif actType, ok := cfg[\"type\"]; ok {\n\t\tswitch actType := actType.(type) {\n\t\tcase string:\n\t\t\tif in, ok := actions.Actions[actType]; ok {\n\t\t\t\tact := in()\n\t\t\t\terr := act.Init(cfg, actions.WithLogger(p.logger), actions.WithTargets(p.targets))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tp.actions = append(p.actions, act)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"unknown action type %q\", actType)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unexpected action field type %T\", actType)\n\t\t}\n\t}\n\treturn errors.New(\"missing type field under action\")\n}\n\nfunc (p *trigger) String() string {\n\tb, err := json.Marshal(p)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (p *trigger) setDefaults() error {\n\tif p.Condition == \"\" {\n\t\tp.Condition = defaultCondition\n\t}\n\tif p.MinOccurrences <= 0 {\n\t\tp.MinOccurrences = 1\n\t}\n\tif p.MaxOccurrences <= 0 {\n\t\tp.MaxOccurrences = 1\n\t}\n\tif p.MaxOccurrences < p.MinOccurrences {\n\t\treturn errors.New(\"max-occurrences cannot be lower than min-occurrences\")\n\t}\n\tif p.Window <= 0 {\n\t\tp.Window = time.Minute\n\t}\n\treturn nil\n}\n\nfunc (p *trigger) readVars() error {\n\tif p.VarsFile == \"\" 
{\n\t\tp.vars = p.Vars\n\t\treturn nil\n\t}\n\tb, err := gfile.ReadFile(context.TODO(), p.VarsFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := make(map[string]interface{})\n\terr = yaml.Unmarshal(b, &v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.vars = utils.MergeMaps(v, p.Vars)\n\treturn nil\n}\n\nfunc (p *trigger) triggerActions(e *formatters.EventMsg) {\n\tactx := &actions.Context{Input: e, Env: make(map[string]interface{}), Vars: p.vars}\n\tfor _, act := range p.actions {\n\t\tres, err := act.Run(context.TODO(), actx)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"trigger action %q failed: %+v\", act.NName(), err)\n\t\t\treturn\n\t\t}\n\t\tactx.Env[act.NName()] = res\n\t\tp.logger.Printf(\"action %q result: %+v\", act.NName(), res)\n\t}\n}\n\nfunc (p *trigger) evalOccurrencesWithinWindow(now time.Time) bool {\n\tif p.occurrencesTimes == nil {\n\t\tp.occurrencesTimes = make([]time.Time, 0)\n\t}\n\toccurrencesInWindow := make([]time.Time, 0, len(p.occurrencesTimes))\n\tif p.Debug {\n\t\tp.logger.Printf(\"occurrencesTimes: %v\", p.occurrencesTimes)\n\t}\n\tfor _, t := range p.occurrencesTimes {\n\t\tif t.Add(p.Window).After(now) {\n\t\t\tif p.Debug {\n\t\t\t\tp.logger.Printf(\"time=%s + %s is after now=%s\", t, p.Window, now)\n\t\t\t}\n\t\t\toccurrencesInWindow = append(occurrencesInWindow, t)\n\t\t}\n\t}\n\tp.occurrencesTimes = append(occurrencesInWindow, now)\n\tnumOccurrences := len(p.occurrencesTimes)\n\tif numOccurrences > p.MaxOccurrences {\n\t\tp.occurrencesTimes = p.occurrencesTimes[numOccurrences-p.MaxOccurrences-1:]\n\t\tnumOccurrences = len(p.occurrencesTimes)\n\t}\n\n\tif p.Debug {\n\t\tp.logger.Printf(\"numOccurrences: %d\", numOccurrences)\n\t}\n\n\tif numOccurrences >= p.MinOccurrences && numOccurrences <= p.MaxOccurrences {\n\t\tp.lastTrigger = now\n\t\treturn true\n\t}\n\t// check last trigger\n\tif numOccurrences > p.MinOccurrences && p.lastTrigger.Add(p.Window).Before(now) {\n\t\tp.lastTrigger = now\n\t\treturn true\n\t}\n\treturn 
false\n}\n\nfunc (p *trigger) WithProcessors(procs map[string]map[string]any) {}\n"
  },
  {
    "path": "pkg/formatters/event_trigger/event_trigger_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_trigger\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar actionsCfg = map[string]map[string]interface{}{\n\t\"dummy1\": {\n\t\t\"name\": \"dummy1\",\n\t\t\"type\": \"http\",\n\t},\n\t\"dummy2\": {\n\t\t\"name\": \"dummy2\",\n\t\t\"type\": \"http\",\n\t\t\"url\":  \"http://remote-alerting-system:9090/\",\n\t},\n}\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"init\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"debug\": true,\n\t\t\t\"actions\": []string{\n\t\t\t\t\"dummy1\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": 
\"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"with_condition\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"condition\": `.values[\"counter1\"] > 90`,\n\t\t\t\"debug\":     true,\n\t\t\t\"actions\": []string{\n\t\t\t\t\"dummy2\",\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 89,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter1\": 89,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 
91,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"sub1\",\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"tag1\": \"1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\t\t\"counter2\": 91,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nvar triggerOccWindowTestSet = map[string]struct {\n\tt   *trigger\n\tnow time.Time\n\tout bool\n}{\n\t\"defaults_0_occurrences\": {\n\t\tt: &trigger{\n\t\t\tlogger:           log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\tDebug:            true,\n\t\t\tMinOccurrences:   1,\n\t\t\tMaxOccurrences:   1,\n\t\t\tWindow:           time.Minute,\n\t\t\toccurrencesTimes: []time.Time{},\n\t\t},\n\t\tout: true,\n\t\tnow: time.Now(),\n\t},\n\t\"defaults_with_1_occurrence_in_window\": {\n\t\tt: &trigger{\n\t\t\tlogger:         log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\tDebug:          true,\n\t\t\tMinOccurrences: 1,\n\t\t\tMaxOccurrences: 1,\n\t\t\tWindow:         time.Minute,\n\t\t\toccurrencesTimes: []time.Time{\n\t\t\t\ttime.Now().Add(-time.Second),\n\t\t\t},\n\t\t\tlastTrigger: time.Now().Add(-time.Second),\n\t\t},\n\t\tout: false,\n\t\tnow: time.Now(),\n\t},\n\t\"defaults_with_1_occurrence_out_of_window\": {\n\t\tt: &trigger{\n\t\t\tlogger:         log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\tDebug:          true,\n\t\t\tMinOccurrences: 1,\n\t\t\tMaxOccurrences: 1,\n\t\t\tWindow:         time.Minute,\n\t\t\toccurrencesTimes: []time.Time{\n\t\t\t\ttime.Now().Add(-time.Hour),\n\t\t\t},\n\t\t},\n\t\tout: true,\n\t\tnow: time.Now(),\n\t},\n\t\"2max_1min_without_occurrences\": {\n\t\tt: &trigger{\n\t\t\tlogger:           log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\tDebug:            true,\n\t\t\tMinOccurrences:   1,\n\t\t\tMaxOccurrences:   2,\n\t\t\tWindow:           time.Minute,\n\t\t\toccurrencesTimes: 
[]time.Time{},\n\t\t},\n\t\tout: true,\n\t\tnow: time.Now(),\n\t},\n\t\"2max_1min_with_1occurrence_in_window\": {\n\t\tt: &trigger{\n\t\t\tlogger:         log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\tDebug:          true,\n\t\t\tMinOccurrences: 1,\n\t\t\tMaxOccurrences: 2,\n\t\t\tWindow:         time.Minute,\n\t\t\toccurrencesTimes: []time.Time{\n\t\t\t\ttime.Now().Add(-30 * time.Second),\n\t\t\t},\n\t\t},\n\t\tout: true,\n\t\tnow: time.Now(),\n\t},\n\t\"2max_1min_with_2occurrences_in_window\": {\n\t\tt: &trigger{\n\t\t\tlogger:         log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\tDebug:          true,\n\t\t\tMinOccurrences: 1,\n\t\t\tMaxOccurrences: 2,\n\t\t\tWindow:         time.Minute,\n\t\t\toccurrencesTimes: []time.Time{\n\t\t\t\ttime.Now().Add(-10 * time.Second),\n\t\t\t\ttime.Now().Add(-30 * time.Second),\n\t\t\t},\n\t\t\tlastTrigger: time.Now().Add(-10 * time.Second),\n\t\t},\n\t\tout: false,\n\t\tnow: time.Now(),\n\t},\n\t\"2max_2min_without_occurrences\": {\n\t\tt: &trigger{\n\t\t\tlogger:           log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\tDebug:            true,\n\t\t\tMinOccurrences:   2,\n\t\t\tMaxOccurrences:   2,\n\t\t\tWindow:           time.Minute,\n\t\t\toccurrencesTimes: []time.Time{},\n\t\t},\n\t\tout: false,\n\t\tnow: time.Now(),\n\t},\n\t\"2max_2min_with_1occurrence_in_window\": {\n\t\tt: &trigger{\n\t\t\tlogger:         log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\tDebug:          true,\n\t\t\tMinOccurrences: 2,\n\t\t\tMaxOccurrences: 2,\n\t\t\tWindow:         time.Minute,\n\t\t\toccurrencesTimes: []time.Time{\n\t\t\t\ttime.Now().Add(-30 * time.Second),\n\t\t\t},\n\t\t},\n\t\tout: true,\n\t\tnow: time.Now(),\n\t},\n\t\"2max_2min_with_2occurrences_in_window\": {\n\t\tt: &trigger{\n\t\t\tlogger:         log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\tDebug:          true,\n\t\t\tMinOccurrences: 2,\n\t\t\tMaxOccurrences: 
2,\n\t\t\tWindow:         time.Minute,\n\t\t\toccurrencesTimes: []time.Time{\n\t\t\t\ttime.Now().Add(-10 * time.Second),\n\t\t\t\ttime.Now().Add(-30 * time.Second),\n\t\t\t},\n\t\t\tlastTrigger: time.Now().Add(-10 * time.Second),\n\t\t},\n\t\tout: false,\n\t\tnow: time.Now(),\n\t},\n\t\"2max_2min_with_2occurrences_in_window_lastTrigger_out_of_window\": {\n\t\tt: &trigger{\n\t\t\tlogger:         log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\tDebug:          true,\n\t\t\tMinOccurrences: 2,\n\t\t\tMaxOccurrences: 2,\n\t\t\tWindow:         time.Minute,\n\t\t\toccurrencesTimes: []time.Time{\n\t\t\t\ttime.Now().Add(-10 * time.Second),\n\t\t\t\ttime.Now().Add(-30 * time.Second),\n\t\t\t},\n\t\t\tlastTrigger: time.Now().Add(-61 * time.Second),\n\t\t},\n\t\tout: true,\n\t\tnow: time.Now(),\n\t},\n}\n\nfunc TestEventTrigger(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tt.Log(\"found processor\")\n\t\t\tp := pi()\n\t\t\terr := p.Init(ts.processor,\n\t\t\t\tformatters.WithLogger(log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)),\n\t\t\t\tformatters.WithActions(actionsCfg),\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"processor: %+v\", p)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tif len(outs) != len(item.output) {\n\t\t\t\t\t\tt.Errorf(\"failed at %s, result has a different length than the expected result\", name)\n\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t}\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tif !cmp.Equal(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Errorf(\"failed at %s item %d, index %d, expected %+v, got: %+v\", name, i, j, item.output[j], outs[j])\n\t\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t} else 
{\n\t\t\tt.Errorf(\"event processor %s not found\", ts.processorType)\n\t\t}\n\t}\n}\n\nfunc TestOccurrenceTrigger(t *testing.T) {\n\tfor name, ts := range triggerOccWindowTestSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tok := ts.t.evalOccurrencesWithinWindow(ts.now)\n\t\t\tt.Logf(\"%q result: %v\", name, ok)\n\t\t\tif ok != ts.out {\n\t\t\t\tt.Errorf(\"failed at %s , expected %+v, got: %+v\", name, ts.out, ok)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/event_value_tag/event_value_tag.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_value_tag\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-value-tag\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\ntype valueTag struct {\n\tformatters.BaseProcessor\n\tTagName   string `mapstructure:\"tag-name,omitempty\" json:\"tag-name,omitempty\"`\n\tValueName string `mapstructure:\"value-name,omitempty\" json:\"value-name,omitempty\"`\n\tConsume   bool   `mapstructure:\"consume,omitempty\" json:\"consume,omitempty\"`\n\tDebug     bool   `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\tlogger    *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &valueTag{logger: log.New(io.Discard, \"\", 0)}\n\t})\n}\n\nfunc (vt *valueTag) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, vt)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif vt.TagName == \"\" {\n\t\tvt.TagName = vt.ValueName\n\t}\n\tfor _, opt := range opts {\n\t\topt(vt)\n\t}\n\n\tif vt.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(vt)\n\t\tif err != nil {\n\t\t\tvt.logger.Printf(\"initialized processor '%s': %+v\", processorType, vt)\n\t\t\treturn nil\n\t\t}\n\t\tvt.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\ntype tagVal struct {\n\ttags  map[string]string\n\tvalue 
interface{}\n}\n\nfunc (vt *valueTag) Apply(evs ...*formatters.EventMsg) []*formatters.EventMsg {\n\tvts := vt.buildApplyRules(evs)\n\tfor _, tv := range vts {\n\t\tfor _, ev := range evs {\n\t\t\tmatch := compareTags(tv.tags, ev.Tags)\n\t\t\tif match {\n\t\t\t\tswitch v := tv.value.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\tev.Tags[vt.TagName] = v\n\t\t\t\tdefault:\n\t\t\t\t\tev.Tags[vt.TagName] = fmt.Sprint(tv.value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn evs\n}\n\nfunc (vt *valueTag) WithLogger(l *log.Logger) {\n\tif vt.Debug && l != nil {\n\t\tvt.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if vt.Debug {\n\t\tvt.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\n// returns true if all keys match, false otherwise.\nfunc compareTags(a map[string]string, b map[string]string) bool {\n\tif len(a) > len(b) {\n\t\treturn false\n\t}\n\tfor k, v := range a {\n\t\tif vv, ok := b[k]; !ok || v != vv {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (vt *valueTag) WithProcessors(procs map[string]map[string]any) {}\n\nfunc (vt *valueTag) buildApplyRules(evs []*formatters.EventMsg) []*tagVal {\n\ttoApply := make([]*tagVal, 0)\n\tfor _, ev := range evs {\n\t\tif v, ok := ev.Values[vt.ValueName]; ok {\n\t\t\ttoApply = append(toApply,\n\t\t\t\t&tagVal{\n\t\t\t\t\ttags:  copyTags(ev.Tags),\n\t\t\t\t\tvalue: v,\n\t\t\t\t})\n\t\t\tif vt.Consume {\n\t\t\t\tdelete(ev.Values, vt.ValueName)\n\t\t\t}\n\t\t}\n\t}\n\treturn toApply\n}\n\nfunc copyTags(src map[string]string) map[string]string {\n\tdest := make(map[string]string, len(src))\n\tfor k, v := range src {\n\t\tdest[k] = v\n\t}\n\treturn dest\n}\n"
  },
  {
    "path": "pkg/formatters/event_value_tag/event_value_tag_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_value_tag\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"no-options\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-name\": \"foo\",\n\t\t\t\"debug\":      true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 3,\n\t\t\t\t\t\tTags:      map[string]string{\"other_tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"other_val\": \"val\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 4,\n\t\t\t\t\t\tTags:      map[string]string{\"foo\": \"other_value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"other_val\": \"val\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"new_value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 3,\n\t\t\t\t\t\tTags:      map[string]string{\"other_tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"other_val\": \"val\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 4,\n\t\t\t\t\t\tTags:      map[string]string{\"foo\": \"other_value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"other_val\": \"val\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"counter1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"counter1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"rename-tag\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-name\": \"foo\",\n\t\t\t\"tag-name\":   \"bar\",\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"bar\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"bar\": \"new_value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": 
\"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"counter1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"bar\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"counter1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"bar\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"consume-value\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-name\": \"foo\",\n\t\t\t\"consume\":    true,\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    
map[string]interface{}{\"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"new_value\"},\n\t\t\t\t\t\tValues:    make(map[string]interface{}, 0),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"integer_val\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-name\": \"foo\",\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": 42},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 
1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"42\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"42\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": 42},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value1\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value1\", \"foo\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventValueTag(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tt.Log(\"found processor\")\n\t\t\tp 
:= pi()\n\t\t\terr := p.Init(ts.processor, formatters.WithLogger(log.Default()))\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Logf(\"processor: %+v\", p)\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Errorf(\"failed at %s item %d, index %d, expected %+v\", name, i, j, item.output[j])\n\t\t\t\t\t\t\tt.Errorf(\"failed at %s item %d, index %d, got:     %+v\", name, i, j, outs[j])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"event processor %s not found\", ts.processorType)\n\t\t}\n\t}\n}\n\nfunc generateEventMsgs(numEvents, numValues int, targetKey, targetValue string) []*formatters.EventMsg {\n\tevs := make([]*formatters.EventMsg, numEvents)\n\tfor i := 0; i < numEvents; i++ {\n\t\tvalues := make(map[string]any)\n\t\tfor j := 0; j < numValues; j++ {\n\t\t\tvalues[fmt.Sprintf(\"key%d\", j)] = fmt.Sprintf(\"value%d\", j)\n\t\t}\n\t\tvalues[targetKey] = targetValue\n\t\tevs[i] = &formatters.EventMsg{\n\t\t\tTags:   map[string]string{\"tag\": \"test\"},\n\t\t\tValues: values,\n\t\t}\n\t}\n\treturn evs\n}\n\nfunc BenchmarkBuildApplyRules(b *testing.B) {\n\tevs := generateEventMsgs(100_000, 10, \"targetKey\", \"targetValue\")\n\tvt := &valueTag{ValueName: \"targetKey\", Consume: true}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tvt.buildApplyRules(evs)\n\t}\n}\n\nfunc BenchmarkBuildApplyRules2(b *testing.B) {\n\tevs := generateEventMsgs(100_000, 10, \"targetKey\", \"targetValue\")\n\tvt := &valueTag{ValueName: \"targetKey\", Consume: true}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tvt.buildApplyRules2(evs)\n\t}\n}\n\n// as ref\nfunc (vt *valueTag) buildApplyRules2(evs []*formatters.EventMsg) []*tagVal 
{\n\ttoApply := make([]*tagVal, 0)\n\n\tfor _, ev := range evs {\n\t\tfor k, v := range ev.Values {\n\t\t\tif vt.ValueName == k {\n\t\t\t\ttoApply = append(toApply, &tagVal{\n\t\t\t\t\t// copyTags(ev.Tags),\n\t\t\t\t\tev.Tags,\n\t\t\t\t\tv,\n\t\t\t\t})\n\t\t\t\tif vt.Consume {\n\t\t\t\t\tdelete(ev.Values, vt.ValueName)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn toApply\n}\n"
  },
  {
    "path": "pkg/formatters/event_value_tag_v2/event_value_tag_v2.go",
    "content": "// © 2025 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_value_tag_v2\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"hash/fnv\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"slices\"\n\t\"sync\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-value-tag-v2\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\nvar (\n\teqByte   = []byte(\"=\")\n\tsemiC    = []byte(\";\")\n\tpipeByte = []byte(\"|\")\n)\n\ntype valueTag struct {\n\tformatters.BaseProcessor\n\tRules  []*rule `mapstructure:\"rules,omitempty\" json:\"rules,omitempty\"`\n\tDebug  bool    `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\tlogger *log.Logger\n\n\tm          *sync.RWMutex\n\tapplyRules []map[uint64]*applyRule\n}\n\ntype rule struct {\n\tTagName   string `mapstructure:\"tag-name,omitempty\" json:\"tag-name,omitempty\"`\n\tValueName string `mapstructure:\"value-name,omitempty\" json:\"value-name,omitempty\"`\n\tConsume   bool   `mapstructure:\"consume,omitempty\" json:\"consume,omitempty\"`\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &valueTag{m: new(sync.RWMutex), logger: log.New(io.Discard, \"\", 0)}\n\t})\n}\n\nfunc (vt *valueTag) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, vt)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(vt)\n\t}\n\tfor _, r := range vt.Rules {\n\t\tif r.TagName == \"\" {\n\t\t\tr.TagName = 
r.ValueName\n\t\t}\n\t}\n\n\tvt.applyRules = make([]map[uint64]*applyRule, len(vt.Rules))\n\tfor i := range vt.applyRules {\n\t\tvt.applyRules[i] = make(map[uint64]*applyRule, 0)\n\t}\n\n\tif vt.logger.Writer() != io.Discard {\n\t\tb, err := json.Marshal(vt)\n\t\tif err != nil {\n\t\t\tvt.logger.Printf(\"initialized processor '%s': %+v\", processorType, vt)\n\t\t\treturn nil\n\t\t}\n\t\tvt.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\t}\n\treturn nil\n}\n\ntype applyRule struct {\n\t// Set of tags that must be present in a message\n\t// in order to add the value as tag.\n\ttags map[string]string\n\t// The value to be added as tag.\n\t// The tag name is taken from the main proc struct.\n\tvalue any\n}\n\nfunc (vt *valueTag) Apply(evs ...*formatters.EventMsg) []*formatters.EventMsg {\n\tvt.m.Lock()\n\tdefer vt.m.Unlock()\n\n\tfor _, ev := range evs {\n\t\tfor i, r := range vt.Rules {\n\t\t\tif v, ok := ev.Values[r.ValueName]; ok {\n\t\t\t\t// calculate apply rule Key\n\t\t\t\tk := vt.applyRuleKey(ev.Tags, r)\n\t\t\t\tvt.applyRules[i][k] = &applyRule{\n\t\t\t\t\ttags:  copyTags(ev.Tags), // copy map\n\t\t\t\t\tvalue: v,\n\t\t\t\t}\n\t\t\t\tif r.Consume {\n\t\t\t\t\tdelete(ev.Values, r.ValueName)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, ar := range vt.applyRules[i] {\n\t\t\t\tif includedIn(ar.tags, ev.Tags) {\n\t\t\t\t\tswitch v := ar.value.(type) {\n\t\t\t\t\tcase string:\n\t\t\t\t\t\tev.Tags[r.TagName] = v\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tev.Tags[r.TagName] = fmt.Sprint(ar.value)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn evs\n}\n\nfunc (vt *valueTag) WithLogger(l *log.Logger) {\n\tif vt.Debug && l != nil {\n\t\tvt.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if vt.Debug {\n\t\tvt.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\n// comparison logic for maps\n// i.e: a ⊆ b\nfunc includedIn(a, b map[string]string) bool {\n\tif len(a) > len(b) {\n\t\treturn false\n\t}\n\tfor k, v 
:= range a {\n\t\tif bv, ok := b[k]; !ok || v != bv {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// the apply rule key is a hash of the valueName and the event msg tags\nfunc (vt *valueTag) applyRuleKey(m map[string]string, r *rule) uint64 {\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tslices.Sort(keys)\n\n\th := fnv.New64a()\n\th.Write([]byte(r.ValueName))\n\th.Write(pipeByte)\n\tfor _, k := range keys {\n\t\th.Write([]byte(k))\n\t\th.Write(eqByte)\n\t\th.Write([]byte(m[k]))\n\t\th.Write(semiC)\n\t}\n\treturn h.Sum64()\n}\n\nfunc copyTags(src map[string]string) map[string]string {\n\tdest := make(map[string]string, len(src))\n\tfor k, v := range src {\n\t\tdest[k] = v\n\t}\n\treturn dest\n}\n"
  },
  {
    "path": "pkg/formatters/event_value_tag_v2/event_value_tag_v2_test.go",
    "content": "// © 2025 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_value_tag_v2\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  []*formatters.EventMsg\n\toutput []*formatters.EventMsg\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"no-options\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"rules\": []map[string]any{\n\t\t\t\t{\"value-name\": \"foo\"},\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\t// `foo`` value becomes a tag\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"new_value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:   
   map[string]string{\"tag\": \"value\", \"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\t// no change\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\t// foo value becomes a tag\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"counter1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"counter1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    
map[string]interface{}{\"foo\": 42},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"counter1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"42\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": 42},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"42\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"counter1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"rename-tag\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"rules\": []map[string]any{\n\t\t\t\t{\n\t\t\t\t\t\"value-name\": \"foo\",\n\t\t\t\t\t\"tag-name\":   \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"bar\": \"new_value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"bar\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: 
[]*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"counter1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"bar\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"bar\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"counter1\": \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"consume-value\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"rules\": []map[string]any{\n\t\t\t\t{\n\t\t\t\t\t\"value-name\": \"foo\",\n\t\t\t\t\t\"consume\":    true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput:  
make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"new_value\"},\n\t\t\t\t\t\tValues:    make(map[string]interface{}, 0),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t\"multiple-rules\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"rules\": []map[string]any{\n\t\t\t\t{\n\t\t\t\t\t\"value-name\": \"foo\",\n\t\t\t\t\t\"consume\":    true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"value-name\": \"bar\",\n\t\t\t\t\t// \"consume\":    true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\ttests: []item{\n\t\t\t// 
0\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: nil,\n\t\t\t},\n\t\t\t// 1\n\t\t\t{\n\t\t\t\tinput:  make([]*formatters.EventMsg, 0),\n\t\t\t\toutput: make([]*formatters.EventMsg, 0),\n\t\t\t},\n\t\t\t// 2\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"new_value\"},\n\t\t\t\t\t\tValues:    make(map[string]interface{}, 0),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"foo\": \"new_value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 3\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"}, // value to be copied to tags\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\"},\n\t\t\t\t\t},\n\n\t\t\t\t\t{ // this message should remain unchanged\n\t\t\t\t\t\tTimestamp: 3,\n\t\t\t\t\t\tTags:      map[string]string{\"tag1\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"bar\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag\": \"value\", \"bar\": \"value\"},\n\t\t\t\t\t},\n\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 3,\n\t\t\t\t\t\tTags:      
map[string]string{\"tag1\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// 4\n\t\t\t{\n\t\t\t\tinput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag1\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"foo\": \"value\"}, // value to be copied to tags\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 4,\n\t\t\t\t\t\tTags:      map[string]string{\"tag2\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"}, // value to be copied to tags\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag1\": \"value\"},\n\t\t\t\t\t},\n\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 3,\n\t\t\t\t\t\tTags:      map[string]string{\"tag2\": \"value\"},\n\t\t\t\t\t},\n\n\t\t\t\t\t{ // this message should remain unchanged\n\t\t\t\t\t\tTimestamp: 5,\n\t\t\t\t\t\tTags:      map[string]string{\"other_tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{ // this message should remain unchanged\n\t\t\t\t\t\tTimestamp: 6,\n\t\t\t\t\t\t// Tags:      map[string]string{\"other_tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toutput: []*formatters.EventMsg{\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 2,\n\t\t\t\t\t\tTags:      map[string]string{\"tag1\": \"value\", \"foo\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 4,\n\t\t\t\t\t\tTags:      map[string]string{\"tag2\": \"value\", \"bar\": \"value\"},\n\t\t\t\t\t\tValues:    map[string]interface{}{\"bar\": \"value\"}, // value to be copied to tags\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 1,\n\t\t\t\t\t\tTags:      map[string]string{\"tag1\": \"value\", \"foo\": \"value\"},\n\t\t\t\t\t},\n\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 3,\n\t\t\t\t\t\tTags:      map[string]string{\"tag2\": \"value\", \"bar\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tTimestamp: 5,\n\t\t\t\t\t\tTags:      map[string]string{\"other_tag\": 
\"value\"},\n\t\t\t\t\t},\n\t\t\t\t\t{ // this message should remain unchanged\n\t\t\t\t\t\tTimestamp: 6,\n\t\t\t\t\t\t// Tags:      map[string]string{\"other_tag\": \"value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventValueTag(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tif pi, ok := formatters.EventProcessors[ts.processorType]; ok {\n\t\t\tt.Log(\"found processor\")\n\t\t\tfor i, item := range ts.tests {\n\t\t\t\t// a processor per test item\n\t\t\t\tp := pi()\n\t\t\t\terr := p.Init(ts.processor, formatters.WithLogger(log.New(os.Stderr, \"test\", log.Flags())))\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tt.Logf(\"processor: %+v\", p)\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Logf(\"running test item %d\", i)\n\t\t\t\t\t// _ = p.Apply(item.input...)\n\t\t\t\t\touts := p.Apply(item.input...)\n\t\t\t\t\tfor j := range outs {\n\t\t\t\t\t\tif !reflect.DeepEqual(outs[j], item.output[j]) {\n\t\t\t\t\t\t\tt.Errorf(\"failed at %s item %d, index %d, expected %+v\", name, i, j, item.output[j])\n\t\t\t\t\t\t\tt.Errorf(\"failed at %s item %d, index %d, got:     %+v\", name, i, j, outs[j])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"event processor %s not found\", ts.processorType)\n\t\t}\n\t}\n}\n\nfunc TestValueTagApplySubsequentRuns(t *testing.T) {\n\tprocessor := &valueTag{\n\t\tRules: []*rule{\n\t\t\t{\n\t\t\t\tTagName:   \"moved-tag\",\n\t\t\t\tValueName: \"important-value\",\n\t\t\t\tConsume:   true,\n\t\t\t},\n\t\t},\n\t\tDebug:  true,\n\t\tlogger: log.Default(),\n\t\tm:      new(sync.RWMutex),\n\t\tapplyRules: []map[uint64]*applyRule{\n\t\t\tmake(map[uint64]*applyRule),\n\t\t},\n\t}\n\n\t// first set\n\tevents1 := []*formatters.EventMsg{\n\t\t{\n\t\t\tTags: map[string]string{\"tag1\": \"value1\"},\n\t\t\tValues: map[string]interface{}{\n\t\t\t\t\"important-value\": 
\"value-to-move\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tTags: map[string]string{\"tag2\": \"value2\"},\n\t\t\tValues: map[string]interface{}{\n\t\t\t\t\"other-value\": \"irrelevant\",\n\t\t\t},\n\t\t},\n\t}\n\n\t// first apply\n\tprocessed1 := processor.Apply(events1...)\n\n\t// assert\n\tassert.Equal(t, \"value-to-move\", processed1[0].Tags[\"moved-tag\"])\n\tassert.NotContains(t, processed1[0].Values, \"important-value\")\n\tassert.NotContains(t, processed1[1].Tags, \"moved-tag\")\n\n\t// second set\n\tevents2 := []*formatters.EventMsg{\n\t\t{\n\t\t\tTags: map[string]string{\n\t\t\t\t\"tag1\": \"value1\",\n\t\t\t},\n\t\t\tValues: map[string]interface{}{\n\t\t\t\t\"new-value\": \"some-new-data\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tTags: map[string]string{\n\t\t\t\t\"tag1\": \"value1\",\n\t\t\t},\n\t\t\tValues: map[string]interface{}{\n\t\t\t\t\"counter1\": 42,\n\t\t\t},\n\t\t},\n\t}\n\n\t// second apply\n\tprocessed2 := processor.Apply(events2...)\n\t// assert\n\tassert.Equal(t, \"value-to-move\", processed2[0].Tags[\"moved-tag\"])\n\tassert.Contains(t, processed2[0].Tags, \"tag1\")\n\tassert.Contains(t, processed2[0].Values, \"new-value\")\n\tassert.Contains(t, processed2[1].Tags, \"tag1\")\n\tassert.Contains(t, processed2[1].Values, \"counter1\")\n}\n"
  },
  {
    "path": "pkg/formatters/event_write/event_write.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_write\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com/itchyny/gojq\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\nconst (\n\tprocessorType = \"event-write\"\n\tloggingPrefix = \"[\" + processorType + \"] \"\n)\n\ntype write struct {\n\tformatters.BaseProcessor\n\tCondition  string   `mapstructure:\"condition,omitempty\"`\n\tTags       []string `mapstructure:\"tags,omitempty\" json:\"tags,omitempty\"`\n\tValues     []string `mapstructure:\"values,omitempty\" json:\"values,omitempty\"`\n\tTagNames   []string `mapstructure:\"tag-names,omitempty\" json:\"tag-names,omitempty\"`\n\tValueNames []string `mapstructure:\"value-names,omitempty\" json:\"value-names,omitempty\"`\n\tDst        string   `mapstructure:\"dst,omitempty\" json:\"dst,omitempty\"`\n\tSeparator  string   `mapstructure:\"separator,omitempty\" json:\"separator,omitempty\"`\n\tIndent     string   `mapstructure:\"indent,omitempty\" json:\"indent,omitempty\"`\n\tDebug      bool     `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\n\ttags       []*regexp.Regexp\n\tvalues     []*regexp.Regexp\n\ttagNames   []*regexp.Regexp\n\tvalueNames []*regexp.Regexp\n\tdst        io.Writer\n\tsep        []byte\n\tcode       *gojq.Code\n\tlogger     *log.Logger\n}\n\nfunc init() {\n\tformatters.Register(processorType, func() formatters.EventProcessor {\n\t\treturn &write{\n\t\t\tlogger: log.New(io.Discard, \"\", 
0),\n\t\t}\n\t})\n}\n\nfunc (p *write) Init(cfg interface{}, opts ...formatters.Option) error {\n\terr := formatters.DecodeConfig(cfg, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(p)\n\t}\n\tp.Condition = strings.TrimSpace(p.Condition)\n\tq, err := gojq.Parse(p.Condition)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.code, err = gojq.Compile(q)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif p.Separator == \"\" {\n\t\tp.sep = []byte(\"\\n\")\n\t} else {\n\t\tp.sep = []byte(p.Separator)\n\t}\n\tp.tags = make([]*regexp.Regexp, 0, len(p.Tags))\n\tfor _, reg := range p.Tags {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.tags = append(p.tags, re)\n\t}\n\t//\n\tp.values = make([]*regexp.Regexp, 0, len(p.values))\n\tfor _, reg := range p.Values {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.values = append(p.values, re)\n\t}\n\t//\n\tp.tagNames = make([]*regexp.Regexp, 0, len(p.TagNames))\n\tfor _, reg := range p.TagNames {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.tagNames = append(p.tagNames, re)\n\t}\n\t//\n\tp.valueNames = make([]*regexp.Regexp, 0, len(p.ValueNames))\n\tfor _, reg := range p.ValueNames {\n\t\tre, err := regexp.Compile(reg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.valueNames = append(p.valueNames, re)\n\t}\n\tswitch p.Dst {\n\tcase \"\", \"stdout\":\n\t\tp.dst = os.Stdout\n\tcase \"stderr\":\n\t\tp.dst = os.Stderr\n\tdefault:\n\t\tp.dst, err = os.OpenFile(p.Dst, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tb, err := json.Marshal(p)\n\tif err != nil {\n\t\tp.logger.Printf(\"initialized processor '%s': %+v\", processorType, p)\n\t\treturn nil\n\t}\n\tp.logger.Printf(\"initialized processor '%s': %s\", processorType, string(b))\n\treturn nil\n}\n\nfunc (p *write) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg 
{\nOUTER:\n\tfor _, e := range es {\n\t\tif e == nil {\n\t\t\tp.dst.Write([]byte(\"\"))\n\t\t\tcontinue\n\t\t}\n\n\t\tok, err := formatters.CheckCondition(p.code, e)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"condition check failed: %v\", err)\n\t\t}\n\t\tif ok {\n\t\t\terr := p.write(e)\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Printf(\"failed to write to destination: %v\", err)\n\t\t\t\tcontinue OUTER\n\t\t\t}\n\t\t}\n\t\tfor k, v := range e.Values {\n\t\t\tfor _, re := range p.values {\n\t\t\t\tif vs, ok := v.(string); ok {\n\t\t\t\t\tif re.MatchString(vs) {\n\t\t\t\t\t\terr := p.write(e)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tp.logger.Printf(\"failed to write to destination: %v\", err)\n\t\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, re := range p.valueNames {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\terr := p.write(e)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tp.logger.Printf(\"failed to write to destination: %v\", err)\n\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t}\n\t\t\t\t\tcontinue OUTER\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor k, v := range e.Tags {\n\t\t\tfor _, re := range p.tagNames {\n\t\t\t\tif re.MatchString(k) {\n\t\t\t\t\terr := p.write(e)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tp.logger.Printf(\"failed to write to destination: %v\", err)\n\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t}\n\t\t\t\t\tcontinue OUTER\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, re := range p.tags {\n\t\t\t\tif re.MatchString(v) {\n\t\t\t\t\terr := p.write(e)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tp.logger.Printf(\"failed to write to destination: %v\", err)\n\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t}\n\t\t\t\t\tcontinue OUTER\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn es\n}\n\nfunc (p *write) WithLogger(l *log.Logger) {\n\tif p.Debug && l != nil {\n\t\tp.logger = log.New(l.Writer(), loggingPrefix, l.Flags())\n\t} else if p.Debug {\n\t\tp.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n\t}\n}\n\nfunc (p 
*write) write(e *formatters.EventMsg) error {\n\tvar b []byte\n\tvar err error\n\tif len(p.Indent) > 0 {\n\t\tb, err = json.MarshalIndent(e, \"\", p.Indent)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tb, err = json.Marshal(e)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tp.dst.Write(append(b, p.sep...))\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/formatters/event_write/event_write_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage event_write\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"log\"\n\t\"testing\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n)\n\ntype item struct {\n\tinput  *formatters.EventMsg\n\toutput string\n}\n\nvar testset = map[string]struct {\n\tprocessorType string\n\tprocessor     map[string]interface{}\n\ttests         []item\n}{\n\t\"write_condition\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"condition\": `.values.number == \"42\"`,\n\t\t\t\"separator\": \"sep\",\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\"number\": \"42\"}},\n\t\t\t\toutput: `{\"values\":{\"number\":\"42\"}}sep`,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{\"name\": \"foo\"},\n\t\t\t\t\tValues: map[string]interface{}{\"number\": \"42\"}},\n\t\t\t\toutput: `{\"tags\":{\"name\":\"foo\"},\"values\":{\"number\":\"42\"}}sep`,\n\t\t\t},\n\t\t},\n\t},\n\t\"write_values_all\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: 
map[string]interface{}{\n\t\t\t\"value-names\": []string{\".\"},\n\t\t\t\"separator\":   \"sep\",\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\"number\": \"42\"}},\n\t\t\t\toutput: `{\"values\":{\"number\":\"42\"}}sep`,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{\"name\": \"foo\"},\n\t\t\t\t\tValues: map[string]interface{}{\"number\": \"42\"}},\n\t\t\t\toutput: `{\"tags\":{\"name\":\"foo\"},\"values\":{\"number\":\"42\"}}sep`,\n\t\t\t},\n\t\t},\n\t},\n\t\"write_values_some\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"value-names\": []string{\"^number\"},\n\t\t\t\"separator\":   \"sep\",\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\"number\": \"42\"}},\n\t\t\t\toutput: `{\"values\":{\"number\":\"42\"}}sep`,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{\"name\": \"foo\"},\n\t\t\t\t\tValues: 
map[string]interface{}{\"not_number\": \"42\"}},\n\t\t\t\toutput: ``,\n\t\t\t},\n\t\t},\n\t},\n\t\"write_tags_all\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tag-names\": []string{\".\"},\n\t\t\t\"separator\": \"sep\",\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\"number\": \"42\"}},\n\t\t\t\toutput: ``,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{\"name\": \"foo\"},\n\t\t\t\t\tValues: map[string]interface{}{\"number\": \"42\"}},\n\t\t\t\toutput: `{\"tags\":{\"name\":\"foo\"},\"values\":{\"number\":\"42\"}}sep`,\n\t\t\t},\n\t\t},\n\t},\n\t\"write_tags_some\": {\n\t\tprocessorType: processorType,\n\t\tprocessor: map[string]interface{}{\n\t\t\t\"tag-names\": []string{\"^name\"},\n\t\t\t\"separator\": \"sep\",\n\t\t},\n\t\ttests: []item{\n\t\t\t{\n\t\t\t\tinput:  nil,\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{}},\n\t\t\t\toutput: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   map[string]string{},\n\t\t\t\t\tValues: map[string]interface{}{\"number\": \"42\"}},\n\t\t\t\toutput: ``,\n\t\t\t},\n\t\t\t{\n\t\t\t\tinput: &formatters.EventMsg{\n\t\t\t\t\tTags:   
map[string]string{\"name\": \"foo\"},\n\t\t\t\t\tValues: map[string]interface{}{\"number\": \"42\"}},\n\t\t\t\toutput: `{\"tags\":{\"name\":\"foo\"},\"values\":{\"number\":\"42\"}}sep`,\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestEventWrite(t *testing.T) {\n\tfor name, ts := range testset {\n\t\tp := &write{logger: log.New(io.Discard, \"\", 0)}\n\t\terr := p.Init(ts.processor)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"failed to initialize processors: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tt.Logf(\"initialized for test %s: %+v\", name, p)\n\t\tfor i, item := range ts.tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tbuff := new(bytes.Buffer)\n\t\t\t\tp.dst = buff\n\t\t\t\tt.Logf(\"running '%s' test item %d\", name, i)\n\t\t\t\tp.Apply(item.input)\n\t\t\t\tif buff.String() != item.output {\n\t\t\t\t\tt.Errorf(\"failed at %s item %d, expected %+v, got: %+v\", name, i, item.output, buff.String())\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/flat.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage formatters\n\nimport (\n\t\"errors\"\n\t\"path/filepath\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/path\"\n)\n\nfunc ResponsesFlat(msgs ...proto.Message) (map[string]interface{}, error) {\n\trs := make(map[string]interface{})\n\tfor _, msg := range msgs {\n\t\tmr, err := responseFlat(msg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor k, v := range mr {\n\t\t\trs[k] = v\n\t\t}\n\t}\n\treturn rs, nil\n}\n\nfunc responseFlat(msg proto.Message) (map[string]interface{}, error) {\n\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\tcase *gnmi.GetResponse:\n\t\trs := make(map[string]interface{})\n\t\tfor _, n := range msg.GetNotification() {\n\t\t\tprefix := path.GnmiPathToXPath(n.GetPrefix(), false)\n\t\t\tfor _, u := range n.GetUpdate() {\n\t\t\t\tp := path.GnmiPathToXPath(u.GetPath(), false)\n\t\t\t\t// If there is no prefix whatsoever, prepend\n\t\t\t\t// leading slash to the path\n\t\t\t\tif prefix == \"\" {\n\t\t\t\t\tp = filepath.Join(\"/\", p)\n\t\t\t\t}\n\t\t\t\t// If a prefix is populated without an origin,\n\t\t\t\t// prepend leading slash to the prefix\n\t\t\t\tif n.GetPrefix().GetOrigin() == \"\" && n.GetPrefix().GetElem() != nil {\n\t\t\t\t\tprefix = filepath.Join(\"/\", prefix)\n\t\t\t\t}\n\t\t\t\tvmap, err := getValueFlat(filepath.Join(prefix, p), u.GetVal())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tif len(vmap) == 0 {\n\t\t\t\t\trs[p] = 
\"{}\"\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor p, v := range vmap {\n\t\t\t\t\trs[p] = v\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn rs, nil\n\tcase *gnmi.SubscribeResponse:\n\t\trs := make(map[string]interface{})\n\t\tn := msg.GetUpdate()\n\t\tif n != nil {\n\t\t\tprefix := path.GnmiPathToXPath(n.GetPrefix(), false)\n\t\t\tfor _, u := range n.GetUpdate() {\n\t\t\t\tp := path.GnmiPathToXPath(u.GetPath(), false)\n\t\t\t\t// If there is no prefix whatsoever, prepend\n\t\t\t\t// leading slash to the path\n\t\t\t\tif prefix == \"\" {\n\t\t\t\t\tp = filepath.Join(\"/\", p)\n\t\t\t\t}\n\t\t\t\t// If a prefix is populated without an origin,\n\t\t\t\t// prepend leading slash to the prefix\n\t\t\t\tif n.GetPrefix().GetOrigin() == \"\" && n.GetPrefix().GetElem() != nil {\n\t\t\t\t\tprefix = filepath.Join(\"/\", prefix)\n\t\t\t\t}\n\t\t\t\tvmap, err := getValueFlat(filepath.Join(prefix, p), u.GetVal())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tif len(vmap) == 0 {\n\t\t\t\t\trs[p] = \"{}\"\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor p, v := range vmap {\n\t\t\t\t\trs[p] = v\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn rs, nil\n\t}\n\treturn nil, errors.New(\"unsupported message type\")\n}\n"
  },
  {
    "path": "pkg/formatters/formats.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage formatters\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n\n\t\"google.golang.org/protobuf/encoding/protojson\"\n\t\"google.golang.org/protobuf/encoding/prototext\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/utils\"\n)\n\ntype MarshalOptions struct {\n\tMultiline            bool\n\tIndent               string\n\tFormat               string\n\tOverrideTS           bool\n\tValuesOnly           bool\n\tCalculateLatency     bool\n\tRegisteredExtensions utils.RegisteredExtensions\n\tProtoFiles           []string\n\tProtoDir             []string\n}\n\n// Marshal //\nfunc (o *MarshalOptions) Marshal(msg proto.Message, meta map[string]string, eps ...EventProcessor) ([]byte, error) {\n\tmsg = o.OverrideTimestamp(msg)\n\tswitch o.Format {\n\tdefault: // json\n\t\treturn o.FormatJSON(msg, meta)\n\tcase \"proto\":\n\t\treturn proto.Marshal(msg)\n\tcase \"protojson\":\n\t\treturn protojson.MarshalOptions{Multiline: o.Multiline, Indent: o.Indent}.Marshal(msg)\n\tcase \"prototext\":\n\t\treturn prototext.MarshalOptions{Multiline: o.Multiline, Indent: o.Indent}.Marshal(msg)\n\tcase \"event\":\n\t\tb := make([]byte, 0)\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase *gnmi.SubscribeResponse:\n\t\t\tvar subscriptionName string\n\t\t\tvar ok bool\n\t\t\tif subscriptionName, ok = meta[\"subscription-name\"]; !ok {\n\t\t\t\tsubscriptionName = \"default\"\n\t\t\t}\n\t\t\tswitch msg.GetResponse().(type) 
{\n\t\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\t\tevents, err := ResponseToEventMsgs(subscriptionName, msg, meta, eps...)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed converting response to events: %v\", err)\n\t\t\t\t}\n\t\t\t\tif len(events) == 0 {\n\t\t\t\t\treturn nil, nil\n\t\t\t\t}\n\t\t\t\tif o.Multiline {\n\t\t\t\t\tb, err = jsonMarshalIndent(events, \"\", o.Indent)\n\t\t\t\t} else {\n\t\t\t\t\tb, err = jsonMarshal(events)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed marshaling format 'event': %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn b, nil\n\t\tcase *gnmi.GetResponse:\n\t\t\tevents, err := GetResponseToEventMsgs(msg, meta, eps...)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed converting response to events: %v\", err)\n\t\t\t}\n\n\t\t\tif o.Multiline {\n\t\t\t\tb, err = jsonMarshalIndent(events, \"\", o.Indent)\n\t\t\t} else {\n\t\t\t\tb, err = jsonMarshal(events)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed marshaling format 'event': %v\", err)\n\t\t\t}\n\t\t\treturn b, nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"format 'event' not supported for msg type %T\", msg.ProtoReflect().Interface())\n\t\t}\n\tcase \"flat\":\n\t\tflatMsg, err := responseFlat(msg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmsgLen := len(flatMsg)\n\t\tif msgLen == 0 {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tsortedPaths := make([]string, 0, msgLen)\n\t\tfor k := range flatMsg {\n\t\t\tsortedPaths = append(sortedPaths, k)\n\t\t}\n\t\tsort.Strings(sortedPaths)\n\n\t\tbuf := new(bytes.Buffer)\n\t\tfor _, p := range sortedPaths {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%s: %v\\n\", p, flatMsg[p]))\n\t\t}\n\t\treturn buf.Bytes(), nil\n\t}\n}\n\nfunc (o *MarshalOptions) OverrideTimestamp(msg proto.Message) proto.Message {\n\tif o.OverrideTS {\n\t\tts := time.Now().UnixNano()\n\t\tswitch msg := msg.ProtoReflect().Interface().(type) {\n\t\tcase 
*gnmi.SubscribeResponse:\n\t\t\tswitch msg.GetResponse().(type) {\n\t\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\t\tupd := msg.GetUpdate()\n\t\t\t\tif upd != nil {\n\t\t\t\t\tupd.Timestamp = ts\n\t\t\t\t}\n\t\t\t\treturn msg\n\t\t\t}\n\t\t}\n\t}\n\treturn msg\n}\n"
  },
  {
    "path": "pkg/formatters/json.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage formatters\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/fullstorydev/grpcurl\"\n\t\"github.com/jhump/protoreflect/dynamic\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmi/proto/gnmi_ext\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/path\"\n\t\"github.com/openconfig/gnmic/pkg/utils\"\n)\n\nvar bytesBufferPool = sync.Pool{\n\tNew: func() any {\n\t\treturn new(bytes.Buffer)\n\t},\n}\n\n// jsonMarshal encodes v to JSON without HTML-escaping '<', '>', or '&'.\nfunc jsonMarshal(v any) ([]byte, error) {\n\tbuf := bytesBufferPool.Get().(*bytes.Buffer)\n\tdefer func() {\n\t\tbuf.Reset()\n\t\tbytesBufferPool.Put(buf)\n\t}()\n\tenc := json.NewEncoder(buf)\n\tenc.SetEscapeHTML(false)\n\tif err := enc.Encode(v); err != nil {\n\t\treturn nil, err\n\t}\n\tresult := bytes.TrimRight(buf.Bytes(), \"\\n\")\n\tout := make([]byte, len(result))\n\tcopy(out, result)\n\treturn out, nil\n}\n\n// jsonMarshalIndent is like jsonMarshal but applies indented formatting.\nfunc jsonMarshalIndent(v any, prefix, indent string) ([]byte, error) {\n\tbuf := bytesBufferPool.Get().(*bytes.Buffer)\n\tdefer func() {\n\t\tbuf.Reset()\n\t\tbytesBufferPool.Put(buf)\n\t}()\n\tenc := json.NewEncoder(buf)\n\tenc.SetEscapeHTML(false)\n\tenc.SetIndent(prefix, indent)\n\tif err := enc.Encode(v); err != nil {\n\t\treturn nil, err\n\t}\n\tresult := bytes.TrimRight(buf.Bytes(), \"\\n\")\n\tout := make([]byte, 
len(result))\n\tcopy(out, result)\n\treturn out, nil\n}\n\nfunc formatRegisteredExtensions(\n\textensions []*gnmi_ext.Extension,\n\tprotoDir,\n\tprotoFiles []string,\n\textensionDecodeMap utils.RegisteredExtensions,\n) (map[int32]decodedExtension, error) {\n\tdecodedExtensions := map[int32]decodedExtension{}\n\n\tif len(extensions) == 0 {\n\t\treturn decodedExtensions, nil\n\t}\n\n\tif len(protoFiles) == 0 {\n\t\treturn decodedExtensions, nil\n\t}\n\n\tdescSource, err := grpcurl.DescriptorSourceFromProtoFiles(protoDir, protoFiles...)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ext := range extensions {\n\t\trext := ext.GetRegisteredExt()\n\n\t\tif rext == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tid := int32(rext.Id)\n\t\tmsg, exists := extensionDecodeMap[id]\n\n\t\tif !exists {\n\t\t\tcontinue\n\t\t}\n\n\t\tdesc, err := descSource.FindSymbol(msg)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpm := dynamic.NewMessage(desc.GetFile().FindMessage(msg))\n\n\t\tif err = pm.Unmarshal(rext.Msg); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tjsondata, err := pm.MarshalJSON()\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmsgJson := map[string]any{}\n\n\t\tif err = json.Unmarshal(jsondata, &msgJson); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdecodedExtensions[id] = msgJson\n\t}\n\n\treturn decodedExtensions, nil\n}\n\n// FormatJSON formats a proto.Message and returns a []byte and an error\nfunc (o *MarshalOptions) FormatJSON(m proto.Message, meta map[string]string) ([]byte, error) {\n\tif m == nil {\n\t\treturn nil, nil\n\t}\n\tswitch m := m.ProtoReflect().Interface().(type) {\n\tcase *gnmi.CapabilityRequest:\n\t\treturn o.formatCapabilitiesRequest(m)\n\tcase *gnmi.CapabilityResponse:\n\t\treturn o.formatCapabilitiesResponse(m)\n\tcase *gnmi.GetRequest:\n\t\treturn o.formatGetRequest(m)\n\tcase *gnmi.GetResponse:\n\t\treturn o.formatGetResponse(m, meta)\n\tcase *gnmi.SetRequest:\n\t\treturn o.formatSetRequest(m)\n\tcase 
*gnmi.SetResponse:\n\t\treturn o.formatSetResponse(m, meta)\n\tcase *gnmi.SubscribeRequest:\n\t\treturn o.formatSubscribeRequest(m)\n\tcase *gnmi.SubscribeResponse:\n\t\treturn o.formatSubscribeResponse(m, meta)\n\t}\n\treturn nil, nil\n}\n\nfunc (o *MarshalOptions) formatSubscribeRequest(m *gnmi.SubscribeRequest) ([]byte, error) {\n\tmsg := subscribeReq{}\n\tswitch m := m.Request.(type) {\n\tcase *gnmi.SubscribeRequest_Subscribe:\n\t\tmsg.Subscribe.Prefix = path.GnmiPathToXPath(m.Subscribe.GetPrefix(), false)\n\t\tmsg.Subscribe.Target = m.Subscribe.GetPrefix().GetTarget()\n\t\tmsg.Subscribe.Subscriptions = make([]subscription, 0, len(m.Subscribe.GetSubscription()))\n\t\tif m.Subscribe != nil {\n\t\t\tmsg.Subscribe.AllowAggregation = m.Subscribe.AllowAggregation\n\t\t\tmsg.Subscribe.UpdatesOnly = m.Subscribe.UpdatesOnly\n\t\t\tmsg.Subscribe.Encoding = m.Subscribe.Encoding.String()\n\t\t\tmsg.Subscribe.Mode = m.Subscribe.Mode.String()\n\t\t\tif m.Subscribe.Qos != nil {\n\t\t\t\tmsg.Subscribe.Qos = m.Subscribe.GetQos().GetMarking()\n\t\t\t}\n\t\t\tfor _, sub := range m.Subscribe.Subscription {\n\t\t\t\tmsg.Subscribe.Subscriptions = append(msg.Subscribe.Subscriptions,\n\t\t\t\t\tsubscription{\n\t\t\t\t\t\tPath:              path.GnmiPathToXPath(sub.Path, false),\n\t\t\t\t\t\tMode:              sub.GetMode().String(),\n\t\t\t\t\t\tSampleInterval:    sub.SampleInterval,\n\t\t\t\t\t\tHeartbeatInterval: sub.HeartbeatInterval,\n\t\t\t\t\t\tSuppressRedundant: sub.SuppressRedundant,\n\t\t\t\t\t})\n\t\t\t}\n\t\t}\n\tcase *gnmi.SubscribeRequest_Poll:\n\t\tmsg.Poll = new(poll)\n\t}\n\tif len(m.GetExtension()) > 0 {\n\t\tmsg.Extensions = m.GetExtension()\n\t}\n\tif o.Multiline {\n\t\treturn jsonMarshalIndent(msg, \"\", o.Indent)\n\t}\n\treturn jsonMarshal(msg)\n}\n\nfunc (o *MarshalOptions) formatSubscribeResponse(m *gnmi.SubscribeResponse, meta map[string]string) ([]byte, error) {\n\tdext, err := formatRegisteredExtensions(m.GetExtension(), o.ProtoDir, o.ProtoFiles, 
o.RegisteredExtensions)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch mr := m.GetResponse().(type) {\n\tdefault:\n\t\tif len(m.GetExtension()) > 0 {\n\n\t\t\tmsg := notificationRspMsg{\n\t\t\t\tExtensions:        m.GetExtension(),\n\t\t\t\tDecodedExtensions: dext,\n\t\t\t}\n\t\t\tif o.Multiline {\n\t\t\t\treturn jsonMarshalIndent(msg, \"\", o.Indent)\n\t\t\t}\n\t\t\treturn jsonMarshal(msg)\n\t\t}\n\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\tmsg := &syncResponseMsg{\n\t\t\tSyncResponse:      mr.SyncResponse,\n\t\t\tExtensions:        m.GetExtension(),\n\t\t\tDecodedExtensions: dext,\n\t\t}\n\t\tif o.Multiline {\n\t\t\treturn jsonMarshalIndent(msg, \"\", o.Indent)\n\t\t}\n\t\treturn jsonMarshal(msg)\n\tcase *gnmi.SubscribeResponse_Update:\n\t\tmsg := notificationRspMsg{\n\t\t\tTimestamp: mr.Update.Timestamp,\n\t\t}\n\t\tt := time.Unix(0, mr.Update.Timestamp)\n\t\tmsg.Time = &t\n\t\tif o.CalculateLatency {\n\t\t\tmsg.RecvTimestamp = time.Now().UnixNano()\n\t\t\trt := time.Unix(0, msg.RecvTimestamp)\n\t\t\tmsg.RecvTime = &rt\n\t\t\tmsg.LatencyNano = msg.RecvTimestamp - msg.Timestamp\n\t\t\tmsg.LatencyMilli = msg.LatencyNano / 1000 / 1000\n\t\t}\n\t\tif meta == nil {\n\t\t\tmeta = make(map[string]string)\n\t\t}\n\t\tmsg.Prefix = path.GnmiPathToXPath(mr.Update.GetPrefix(), false)\n\t\tmsg.Target = mr.Update.Prefix.GetTarget()\n\t\tif s, ok := meta[\"source\"]; ok {\n\t\t\tmsg.Source = s\n\t\t}\n\t\tif s, ok := meta[\"system-name\"]; ok {\n\t\t\tmsg.SystemName = s\n\t\t}\n\t\tif s, ok := meta[\"subscription-name\"]; ok {\n\t\t\tmsg.SubscriptionName = s\n\t\t}\n\t\tfor i, upd := range mr.Update.Update {\n\t\t\tif upd.Path == nil {\n\t\t\t\tupd.Path = new(gnmi.Path)\n\t\t\t}\n\t\t\tpathElems := make([]string, 0, len(upd.Path.Elem))\n\t\t\tfor _, pElem := range upd.Path.Elem {\n\t\t\t\tpathElems = append(pathElems, pElem.GetName())\n\t\t\t}\n\t\t\tvalue, err := getValue(upd.Val)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tmsg.Updates = 
append(msg.Updates,\n\t\t\t\tupdate{\n\t\t\t\t\tPath:   path.GnmiPathToXPath(upd.Path, false),\n\t\t\t\t\tValues: make(map[string]interface{}),\n\t\t\t\t})\n\t\t\tmsg.Updates[i].Values[strings.Join(pathElems, \"/\")] = value\n\t\t}\n\t\tfor _, del := range mr.Update.Delete {\n\t\t\tmsg.Deletes = append(msg.Deletes, path.GnmiPathToXPath(del, false))\n\t\t}\n\t\tif len(m.GetExtension()) > 0 {\n\t\t\tmsg.Extensions = m.GetExtension()\n\t\t\tmsg.DecodedExtensions = dext\n\t\t}\n\t\tif o.Multiline {\n\t\t\treturn jsonMarshalIndent(msg, \"\", o.Indent)\n\t\t}\n\t\treturn jsonMarshal(msg)\n\t}\n\treturn nil, nil\n}\n\nfunc (o *MarshalOptions) formatCapabilitiesRequest(m *gnmi.CapabilityRequest) ([]byte, error) {\n\tcapReq := capRequest{\n\t\tExtensions: m.Extension,\n\t}\n\tif o.Multiline {\n\t\treturn jsonMarshalIndent(capReq, \"\", o.Indent)\n\t}\n\treturn jsonMarshal(capReq)\n}\n\nfunc (o *MarshalOptions) formatCapabilitiesResponse(m *gnmi.CapabilityResponse) ([]byte, error) {\n\tcapRspMsg := capResponse{\n\t\tExtensions: m.Extension,\n\t}\n\tcapRspMsg.Version = m.GetGNMIVersion()\n\tfor _, sm := range m.SupportedModels {\n\t\tcapRspMsg.SupportedModels = append(capRspMsg.SupportedModels,\n\t\t\tmodel{\n\t\t\t\tName:         sm.GetName(),\n\t\t\t\tOrganization: sm.GetOrganization(),\n\t\t\t\tVersion:      sm.GetVersion(),\n\t\t\t})\n\t}\n\tfor _, se := range m.SupportedEncodings {\n\t\tcapRspMsg.Encodings = append(capRspMsg.Encodings, se.String())\n\t}\n\tif o.Multiline {\n\t\treturn jsonMarshalIndent(capRspMsg, \"\", o.Indent)\n\t}\n\treturn jsonMarshal(capRspMsg)\n}\n\nfunc (o *MarshalOptions) formatGetRequest(m *gnmi.GetRequest) ([]byte, error) {\n\tmsg := getRqMsg{\n\t\tPrefix:     path.GnmiPathToXPath(m.GetPrefix(), false),\n\t\tTarget:     m.GetPrefix().GetTarget(),\n\t\tPaths:      make([]string, 0, len(m.Path)),\n\t\tEncoding:   m.GetEncoding().String(),\n\t\tDataType:   m.GetType().String(),\n\t\tExtensions: m.Extension,\n\t}\n\tfor _, p := range m.Path 
{\n\t\tmsg.Paths = append(msg.Paths, path.GnmiPathToXPath(p, false))\n\t}\n\tfor _, um := range m.UseModels {\n\t\tmsg.Models = append(msg.Models,\n\t\t\tmodel{\n\t\t\t\tName:         um.GetName(),\n\t\t\t\tOrganization: um.GetOrganization(),\n\t\t\t\tVersion:      um.GetVersion(),\n\t\t\t})\n\t}\n\tif o.Multiline {\n\t\treturn jsonMarshalIndent(msg, \"\", o.Indent)\n\t}\n\treturn jsonMarshal(msg)\n}\n\nfunc (o *MarshalOptions) formatGetResponse(m *gnmi.GetResponse, meta map[string]string) ([]byte, error) {\n\tdext, err := formatRegisteredExtensions(m.GetExtension(), o.ProtoDir, o.ProtoFiles, o.RegisteredExtensions)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgetRsp := getRspMsg{\n\t\tNotifications:     make([]notificationRspMsg, 0, len(m.GetNotification())),\n\t\tExtensions:        m.GetExtension(),\n\t\tDecodedExtensions: dext,\n\t}\n\n\tfor _, notif := range m.GetNotification() {\n\t\tmsg := notificationRspMsg{\n\t\t\tPrefix:  path.GnmiPathToXPath(notif.GetPrefix(), false),\n\t\t\tUpdates: make([]update, 0, len(notif.GetUpdate())),\n\t\t\tDeletes: make([]string, 0, len(notif.GetDelete())),\n\t\t}\n\t\tmsg.Timestamp = notif.Timestamp\n\t\tt := time.Unix(0, notif.Timestamp)\n\t\tmsg.Time = &t\n\t\tif o.CalculateLatency && !o.ValuesOnly {\n\t\t\tmsg.RecvTimestamp = time.Now().UnixNano()\n\t\t\trt := time.Unix(0, msg.RecvTimestamp)\n\t\t\tmsg.RecvTime = &rt\n\t\t\tmsg.LatencyNano = msg.RecvTimestamp - msg.Timestamp\n\t\t\tmsg.LatencyMilli = msg.LatencyNano / 1000 / 1000\n\t\t}\n\t\tif meta == nil {\n\t\t\tmeta = make(map[string]string)\n\t\t}\n\t\tmsg.Prefix = path.GnmiPathToXPath(notif.GetPrefix(), false)\n\t\tmsg.Target = notif.GetPrefix().GetTarget()\n\t\tif s, ok := meta[\"source\"]; ok {\n\t\t\tmsg.Source = s\n\t\t}\n\t\tfor i, upd := range notif.GetUpdate() {\n\t\t\tpathElems := make([]string, 0, len(upd.GetPath().GetElem()))\n\t\t\tfor _, pElem := range upd.GetPath().GetElem() {\n\t\t\t\tpathElems = append(pathElems, 
pElem.GetName())\n\t\t\t}\n\t\t\tvalue, err := getValue(upd.GetVal())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tmsg.Updates = append(msg.Updates,\n\t\t\t\tupdate{\n\t\t\t\t\tPath:   path.GnmiPathToXPath(upd.GetPath(), false),\n\t\t\t\t\tValues: make(map[string]interface{}),\n\t\t\t\t})\n\t\t\tmsg.Updates[i].Values[strings.Join(pathElems, \"/\")] = value\n\t\t}\n\t\tfor _, del := range notif.GetDelete() {\n\t\t\tmsg.Deletes = append(msg.Deletes, path.GnmiPathToXPath(del, false))\n\t\t}\n\t\tgetRsp.Notifications = append(getRsp.Notifications, msg)\n\t}\n\n\tif o.ValuesOnly {\n\t\tresult := make([]interface{}, 0, len(getRsp.Notifications))\n\t\tfor _, n := range getRsp.Notifications {\n\t\t\tfor _, u := range n.Updates {\n\t\t\t\tfor _, v := range u.Values {\n\t\t\t\t\tresult = append(result, v)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn jsonMarshalIndent(result, \"\", \"  \")\n\t}\n\tvar data any\n\tif len(getRsp.Extensions) > 0 {\n\t\tdata = getRsp\n\t} else {\n\t\tdata = getRsp.Notifications\n\t}\n\tif o.Multiline {\n\t\treturn jsonMarshalIndent(data, \"\", o.Indent)\n\t}\n\treturn jsonMarshal(data)\n}\n\nfunc (o *MarshalOptions) formatSetRequest(m *gnmi.SetRequest) ([]byte, error) {\n\treq := setReqMsg{\n\t\tPrefix:     path.GnmiPathToXPath(m.GetPrefix(), false),\n\t\tTarget:     m.GetPrefix().GetTarget(),\n\t\tDelete:     make([]string, 0, len(m.GetDelete())),\n\t\tReplace:    make([]updateMsg, 0, len(m.GetReplace())),\n\t\tUpdate:     make([]updateMsg, 0, len(m.GetUpdate())),\n\t\tExtensions: m.GetExtension(),\n\t}\n\n\tfor _, del := range m.GetDelete() {\n\t\tp := path.GnmiPathToXPath(del, false)\n\t\treq.Delete = append(req.Delete, p)\n\t}\n\n\tfor _, upd := range m.GetReplace() {\n\t\treq.Replace = append(req.Replace, updateMsg{\n\t\t\tPath: path.GnmiPathToXPath(upd.GetPath(), false),\n\t\t\tVal:  upd.Val.String(),\n\t\t})\n\t}\n\n\tfor _, upd := range m.GetUpdate() {\n\t\treq.Update = append(req.Update, updateMsg{\n\t\t\tPath: 
path.GnmiPathToXPath(upd.GetPath(), false),\n\t\t\tVal:  upd.Val.String(),\n\t\t})\n\t}\n\tif o.Multiline {\n\t\treturn jsonMarshalIndent(req, \"\", o.Indent)\n\t}\n\treturn jsonMarshal(req)\n}\n\nfunc (o *MarshalOptions) formatSetResponse(m *gnmi.SetResponse, meta map[string]string) ([]byte, error) {\n\tdext, err := formatRegisteredExtensions(m.GetExtension(), o.ProtoDir, o.ProtoFiles, o.RegisteredExtensions)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmsg := setRspMsg{\n\t\tPrefix:            path.GnmiPathToXPath(m.GetPrefix(), false),\n\t\tTarget:            m.GetPrefix().GetTarget(),\n\t\tTimestamp:         m.GetTimestamp(),\n\t\tTime:              time.Unix(0, m.Timestamp),\n\t\tExtensions:        m.GetExtension(),\n\t\tDecodedExtensions: dext,\n\t}\n\tif meta == nil {\n\t\tmeta = make(map[string]string)\n\t}\n\tmsg.Results = make([]updateResultMsg, 0, len(m.GetResponse()))\n\tif s, ok := meta[\"source\"]; ok {\n\t\tmsg.Source = s\n\t}\n\tfor _, u := range m.GetResponse() {\n\t\tmsg.Results = append(msg.Results, updateResultMsg{\n\t\t\tOperation: u.Op.String(),\n\t\t\tPath:      path.GnmiPathToXPath(u.GetPath(), false),\n\t\t\tTarget:    u.GetPath().GetTarget(),\n\t\t})\n\t}\n\tif o.Multiline {\n\t\treturn jsonMarshalIndent(msg, \"\", o.Indent)\n\t}\n\treturn jsonMarshal(msg)\n}\n"
  },
  {
    "path": "pkg/formatters/msg.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage formatters\n\nimport (\n\t\"encoding/json\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmi/proto/gnmi_ext\"\n)\n\ntype syncResponseMsg struct {\n\tSyncResponse      bool                       `json:\"sync-response,omitempty\"`\n\tExtensions        []*gnmi_ext.Extension      `json:\"extensions,omitempty\"`\n\tDecodedExtensions map[int32]decodedExtension `json:\"decodedExtensions,omitempty\"`\n}\n\ntype notificationRspMsg struct {\n\tMeta              map[string]interface{}     `json:\"meta,omitempty\"`\n\tSource            string                     `json:\"source,omitempty\"`\n\tSystemName        string                     `json:\"system-name,omitempty\"`\n\tSubscriptionName  string                     `json:\"subscription-name,omitempty\"`\n\tTimestamp         int64                      `json:\"timestamp,omitempty\"`\n\tTime              *time.Time                 `json:\"time,omitempty\"`\n\tRecvTimestamp     int64                      `json:\"recv-timestamp,omitempty\"`\n\tRecvTime          *time.Time                 `json:\"recv-time,omitempty\"`\n\tLatencyNano       int64                      `json:\"latency-nano,omitempty\"`\n\tLatencyMilli      int64                      `json:\"latency-milli,omitempty\"`\n\tPrefix            string                     `json:\"prefix,omitempty\"`\n\tTarget            string                     `json:\"target,omitempty\"`\n\tUpdates           []update                   `json:\"updates,omitempty\"`\n\tDeletes       
    []string                   `json:\"deletes,omitempty\"`\n\tExtensions        []*gnmi_ext.Extension      `json:\"extensions,omitempty\"`\n\tDecodedExtensions map[int32]decodedExtension `json:\"decodedExtensions,omitempty\"`\n}\ntype update struct {\n\tPath   string\n\tValues map[string]interface{} `json:\"values,omitempty\"`\n}\ntype capRequest struct {\n\tExtensions []*gnmi_ext.Extension `json:\"extensions,omitempty\"`\n}\ntype capResponse struct {\n\tVersion         string                `json:\"version,omitempty\"`\n\tSupportedModels []model               `json:\"supported-models,omitempty\"`\n\tEncodings       []string              `json:\"encodings,omitempty\"`\n\tExtensions      []*gnmi_ext.Extension `json:\"extensions,omitempty\"`\n}\ntype model struct {\n\tName         string `json:\"name,omitempty\"`\n\tOrganization string `json:\"organization,omitempty\"`\n\tVersion      string `json:\"version,omitempty\"`\n}\n\ntype getRqMsg struct {\n\tPrefix     string                `json:\"prefix,omitempty\"`\n\tTarget     string                `json:\"target,omitempty\"`\n\tPaths      []string              `json:\"paths,omitempty\"`\n\tEncoding   string                `json:\"encoding,omitempty\"`\n\tDataType   string                `json:\"data-type,omitempty\"`\n\tModels     []model               `json:\"models,omitempty\"`\n\tExtensions []*gnmi_ext.Extension `json:\"extensions,omitempty\"`\n}\n\ntype decodedExtension map[string]any\n\ntype getRspMsg struct {\n\tNotifications     []notificationRspMsg       `json:\"notifications,omitempty\"`\n\tExtensions        []*gnmi_ext.Extension      `json:\"extensions,omitempty\"`\n\tDecodedExtensions map[int32]decodedExtension `json:\"decodedExtensions,omitempty\"`\n}\ntype setRspMsg struct {\n\tSource            string                     `json:\"source,omitempty\"`\n\tTimestamp         int64                      `json:\"timestamp,omitempty\"`\n\tTime              time.Time                  
`json:\"time,omitempty\"`\n\tPrefix            string                     `json:\"prefix,omitempty\"`\n\tTarget            string                     `json:\"target,omitempty\"`\n\tResults           []updateResultMsg          `json:\"results,omitempty\"`\n\tExtensions        []*gnmi_ext.Extension      `json:\"extensions,omitempty\"`\n\tDecodedExtensions map[int32]decodedExtension `json:\"decodedExtensions,omitempty\"`\n}\n\ntype updateResultMsg struct {\n\tOperation string `json:\"operation,omitempty\"`\n\tPath      string `json:\"path,omitempty\"`\n\tTarget    string `json:\"target,omitempty\"`\n}\n\ntype setReqMsg struct {\n\tPrefix     string                `json:\"prefix,omitempty\"`\n\tTarget     string                `json:\"target,omitempty\"`\n\tDelete     []string              `json:\"delete,omitempty\"`\n\tReplace    []updateMsg           `json:\"replace,omitempty\"`\n\tUpdate     []updateMsg           `json:\"update,omitempty\"`\n\tExtensions []*gnmi_ext.Extension `json:\"extensions,omitempty\"`\n}\n\ntype updateMsg struct {\n\tPath string `json:\"path,omitempty\"`\n\tVal  string `json:\"val,omitempty\"`\n}\n\ntype subscribeReq struct {\n\tSubscribe  subscribe             `json:\"subscribe,omitempty\"`\n\tPoll       *poll                 `json:\"poll,omitempty\"`\n\tAliases    map[string]string     `json:\"aliases,omitempty\"`\n\tExtensions []*gnmi_ext.Extension `json:\"extensions,omitempty\"`\n}\n\ntype poll struct{}\n\ntype subscribe struct {\n\tTarget           string         `json:\"target,omitempty\"`\n\tPrefix           string         `json:\"prefix,omitempty\"`\n\tSubscriptions    []subscription `json:\"subscriptions,omitempty\"`\n\tUseAliases       bool           `json:\"use-aliases,omitempty\"`\n\tQos              uint32         `json:\"qos,omitempty\"`\n\tMode             string         `json:\"mode,omitempty\"`\n\tAllowAggregation bool           `json:\"allow-aggregation,omitempty\"`\n\tUseModels        []model        
`json:\"use-models,omitempty\"`\n\tEncoding         string         `json:\"encoding,omitempty\"`\n\tUpdatesOnly      bool           `json:\"updates-only,omitempty\"`\n}\n\ntype subscription struct {\n\tPath              string `json:\"path,omitempty\"`\n\tMode              string `json:\"mode,omitempty\"`\n\tSampleInterval    uint64 `json:\"sample-interval,omitempty\"`\n\tSuppressRedundant bool   `json:\"suppress-redundant,omitempty\"`\n\tHeartbeatInterval uint64 `json:\"heartbeat-interval,omitempty\"`\n}\n\nfunc getValue(updValue *gnmi.TypedValue) (interface{}, error) {\n\tif updValue == nil {\n\t\treturn nil, nil\n\t}\n\tvar value interface{}\n\tvar jsondata []byte\n\tswitch updValue.Value.(type) {\n\tcase *gnmi.TypedValue_AsciiVal:\n\t\tvalue = updValue.GetAsciiVal()\n\tcase *gnmi.TypedValue_BoolVal:\n\t\tvalue = updValue.GetBoolVal()\n\tcase *gnmi.TypedValue_BytesVal:\n\t\tvalue = updValue.GetBytesVal()\n\tcase *gnmi.TypedValue_DecimalVal:\n\t\t//lint:ignore SA1019 still need DecimalVal for backward compatibility\n\t\tvalue = updValue.GetDecimalVal()\n\tcase *gnmi.TypedValue_FloatVal:\n\t\t//lint:ignore SA1019 still need GetFloatVal for backward compatibility\n\t\tvalue = updValue.GetFloatVal()\n\tcase *gnmi.TypedValue_DoubleVal:\n\t\tvalue = updValue.GetDoubleVal()\n\tcase *gnmi.TypedValue_IntVal:\n\t\tvalue = updValue.GetIntVal()\n\tcase *gnmi.TypedValue_StringVal:\n\t\tvalue = updValue.GetStringVal()\n\tcase *gnmi.TypedValue_UintVal:\n\t\tvalue = updValue.GetUintVal()\n\tcase *gnmi.TypedValue_JsonIetfVal:\n\t\tjsondata = updValue.GetJsonIetfVal()\n\tcase *gnmi.TypedValue_JsonVal:\n\t\tjsondata = updValue.GetJsonVal()\n\tcase *gnmi.TypedValue_LeaflistVal:\n\t\tvalue = updValue.GetLeaflistVal()\n\tcase *gnmi.TypedValue_ProtoBytes:\n\t\tvalue = updValue.GetProtoBytes()\n\tcase *gnmi.TypedValue_AnyVal:\n\t\tvalue = updValue.GetAnyVal()\n\t}\n\tif value == nil && len(jsondata) != 0 {\n\t\terr := json.Unmarshal(jsondata, &value)\n\t\tif err != nil {\n\t\t\treturn 
nil, err\n\t\t}\n\t}\n\treturn value, nil\n}\n"
  },
  {
    "path": "pkg/formatters/plugin_manager/manager.go",
    "content": "package plugin_manager\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"sync\"\n\n\t\"github.com/hashicorp/go-hclog\"\n\t\"github.com/hashicorp/go-plugin\"\n\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/formatters/event_plugin\"\n)\n\nvar handshakeConfig = plugin.HandshakeConfig{\n\tProtocolVersion:  1,\n\tMagicCookieKey:   \"GNMIC_PLUGIN\",\n\tMagicCookieValue: \"gnmic\",\n}\n\ntype PluginManager struct {\n\tconfig    *config.PluginsConfig\n\tlogOutput io.Writer\n\n\tm             *sync.Mutex\n\tpluginClients []*plugin.Client\n\n\tlogger hclog.Logger\n}\n\nfunc New(pc *config.PluginsConfig, logOutput io.Writer) *PluginManager {\n\tpm := &PluginManager{\n\t\tconfig:        pc,\n\t\tlogOutput:     logOutput,\n\t\tm:             new(sync.Mutex),\n\t\tpluginClients: make([]*plugin.Client, 0),\n\t}\n\tpm.logger = hclog.New(\n\t\t&hclog.LoggerOptions{\n\t\t\tName:       \"plugin-manager\",\n\t\t\tLevel:      hclog.Info,\n\t\t\tOutput:     logOutput,\n\t\t\tTimeFormat: \"2006/01/02 15:04:05.999999\",\n\t\t},\n\t)\n\tif pc.Debug {\n\t\tpm.logger.SetLevel(hclog.Debug)\n\t}\n\treturn pm\n}\n\nfunc (p *PluginManager) Load() error {\n\tif p.config == nil {\n\t\treturn nil\n\t}\n\t// discover plugins in the supplied path\n\tpluginPaths, err := plugin.Discover(p.config.Glob, p.config.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// initialize plugins clients and register plugin processors\n\tfor _, pluginPath := range pluginPaths {\n\t\tname := filepath.Base(pluginPath)\n\t\tformatters.EventProcessorTypes = append(formatters.EventProcessorTypes, name)\n\t\tformatters.Register(name, p.initProcessorFn(name, pluginPath))\n\t}\n\n\treturn nil\n}\n\nfunc (p *PluginManager) Cleanup() {\n\tp.m.Lock()\n\tdefer p.m.Unlock()\n\tfor _, client := range p.pluginClients {\n\t\tclient.Kill()\n\t}\n}\n\nfunc (p *PluginManager) initProcessorFn(name, 
pluginPath string) func() formatters.EventProcessor {\n\treturn func() formatters.EventProcessor {\n\t\tclient := plugin.NewClient(&plugin.ClientConfig{\n\t\t\tHandshakeConfig: handshakeConfig,\n\t\t\tPlugins:         map[string]plugin.Plugin{name: &event_plugin.EventProcessorPlugin{}},\n\t\t\tCmd:             exec.Command(pluginPath),\n\t\t\tStartTimeout:    p.config.StartTimeout,\n\t\t\tSyncStdout:      p.logOutput,\n\t\t\tSyncStderr:      p.logOutput,\n\t\t\tLogger:          p.logger,\n\t\t})\n\t\tp.m.Lock()\n\t\tp.pluginClients = append(p.pluginClients, client)\n\t\tp.m.Unlock()\n\t\trpcClient, err := client.Client()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"failed to initialize plugin processor %s: %v\\n\", name, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\traw, err := rpcClient.Dispense(name)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"failed to dispense plugin processor %s: %v\\n\", name, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\teventPlugin, ok := raw.(formatters.EventProcessor)\n\t\tif !ok {\n\t\t\terr := fmt.Errorf(\"plugin %s dispensed an unexpected interface: %T\", name, raw)\n\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\treturn eventPlugin\n\t}\n}\n"
  },
  {
    "path": "pkg/formatters/processors.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage formatters\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/itchyny/gojq\"\n\t\"github.com/mitchellh/mapstructure\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\nvar EventProcessors = map[string]Initializer{}\n\nvar EventProcessorTypes = []string{\n\t\"event-add-tag\",\n\t\"event-allow\",\n\t\"event-combine\",\n\t\"event-convert\",\n\t\"event-data-convert\",\n\t\"event-date-string\",\n\t\"event-delete\",\n\t\"event-drop\",\n\t\"event-duration-convert\",\n\t\"event-extract-tags\",\n\t\"event-group-by\",\n\t\"event-ieeefloat32\",\n\t\"event-jq\",\n\t\"event-merge\",\n\t\"event-override-ts\",\n\t\"event-rate-limit\",\n\t\"event-starlark\",\n\t\"event-strings\",\n\t\"event-time-epoch\",\n\t\"event-to-tag\",\n\t\"event-trigger\",\n\t\"event-value-tag\",\n\t\"event-value-tag-v2\",\n\t\"event-write\",\n}\n\ntype Initializer func() EventProcessor\n\nfunc Register(name string, initFn Initializer) {\n\tEventProcessors[name] = initFn\n}\n\ntype Option func(EventProcessor)\n\ntype EventProcessor interface {\n\tInit(interface{}, ...Option) error\n\tApply(...*EventMsg) []*EventMsg\n\n\tWithTargets(map[string]*types.TargetConfig)\n\tWithLogger(l *log.Logger)\n\tWithActions(act map[string]map[string]interface{})\n\tWithProcessors(procs map[string]map[string]any)\n}\n\nfunc DecodeConfig(src, dst interface{}) error {\n\tdecoder, err := mapstructure.NewDecoder(\n\t\t&mapstructure.DecoderConfig{\n\t\t\tDecodeHook: mapstructure.StringToTimeDurationHookFunc(),\n\t\t\tResult:     
dst,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn decoder.Decode(src)\n}\n\nfunc WithLogger(l *log.Logger) Option {\n\treturn func(p EventProcessor) {\n\t\tp.WithLogger(l)\n\t}\n}\n\nfunc WithTargets(tcs map[string]*types.TargetConfig) Option {\n\treturn func(p EventProcessor) {\n\t\tp.WithTargets(tcs)\n\t}\n}\n\nfunc WithActions(acts map[string]map[string]interface{}) Option {\n\treturn func(p EventProcessor) {\n\t\tp.WithActions(acts)\n\t}\n}\n\nfunc WithProcessors(procs map[string]map[string]interface{}) Option {\n\treturn func(p EventProcessor) {\n\t\tp.WithProcessors(procs)\n\t}\n}\n\nfunc CheckCondition(code *gojq.Code, e *EventMsg) (bool, error) {\n\tif code == nil {\n\t\treturn true, nil\n\t}\n\n\tvar res interface{}\n\n\tinput := make(map[string]interface{})\n\tb, err := json.Marshal(e)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\terr = json.Unmarshal(b, &input)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\titer := code.Run(input)\n\tvar ok bool\n\tres, ok = iter.Next()\n\t// iterator not done, so the final result won't be a boolean\n\tif !ok {\n\t\treturn false, nil\n\t}\n\tif err, ok = res.(error); ok {\n\t\treturn false, err\n\t}\n\n\tswitch res := res.(type) {\n\tcase bool:\n\t\treturn res, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"unexpected condition return type: %T | %v\", res, res)\n\t}\n}\n\nfunc MakeEventProcessors(\n\tlogger *log.Logger,\n\tprocessorNames []string,\n\tps map[string]map[string]any,\n\ttcs map[string]*types.TargetConfig,\n\tacts map[string]map[string]any,\n) ([]EventProcessor, error) {\n\tevps := make([]EventProcessor, len(processorNames))\n\tfor i, epName := range processorNames {\n\t\tif epCfg, ok := ps[epName]; ok {\n\t\t\tep, err := MakeProcessor(logger, epName, epCfg, ps, tcs, acts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tevps[i] = ep\n\t\t\tcontinue\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%q event processor not found\", epName)\n\t}\n\treturn evps, nil\n}\n\nfunc 
MakeProcessor(logger *log.Logger, name string,\n\tcfg map[string]any,\n\tps map[string]map[string]any,\n\ttcs map[string]*types.TargetConfig,\n\tacts map[string]map[string]any) (EventProcessor, error) {\n\tepType := \"\"\n\tfor k := range cfg {\n\t\tepType = k\n\t\tbreak\n\t}\n\tif in, ok := EventProcessors[epType]; ok {\n\t\tep := in()\n\t\terr := ep.Init(cfg[epType],\n\t\t\tWithLogger(logger),\n\t\t\tWithTargets(tcs),\n\t\t\tWithActions(acts),\n\t\t\tWithProcessors(ps),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed initializing event processor '%s' of type='%s': %w\", name, epType, err)\n\t\t}\n\t\tlogger.Printf(\"added event processor '%s' of type=%s to output\", name, epType)\n\t\treturn ep, nil\n\t}\n\treturn nil, fmt.Errorf(\"%q event processor has an unknown type=%q\", name, epType)\n}\n\ntype BaseProcessor struct {\n\tlogger *log.Logger\n}\n\nfunc (p *BaseProcessor) WithLogger(l *log.Logger) {\n\tp.logger = l\n}\n\nfunc (p *BaseProcessor) Init(interface{}, ...Option) error {\n\treturn nil\n}\n\nfunc (p *BaseProcessor) Apply(...*EventMsg) []*EventMsg {\n\treturn nil\n}\n\nfunc (p *BaseProcessor) WithTargets(map[string]*types.TargetConfig) {\n}\n\nfunc (p *BaseProcessor) WithActions(act map[string]map[string]interface{}) {\n}\n\nfunc (p *BaseProcessor) WithProcessors(procs map[string]map[string]any) {\n}\n"
  },
  {
    "path": "pkg/formatters/processors_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage formatters\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/itchyny/gojq\"\n)\n\nvar testset = map[string]struct {\n\tcondition string\n\tinput     []*EventMsg\n\tresult    bool\n}{\n\t\"always_true\": {\n\t\tcondition: \"any([true])\",\n\t\tinput: []*EventMsg{\n\t\t\t{\n\t\t\t\tName:      \"dummy1\",\n\t\t\t\tTimestamp: time.Now().Unix(),\n\t\t\t\tTags:      map[string]string{\"t1\": \"t1v\"},\n\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\"path/dummy\": 1,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName:      \"dummy2\",\n\t\t\t\tTimestamp: time.Now().Unix(),\n\t\t\t\tTags:      map[string]string{\"t1\": \"t1v\"},\n\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\"path/dummy\": 1,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tresult: true,\n\t},\n}\n\nfunc TestCheckCondition(t *testing.T) {\n\tfor name, item := range testset {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Logf(\"running test item %s\", name)\n\t\t\tq, err := gojq.Parse(item.condition)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"condition parse failed :%v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tcode, err := gojq.Compile(q)\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"query compile failed :%v\", err)\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tfor _, in := range item.input {\n\t\t\t\tok, err := CheckCondition(code, in)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Logf(\"check condition failed :%v\", err)\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t\tif ok != item.result {\n\t\t\t\t\tt.Logf(\"failed at %q\", name)\n\t\t\t\t\tt.Logf(\"expected: (%T)%+v\", 
item.result, item.result)\n\t\t\t\t\tt.Logf(\"     got: (%T)%+v\", ok, ok)\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/gtemplate/template.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage gtemplate\n\nimport (\n\t\"path\"\n\t\"text/template\"\n)\n\nfunc CreateTemplate(name, text string) (*template.Template, error) {\n\treturn template.New(name).\n\t\tOption(\"missingkey=zero\").\n\t\tFuncs(NewTemplateEngine().CreateFuncs()).\n\t\tParse(text)\n}\n\nfunc CreateFileTemplate(filename string) (*template.Template, error) {\n\tname := path.Base(filename)\n\n\ttpl, err := template.New(name).\n\t\tFuncs(NewTemplateEngine().CreateFuncs()).\n\t\tParseFiles(filename)\n\n\ttemplate.Must(tpl, err)\n\n\treturn tpl, err\n}\n"
  },
  {
    "path": "pkg/gtemplate/template_funcs.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage gtemplate\n\nimport (\n\t\"context\"\n\t\"text/template\"\n\n\t\"github.com/hairyhenderson/gomplate/v3\"\n\t\"github.com/hairyhenderson/gomplate/v3/data\"\n)\n\ntype templateEngine interface {\n\tCreateFuncs() template.FuncMap\n}\n\nfunc NewTemplateEngine() templateEngine {\n\treturn &gmplt{}\n}\n\ntype gmplt struct{}\n\nfunc (*gmplt) CreateFuncs() template.FuncMap {\n\treturn gomplate.CreateFuncs(context.TODO(), new(data.Data))\n}\n"
  },
  {
    "path": "pkg/inputs/all/all.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage all\n\nimport (\n\t_ \"github.com/openconfig/gnmic/pkg/inputs/jetstream_input\"\n\t_ \"github.com/openconfig/gnmic/pkg/inputs/kafka_input\"\n\t_ \"github.com/openconfig/gnmic/pkg/inputs/nats_input\"\n)\n"
  },
  {
    "path": "pkg/inputs/input.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage inputs\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\t\"github.com/openconfig/gnmic/pkg/pipeline\"\n\tpkgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n\t\"google.golang.org/protobuf/proto\"\n)\n\ntype Input interface {\n\t// Start initializes the input and starts it.\n\tStart(context.Context, string, map[string]any, ...Option) error\n\t// Validate validates the input configuration.\n\tValidate(map[string]any) error\n\t// Update updates the input configuration in place for\n\t// a running input.\n\tUpdate(map[string]any) error\n\t// UpdateProcessor updates the named processor configuration\n\t// for a running input.\n\t// if the processor is not used by the Input, it will be ignored.\n\tUpdateProcessor(string, map[string]any) error\n\t// Close stops the input.\n\tClose() error\n}\n\ntype Initializer func() Input\n\nvar InputTypes = []string{\n\t\"nats\",\n\t\"kafka\",\n\t\"jetstream\",\n}\n\nvar Inputs = map[string]Initializer{}\n\nfunc Register(name string, initFn Initializer) {\n\tInputs[name] = initFn\n}\n\ntype InputOptions struct {\n\tLogger   *log.Logger\n\tOutputs  map[string]outputs.Output\n\tName     string\n\tStore    store.Store[any]\n\tPipeline chan *pipeline.Msg\n}\n\ntype PipeMessage interface {\n\tProto() proto.Message\n\tMeta() outputs.Meta\n\tEvents() []*formatters.EventMsg\n\tOutputs() map[string]struct{}\n}\n\ntype Option 
func(*InputOptions) error\n\nfunc WithLogger(logger *log.Logger) Option {\n\treturn func(i *InputOptions) error {\n\t\ti.Logger = logger\n\t\treturn nil\n\t}\n}\n\nfunc WithOutputs(outs map[string]outputs.Output) Option {\n\treturn func(i *InputOptions) error {\n\t\ti.Outputs = outs\n\t\treturn nil\n\t}\n}\n\nfunc WithName(name string) Option {\n\treturn func(i *InputOptions) error {\n\t\ti.Name = name\n\t\treturn nil\n\t}\n}\n\nfunc WithConfigStore(st store.Store[any]) Option {\n\treturn func(i *InputOptions) error {\n\t\ti.Store = st\n\t\treturn nil\n\t}\n}\n\nfunc WithPipeline(pipeline chan *pipeline.Msg) Option {\n\treturn func(i *InputOptions) error {\n\t\ti.Pipeline = pipeline\n\t\treturn nil\n\t}\n}\n\ntype BaseInput struct {\n}\n\nfunc (b *BaseInput) Start(context.Context, string, map[string]any, ...Option) error {\n\treturn nil\n}\n\nfunc (b *BaseInput) Validate(map[string]any) error {\n\treturn nil\n}\n\nfunc (b *BaseInput) Update(map[string]any) error {\n\treturn nil\n}\n\nfunc (b *BaseInput) UpdateProcessor(string, map[string]any) error {\n\treturn nil\n}\n\nfunc (b *BaseInput) Close() error {\n\treturn nil\n}\n\nfunc UpdateProcessorInSlice(\n\tlogger *log.Logger,\n\tstoreObj store.Store[any],\n\teventProcessors []string,\n\tcurrentEvps []formatters.EventProcessor,\n\tprocessorName string,\n\tpcfg map[string]any,\n) ([]formatters.EventProcessor, bool, error) {\n\ttcs, ps, acts, err := pkgutils.GetConfigMaps(storeObj)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tfor i, epName := range eventProcessors {\n\t\tif epName == processorName {\n\t\t\tep, err := formatters.MakeProcessor(logger, processorName, pcfg, ps, tcs, acts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, false, err\n\t\t\t}\n\n\t\t\tif i >= len(currentEvps) {\n\t\t\t\treturn nil, false, fmt.Errorf(\"output processors are not properly initialized\")\n\t\t\t}\n\n\t\t\t// create new slice with updated processor\n\t\t\tnewEvps := make([]formatters.EventProcessor, 
len(currentEvps))\n\t\t\tcopy(newEvps, currentEvps)\n\t\t\tnewEvps[i] = ep\n\n\t\t\tlogger.Printf(\"updated event processor %s\", processorName)\n\t\t\treturn newEvps, true, nil\n\t\t}\n\t}\n\n\t// processor not found - return currentEvps\n\treturn currentEvps, false, nil\n}\n"
  },
  {
    "path": "pkg/inputs/jetstream_input/jetstream_input.go",
    "content": "// © 2025 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage jetstream_input\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/google/uuid\"\n\t\"github.com/nats-io/nats.go\"\n\t\"github.com/nats-io/nats.go/jetstream\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/inputs\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\t\"github.com/openconfig/gnmic/pkg/pipeline\"\n\tpkgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tloggingPrefix           = \"[jetstream_input:%s] \"\n\tnatsReconnectBufferSize = 100 * 1024 * 1024\n\tdefaultAddress          = \"localhost:4222\"\n\tnatsConnectWait         = 2 * time.Second\n\tdefaultFormat           = \"event\"\n\tdefaultNumWorkers       = 1\n\tdefaultBufferSize       = 500\n\tdefaultFetchBatchSize   = 500\n\tdefaultMaxAckPending    = 1000\n)\n\ntype deliverPolicy string\n\nconst (\n\tdeliverPolicyAll            deliverPolicy = \"all\"\n\tdeliverPolicyLast           deliverPolicy = \"last\"\n\tdeliverPolicyNew            deliverPolicy = \"new\"\n\tdeliverPolicyLastPerSubject deliverPolicy = \"last-per-subject\"\n)\n\nfunc toJSDeliverPolicy(dp deliverPolicy) jetstream.DeliverPolicy {\n\tswitch dp {\n\tcase 
deliverPolicyAll:\n\t\treturn jetstream.DeliverAllPolicy\n\tcase deliverPolicyLast:\n\t\treturn jetstream.DeliverLastPolicy\n\tcase deliverPolicyNew:\n\t\treturn jetstream.DeliverNewPolicy\n\tcase deliverPolicyLastPerSubject:\n\t\treturn jetstream.DeliverLastPerSubjectPolicy\n\t}\n\treturn 0\n}\n\nfunc init() {\n\tinputs.Register(\"jetstream\", func() inputs.Input {\n\t\treturn &jetstreamInput{\n\t\t\tconfLock: new(sync.RWMutex),\n\t\t\tcfg:      new(atomic.Pointer[config]),\n\t\t\tdynCfg:   new(atomic.Pointer[dynConfig]),\n\t\t\tlogger:   log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\twg:       new(sync.WaitGroup),\n\t\t}\n\t})\n}\n\n// jetstreamInput //\ntype jetstreamInput struct {\n\t// ensure only one Update or UpdateProcessor operation\n\t// are performed at a time\n\tconfLock *sync.RWMutex\n\n\tinputs.BaseInput\n\tcfg    *atomic.Pointer[config]\n\tdynCfg *atomic.Pointer[dynConfig]\n\n\tctx    context.Context\n\tcfn    context.CancelFunc\n\tlogger *log.Logger\n\n\twg       *sync.WaitGroup\n\toutputs  []outputs.Output // used when the cmd is subscribe\n\tstore    store.Store[any]\n\tpipeline chan *pipeline.Msg\n}\n\ntype dynConfig struct {\n\tevps       []formatters.EventProcessor\n\toutputsMap map[string]struct{} // used when the cmd is collector\n}\n\ntype subjectFormat string\n\nconst (\n\tsubjectFormat_Static    = \"static\"\n\tsubjectFormat_TargetSub = \"target.subscription\"\n\tsubjectFormat_SubTarget = \"subscription.target\"\n)\n\n// config //\ntype config struct {\n\tName            string           `mapstructure:\"name,omitempty\"`\n\tAddress         string           `mapstructure:\"address,omitempty\"`\n\tStream          string           `mapstructure:\"stream,omitempty\"`\n\tSubjects        []string         `mapstructure:\"subjects,omitempty\"`\n\tSubjectFormat   subjectFormat    `mapstructure:\"subject-format,omitempty\" json:\"subject-format,omitempty\"`\n\tDeliverPolicy   deliverPolicy    
`mapstructure:\"deliver-policy,omitempty\"`\n\tUsername        string           `mapstructure:\"username,omitempty\"`\n\tPassword        string           `mapstructure:\"password,omitempty\"`\n\tConnectTimeWait time.Duration    `mapstructure:\"connect-time-wait,omitempty\"`\n\tTLS             *types.TLSConfig `mapstructure:\"tls,omitempty\" json:\"tls,omitempty\"`\n\tFormat          string           `mapstructure:\"format,omitempty\"`\n\tDebug           bool             `mapstructure:\"debug,omitempty\"`\n\tNumWorkers      int              `mapstructure:\"num-workers,omitempty\"`\n\tBufferSize      int              `mapstructure:\"buffer-size,omitempty\"`\n\tFetchBatchSize  int              `mapstructure:\"fetch-batch-size,omitempty\"`\n\tMaxAckPending   *int             `mapstructure:\"max-ack-pending,omitempty\"`\n\tOutputs         []string         `mapstructure:\"outputs,omitempty\"`\n\tEventProcessors []string         `mapstructure:\"event-processors,omitempty\"`\n}\n\n// Init //\nfunc (n *jetstreamInput) Start(ctx context.Context, name string, cfg map[string]any, opts ...inputs.Option) error {\n\tn.confLock.Lock()\n\tdefer n.confLock.Unlock()\n\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif newCfg.Name == \"\" {\n\t\tnewCfg.Name = name\n\t}\n\tn.logger.SetPrefix(fmt.Sprintf(loggingPrefix, newCfg.Name))\n\toptions := &inputs.InputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tn.store = options.Store\n\tn.pipeline = options.Pipeline\n\n\tn.setName(options.Name, newCfg)\n\tn.setLogger(options.Logger)\n\toutputs, outputsMap := n.getOutputs(options.Outputs, newCfg)\n\tn.outputs = outputs\n\tevps, err := n.buildEventProcessors(options.Logger, newCfg.EventProcessors)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = n.setDefaultsFor(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn.cfg.Store(newCfg)\n\n\tdc := 
&dynConfig{\n\t\tevps:       evps,\n\t\toutputsMap: outputsMap,\n\t}\n\n\tn.dynCfg.Store(dc)\n\tn.ctx = ctx                // save context for worker restarts\n\tvar runCtx context.Context // create a run context for the workers\n\trunCtx, n.cfn = context.WithCancel(ctx)\n\tn.logger.Printf(\"input starting with config: %+v\", newCfg)\n\tn.wg.Add(newCfg.NumWorkers)\n\tfor i := 0; i < newCfg.NumWorkers; i++ {\n\t\tgo n.worker(runCtx, i)\n\t}\n\treturn nil\n}\n\nfunc (n *jetstreamInput) Validate(cfg map[string]any) error {\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn n.setDefaultsFor(newCfg)\n}\n\n// Update updates the input configuration and restarts the workers if\n// necessary.\n// It works only when the command is collector (not subscribe).\nfunc (n *jetstreamInput) Update(cfg map[string]any) error {\n\tn.confLock.Lock()\n\tdefer n.confLock.Unlock()\n\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn.setDefaultsFor(newCfg)\n\tcurrCfg := n.cfg.Load()\n\n\trestartWorkers := needsWorkerRestart(currCfg, newCfg)\n\trebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0\n\t// build new dynamic config\n\tdc := &dynConfig{\n\t\toutputsMap: make(map[string]struct{}),\n\t}\n\tfor _, o := range newCfg.Outputs {\n\t\tdc.outputsMap[o] = struct{}{}\n\t}\n\n\tprevDC := n.dynCfg.Load()\n\n\tif rebuildProcessors {\n\t\tdc.evps, err = n.buildEventProcessors(n.logger, newCfg.EventProcessors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\n\tn.dynCfg.Store(dc)\n\tn.cfg.Store(newCfg)\n\n\tif restartWorkers {\n\t\trunCtx, cancel := context.WithCancel(n.ctx)\n\t\tnewWG := new(sync.WaitGroup)\n\t\t// save old pointers\n\t\toldCancel := n.cfn\n\t\toldWG := n.wg\n\t\t// swap\n\t\tn.cfn = cancel\n\t\tn.wg = newWG\n\n\t\tn.wg.Add(newCfg.NumWorkers)\n\t\tfor i 
:= 0; i < newCfg.NumWorkers; i++ {\n\t\t\tgo n.worker(runCtx, i)\n\t\t}\n\t\t// cancel old workers and loops\n\t\tif oldCancel != nil {\n\t\t\toldCancel()\n\t\t}\n\t\tif oldWG != nil {\n\t\t\toldWG.Wait()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (n *jetstreamInput) UpdateProcessor(name string, pcfg map[string]any) error {\n\tn.confLock.Lock()\n\tdefer n.confLock.Unlock()\n\n\tcfg := n.cfg.Load()\n\tdc := n.dynCfg.Load()\n\n\tnewEvps, changed, err := inputs.UpdateProcessorInSlice(\n\t\tn.logger,\n\t\tn.store,\n\t\tcfg.EventProcessors,\n\t\tdc.evps,\n\t\tname,\n\t\tpcfg,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif changed {\n\t\tnewDC := *dc\n\t\tnewDC.evps = newEvps\n\t\tn.dynCfg.Store(&newDC)\n\t\tn.logger.Printf(\"updated event processor %s\", name)\n\t}\n\treturn nil\n}\n\nfunc needsWorkerRestart(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.NumWorkers != nw.NumWorkers ||\n\t\told.BufferSize != nw.BufferSize ||\n\t\told.FetchBatchSize != nw.FetchBatchSize ||\n\t\told.Address != nw.Address ||\n\t\told.Stream != nw.Stream ||\n\t\tslices.Compare(old.Subjects, nw.Subjects) != 0 ||\n\t\told.DeliverPolicy != nw.DeliverPolicy ||\n\t\told.Username != nw.Username ||\n\t\told.Password != nw.Password ||\n\t\t!old.TLS.Equal(nw.TLS) ||\n\t\told.ConnectTimeWait != nw.ConnectTimeWait ||\n\t\t!maxAckPendingEqual(old.MaxAckPending, nw.MaxAckPending)\n}\n\nfunc maxAckPendingEqual(a, b *int) bool {\n\tif a == nil && b == nil {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn *a == *b\n}\n\nfunc (n *jetstreamInput) worker(ctx context.Context, idx int) {\n\tdefer n.wg.Done()\n\n\tworkerLogPrefix := fmt.Sprintf(\"worker-%d\", idx)\n\tn.logger.Printf(\"%s starting\", workerLogPrefix)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tn.logger.Printf(\"worker %d loading config\", idx)\n\t\tcfg := n.cfg.Load()\n\t\twCfg := *cfg\n\t\t// scoped connection, subscription 
and cleanup\n\t\terr := n.doWork(ctx, idx, &wCfg, workerLogPrefix)\n\t\tif err != nil {\n\t\t\tn.logger.Printf(\"%s JetStream client failed: %v\", workerLogPrefix, err)\n\t\t}\n\n\t\t// backoff before retry\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-time.After(wCfg.ConnectTimeWait):\n\t\t}\n\t}\n}\n\n// scoped connection, subscription and cleanup\nfunc (n *jetstreamInput) doWork(ctx context.Context, workerIdx int, wCfg *config, workerLogPrefix string) error {\n\tnc, err := n.createNATSConn(wCfg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create NATS connection: %w\", err)\n\t}\n\tdefer nc.Close()\n\n\tjs, err := jetstream.New(nc)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create JetStream context: %w\", err)\n\t}\n\n\ts, err := js.Stream(ctx, wCfg.Stream)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"get stream: %w\", err)\n\t}\n\n\t// Get stream info to determine retention policy\n\tstreamInfo, err := s.Info(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"get stream info: %w\", err)\n\t}\n\n\t// Determine ack policy and deliver policy based on stream retention\n\t// Workqueue streams have specific requirements\n\tackPolicy := jetstream.AckAllPolicy\n\tdeliverPolicy := toJSDeliverPolicy(wCfg.DeliverPolicy)\n\n\tconsumerName := wCfg.Name\n\tif streamInfo.Config.Retention == jetstream.WorkQueuePolicy {\n\t\t// Workqueue streams require explicit ack\n\t\tackPolicy = jetstream.AckExplicitPolicy\n\t\t// Workqueue streams allow DeliverAllPolicy or DeliverNewPolicy\n\t\t// Use configured policy, but only if it's one of these two\n\t\tif deliverPolicy != jetstream.DeliverAllPolicy && deliverPolicy != jetstream.DeliverNewPolicy {\n\t\t\t// Default to DeliverAllPolicy for workqueue if configured policy is not compatible\n\t\t\tdeliverPolicy = jetstream.DeliverAllPolicy\n\t\t}\n\t\t// WorkQueue streams only allow one consumer per unfiltered subject set.\n\t\t// All workers must share the same durable consumer so that concurrent\n\t\t// Fetch() calls 
distribute work correctly instead of each worker\n\t\t// failing to create its own overlapping consumer.\n\t} else {\n\t\t// For non-WorkQueue streams each worker gets its own independent\n\t\t// consumer cursor so that all workers see all messages.\n\t\tconsumerName = fmt.Sprintf(\"%s-%d\", wCfg.Name, workerIdx)\n\t}\n\n\tc, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{\n\t\tName:           consumerName,\n\t\tDurable:        consumerName,\n\t\tDeliverPolicy:  deliverPolicy,\n\t\tAckPolicy:      ackPolicy,\n\t\tMemoryStorage:  true,\n\t\tFilterSubjects: wCfg.Subjects,\n\t\tMaxAckPending:  *wCfg.MaxAckPending,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create consumer: %w\", err)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\tdefault:\n\t\t\t// load current config for dynamic fields like Format\n\t\t\tcfg := n.cfg.Load()\n\t\t\tmb, err := c.FetchNoWait(cfg.FetchBatchSize)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"fetch messages: %w\", err)\n\t\t\t}\n\t\t\tfor m := range mb.Messages() {\n\t\t\t\tn.msgHandler(ctx, cfg, m)\n\t\t\t}\n\t\t\tif mb.Error() != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (n *jetstreamInput) msgHandler(ctx context.Context, cfg *config, msg jetstream.Msg) {\n\tmsg.Ack()\n\tif cfg.Debug {\n\t\tn.logger.Printf(\"received msg, subject=%s, len=%d, data=%s\", msg.Subject(), len(msg.Data()), msg.Data())\n\t}\n\n\tdc := n.dynCfg.Load()\n\tswitch cfg.Format {\n\tcase \"event\":\n\t\tevMsgs := make([]*formatters.EventMsg, 1)\n\t\terr := json.Unmarshal(msg.Data(), &evMsgs)\n\t\tif err != nil {\n\t\t\tif cfg.Debug {\n\t\t\t\tn.logger.Printf(\"failed to unmarshal event msg: %v\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tfor _, p := range dc.evps {\n\t\t\tevMsgs = p.Apply(evMsgs...)\n\t\t}\n\n\t\tif n.pipeline != nil {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase n.pipeline <- &pipeline.Msg{\n\t\t\t\tEvents:  evMsgs,\n\t\t\t\tOutputs: 
dc.outputsMap,\n\t\t\t}:\n\t\t\tdefault:\n\t\t\t\tn.logger.Printf(\"pipeline channel is full, dropping event\")\n\t\t\t}\n\t\t}\n\t\tfor _, o := range n.outputs {\n\t\t\tfor _, ev := range evMsgs {\n\t\t\t\to.WriteEvent(ctx, ev)\n\t\t\t}\n\t\t}\n\n\tcase \"proto\":\n\t\tvar protoMsg = &gnmi.SubscribeResponse{}\n\t\terr := proto.Unmarshal(msg.Data(), protoMsg)\n\t\tif err != nil {\n\t\t\tif cfg.Debug {\n\t\t\t\tn.logger.Printf(\"failed to unmarshal proto msg: %v\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tmeta := n.getMetaFromSubject(msg.Subject(), cfg)\n\n\t\tif n.pipeline != nil {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase n.pipeline <- &pipeline.Msg{\n\t\t\t\tMsg:     protoMsg,\n\t\t\t\tMeta:    meta,\n\t\t\t\tOutputs: dc.outputsMap,\n\t\t\t}:\n\t\t\tdefault:\n\t\t\t\tn.logger.Printf(\"pipeline channel is full, dropping message\")\n\t\t\t}\n\t\t}\n\t\tfor _, o := range n.outputs {\n\t\t\to.Write(ctx, protoMsg, meta)\n\t\t}\n\tdefault:\n\t\tn.logger.Printf(\"unsupported format: %s\", cfg.Format)\n\t}\n}\n\nfunc (n *jetstreamInput) getMetaFromSubject(subject string, wCfg *config) outputs.Meta {\n\tmeta := outputs.Meta{}\n\tsubjectSections := strings.SplitN(subject, \".\", 3)\n\tif len(subjectSections) < 3 {\n\t\treturn meta\n\t}\n\tswitch wCfg.SubjectFormat {\n\tcase subjectFormat_Static:\n\tcase subjectFormat_SubTarget:\n\t\tmeta[\"subscription-name\"] = subjectSections[1]\n\t\tmeta[\"source\"] = subjectSections[2]\n\tcase subjectFormat_TargetSub:\n\t\tmeta[\"subscription-name\"] = subjectSections[2]\n\t\tmeta[\"source\"] = subjectSections[1]\n\t}\n\n\treturn meta\n}\n\n// Close //\nfunc (n *jetstreamInput) Close() error {\n\tif n.cfn != nil {\n\t\tn.cfn()\n\t}\n\tif n.wg != nil {\n\t\tn.wg.Wait()\n\t}\n\treturn nil\n}\n\n// SetLogger //\nfunc (n *jetstreamInput) setLogger(logger *log.Logger) {\n\tif logger != nil && n.logger != nil {\n\t\tn.logger.SetOutput(logger.Writer())\n\t\tn.logger.SetFlags(logger.Flags())\n\t}\n}\n\n// SetOutputs 
//\nfunc (n *jetstreamInput) getOutputs(outs map[string]outputs.Output, cfg *config) ([]outputs.Output, map[string]struct{}) {\n\toutputs := make([]outputs.Output, 0)\n\n\tif len(cfg.Outputs) == 0 {\n\t\tfor _, o := range outs {\n\t\t\toutputs = append(outputs, o)\n\t\t}\n\t\treturn outputs, nil\n\t}\n\toutputsMap := make(map[string]struct{})\n\tfor _, name := range cfg.Outputs {\n\t\toutputsMap[name] = struct{}{} // for collector\n\t\tif o, ok := outs[name]; ok {  // for subscribe\n\t\t\toutputs = append(outputs, o)\n\t\t}\n\t}\n\treturn outputs, outputsMap\n}\n\nfunc (n *jetstreamInput) setName(name string, cfg *config) {\n\tsb := strings.Builder{}\n\tif name != \"\" {\n\t\tsb.WriteString(name)\n\t\tsb.WriteString(\"-\")\n\t}\n\tsb.WriteString(cfg.Name)\n\tsb.WriteString(\"-jetstream-consumer\")\n\tcfg.Name = sb.String()\n}\n\nfunc (n *jetstreamInput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := pkgutils.GetConfigMaps(n.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn formatters.MakeEventProcessors(\n\t\tlogger,\n\t\teventProcessors,\n\t\tps,\n\t\ttcs,\n\t\tacts,\n\t)\n}\n\n// helper functions\n\nfunc (n *jetstreamInput) setDefaultsFor(cfg *config) error {\n\tif cfg.Format == \"\" {\n\t\tcfg.Format = defaultFormat\n\t}\n\tif !(strings.ToLower(cfg.Format) == \"event\" || strings.ToLower(cfg.Format) == \"proto\") {\n\t\treturn fmt.Errorf(\"unsupported input format\")\n\t}\n\tcfg.Format = strings.ToLower(cfg.Format)\n\tif cfg.Name == \"\" {\n\t\tcfg.Name = \"gnmic-jetstream-consumer\" + uuid.New().String()\n\t}\n\tif cfg.DeliverPolicy == \"\" {\n\t\tcfg.DeliverPolicy = deliverPolicyAll\n\t}\n\tif cfg.SubjectFormat == \"\" {\n\t\tcfg.SubjectFormat = subjectFormat_Static\n\t}\n\n\tif cfg.Address == \"\" {\n\t\tcfg.Address = defaultAddress\n\t}\n\tif cfg.ConnectTimeWait <= 0 {\n\t\tcfg.ConnectTimeWait = natsConnectWait\n\t}\n\tif cfg.NumWorkers <= 0 {\n\t\tcfg.NumWorkers 
= defaultNumWorkers\n\t}\n\tif cfg.BufferSize <= 0 {\n\t\tcfg.BufferSize = defaultBufferSize\n\t}\n\tif cfg.FetchBatchSize <= 0 {\n\t\tcfg.FetchBatchSize = defaultFetchBatchSize\n\t}\n\tif cfg.MaxAckPending == nil || *cfg.MaxAckPending <= -2 {\n\t\tv := defaultMaxAckPending\n\t\tcfg.MaxAckPending = &v\n\t}\n\treturn nil\n}\n\nfunc (n *jetstreamInput) createNATSConn(c *config) (*nats.Conn, error) {\n\topts := []nats.Option{\n\t\tnats.Name(c.Name),\n\t\tnats.SetCustomDialer(n),\n\t\tnats.ReconnectWait(c.ConnectTimeWait),\n\t\tnats.ReconnectBufSize(natsReconnectBufferSize),\n\t\tnats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) {\n\t\t\tn.logger.Printf(\"NATS error: %v\", err)\n\t\t}),\n\t\tnats.DisconnectHandler(func(*nats.Conn) {\n\t\t\tn.logger.Println(\"Disconnected from NATS\")\n\t\t}),\n\t\tnats.ClosedHandler(func(*nats.Conn) {\n\t\t\tn.logger.Println(\"NATS connection is closed\")\n\t\t}),\n\t}\n\tif c.Username != \"\" && c.Password != \"\" {\n\t\topts = append(opts, nats.UserInfo(c.Username, c.Password))\n\t}\n\tif c.TLS != nil {\n\t\ttlsConfig, err := utils.NewTLSConfig(\n\t\t\tc.TLS.CaFile, c.TLS.CertFile, c.TLS.KeyFile, \"\", c.TLS.SkipVerify,\n\t\t\tfalse)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif tlsConfig != nil {\n\t\t\topts = append(opts, nats.Secure(tlsConfig))\n\t\t}\n\t}\n\tnc, err := nats.Connect(c.Address, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn nc, nil\n}\n\n// Dial //\nfunc (n *jetstreamInput) Dial(network, address string) (net.Conn, error) {\n\tctx, cancel := context.WithCancel(n.ctx)\n\tdefer cancel()\n\n\tfor {\n\t\tn.logger.Printf(\"attempting to connect to %s\", address)\n\t\tif ctx.Err() != nil {\n\t\t\treturn nil, ctx.Err()\n\t\t}\n\t\tcfg := n.cfg.Load()\n\t\tselect {\n\t\tcase <-n.ctx.Done():\n\t\t\treturn nil, n.ctx.Err()\n\t\tdefault:\n\t\t\td := &net.Dialer{}\n\t\t\tif conn, err := d.DialContext(ctx, network, address); err == nil {\n\t\t\t\tn.logger.Printf(\"successfully 
connected to NATS server %s\", address)\n\t\t\t\treturn conn, nil\n\t\t\t}\n\t\t\ttime.Sleep(cfg.ConnectTimeWait)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/inputs/jetstream_input/jetstream_input_test.go",
    "content": "package jetstream_input\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"testing\"\n\n\t\"github.com/nats-io/nats.go/jetstream\"\n)\n\nfunc Test_setDefaults(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tcfg     *config\n\t\twantErr bool\n\t\terrMsg  string\n\t\tcheck   func(*testing.T, *config)\n\t}{\n\t\t{\n\t\t\tname: \"format defaults to event\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t},\n\t\t\twantErr: false,\n\t\t\tcheck: func(t *testing.T, cfg *config) {\n\t\t\t\tif cfg.Format != defaultFormat {\n\t\t\t\t\tt.Errorf(\"setDefaults() Format = %v, want %v\", cfg.Format, defaultFormat)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"deliver policy defaults to all\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t},\n\t\t\twantErr: false,\n\t\t\tcheck: func(t *testing.T, cfg *config) {\n\t\t\t\tif cfg.DeliverPolicy != deliverPolicyAll {\n\t\t\t\t\tt.Errorf(\"setDefaults() DeliverPolicy = %v, want %v\", cfg.DeliverPolicy, deliverPolicyAll)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"subject format defaults to static\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t},\n\t\t\twantErr: false,\n\t\t\tcheck: func(t *testing.T, cfg *config) {\n\t\t\t\tif cfg.SubjectFormat != subjectFormat_Static {\n\t\t\t\t\tt.Errorf(\"setDefaults() SubjectFormat = %v, want %v\", cfg.SubjectFormat, subjectFormat_Static)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"address defaults to localhost:4222\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t},\n\t\t\twantErr: false,\n\t\t\tcheck: func(t *testing.T, cfg *config) {\n\t\t\t\tif cfg.Address != defaultAddress {\n\t\t\t\t\tt.Errorf(\"setDefaults() Address = %v, want %v\", cfg.Address, defaultAddress)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"num workers defaults to 1\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t},\n\t\t\twantErr: false,\n\t\t\tcheck: func(t *testing.T, cfg *config) {\n\t\t\t\tif 
cfg.NumWorkers != defaultNumWorkers {\n\t\t\t\t\tt.Errorf(\"setDefaults() NumWorkers = %v, want %v\", cfg.NumWorkers, defaultNumWorkers)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"buffer size defaults to 500\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t},\n\t\t\twantErr: false,\n\t\t\tcheck: func(t *testing.T, cfg *config) {\n\t\t\t\tif cfg.BufferSize != defaultBufferSize {\n\t\t\t\t\tt.Errorf(\"setDefaults() BufferSize = %v, want %v\", cfg.BufferSize, defaultBufferSize)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"fetch batch size defaults to 500\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t},\n\t\t\twantErr: false,\n\t\t\tcheck: func(t *testing.T, cfg *config) {\n\t\t\t\tif cfg.FetchBatchSize != defaultFetchBatchSize {\n\t\t\t\t\tt.Errorf(\"setDefaults() FetchBatchSize = %v, want %v\", cfg.FetchBatchSize, defaultFetchBatchSize)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"max ack pending defaults to 1000\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t},\n\t\t\twantErr: false,\n\t\t\tcheck: func(t *testing.T, cfg *config) {\n\t\t\t\tif cfg.MaxAckPending == nil || *cfg.MaxAckPending != defaultMaxAckPending {\n\t\t\t\t\tt.Errorf(\"setDefaults() MaxAckPending = %v, want %v\", cfg.MaxAckPending, defaultMaxAckPending)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"invalid format event\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t\tFormat: \"invalid\",\n\t\t\t},\n\t\t\twantErr: true,\n\t\t\terrMsg:  \"unsupported input format\",\n\t\t},\n\t\t{\n\t\t\tname: \"valid format event\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t\tFormat: \"event\",\n\t\t\t},\n\t\t\twantErr: false,\n\t\t\tcheck: func(t *testing.T, cfg *config) {\n\t\t\t\tif cfg.Format != \"event\" {\n\t\t\t\t\tt.Errorf(\"setDefaults() Format = %v, want event\", cfg.Format)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"valid format proto\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: 
\"test-stream\",\n\t\t\t\tFormat: \"proto\",\n\t\t\t},\n\t\t\twantErr: false,\n\t\t\tcheck: func(t *testing.T, cfg *config) {\n\t\t\t\tif cfg.Format != \"proto\" {\n\t\t\t\t\tt.Errorf(\"setDefaults() Format = %v, want proto\", cfg.Format)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tn := &jetstreamInput{\n\t\t\t\tlogger: log.New(io.Discard, loggingPrefix, 0),\n\t\t\t}\n\t\t\terr := n.setDefaultsFor(tt.cfg)\n\t\t\tif tt.wantErr {\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Errorf(\"setDefaultsFor() expected error but got nil\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif tt.errMsg != \"\" && err.Error() != tt.errMsg {\n\t\t\t\t\tt.Errorf(\"setDefaultsFor() error = %v, want error containing %v\", err.Error(), tt.errMsg)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"setDefaultsFor() unexpected error = %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif tt.check != nil {\n\t\t\t\t\ttt.check(t, tt.cfg)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_toJSDeliverPolicy(t *testing.T) {\n\ttests := []struct {\n\t\tname   string\n\t\tpolicy deliverPolicy\n\t\twant   jetstream.DeliverPolicy\n\t}{\n\t\t{\n\t\t\tname:   \"deliver policy all\",\n\t\t\tpolicy: deliverPolicyAll,\n\t\t\twant:   jetstream.DeliverAllPolicy,\n\t\t},\n\t\t{\n\t\t\tname:   \"deliver policy last\",\n\t\t\tpolicy: deliverPolicyLast,\n\t\t\twant:   jetstream.DeliverLastPolicy,\n\t\t},\n\t\t{\n\t\t\tname:   \"deliver policy new\",\n\t\t\tpolicy: deliverPolicyNew,\n\t\t\twant:   jetstream.DeliverNewPolicy,\n\t\t},\n\t\t{\n\t\t\tname:   \"deliver policy last-per-subject\",\n\t\t\tpolicy: deliverPolicyLastPerSubject,\n\t\t\twant:   jetstream.DeliverLastPerSubjectPolicy,\n\t\t},\n\t\t{\n\t\t\tname:   \"invalid deliver policy returns zero\",\n\t\t\tpolicy: \"invalid\",\n\t\t\twant:   0,\n\t\t},\n\t\t{\n\t\t\tname:   \"empty deliver policy returns zero\",\n\t\t\tpolicy: \"\",\n\t\t\twant:   0,\n\t\t},\n\t}\n\tfor _, tt := range 
tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := toJSDeliverPolicy(tt.policy)\n\t\t\tif got != tt.want {\n\t\t\t\tt.Errorf(\"toJSDeliverPolicy() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Test_workqueueDeliverPolicy documents the expected behavior for workqueue streams\n// When stream retention is WorkQueuePolicy:\n// - AckPolicy is always set to AckExplicitPolicy\n// - DeliverPolicy can be DeliverAllPolicy (process all queued jobs) or DeliverNewPolicy (process only new jobs)\n// - Other deliver policies are converted to DeliverAllPolicy for compatibility\nfunc Test_workqueueDeliverPolicy(t *testing.T) {\n\t// This is a documentation test - actual behavior is tested in integration tests\n\t// The workerStart function should:\n\t// 1. Detect stream retention policy\n\t// 2. Force AckExplicitPolicy for workqueue streams\n\t// 3. Allow DeliverAllPolicy or DeliverNewPolicy\n\t// 4. Convert other policies to DeliverAllPolicy\n\tt.Log(\"Workqueue streams support DeliverAllPolicy and DeliverNewPolicy\")\n}\n"
  },
  {
    "path": "pkg/inputs/kafka_input/kafka_input.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage kafka_input\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/IBM/sarama\"\n\t\"github.com/google/uuid\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/inputs\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\t\"github.com/openconfig/gnmic/pkg/pipeline\"\n\tpkgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n\t\"google.golang.org/protobuf/proto\"\n)\n\nconst (\n\tloggingPrefix            = \"[kafka_input] \"\n\tdefaultFormat            = \"event\"\n\tdefaultTopic             = \"telemetry\"\n\tdefaultNumWorkers        = 1\n\tdefaultSessionTimeout    = 10 * time.Second\n\tdefaultHeartbeatInterval = 3 * time.Second\n\tdefaultRecoveryWaitTime  = 2 * time.Second\n\tdefaultAddress           = \"localhost:9092\"\n\tdefaultGroupID           = \"gnmic-consumers\"\n)\n\nvar defaultVersion = sarama.V2_5_0_0\n\nvar openSquareBracket = []byte(\"[\")\nvar openCurlyBrace = []byte(\"{\")\n\nfunc init() {\n\tinputs.Register(\"kafka\", func() inputs.Input {\n\t\treturn &KafkaInput{\n\t\t\tconfLock: new(sync.RWMutex),\n\t\t\tcfg:      new(atomic.Pointer[config]),\n\t\t\tdynCfg:   new(atomic.Pointer[dynConfig]),\n\t\t\tlogger:   log.New(io.Discard, 
loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\twg:       new(sync.WaitGroup),\n\t\t}\n\t})\n}\n\n// KafkaInput //\ntype KafkaInput struct {\n\t// ensure only one Update or UpdateProcessor operation\n\t// are performed at a time\n\tconfLock *sync.RWMutex\n\n\tinputs.BaseInput\n\tcfg    *atomic.Pointer[config]\n\tdynCfg *atomic.Pointer[dynConfig]\n\n\tctx    context.Context\n\tcfn    context.CancelFunc\n\tlogger *log.Logger\n\n\twg       *sync.WaitGroup\n\toutputs  []outputs.Output // used when the cmd is subscribe\n\tstore    store.Store[any]\n\tpipeline chan *pipeline.Msg\n}\n\ntype dynConfig struct {\n\tevps       []formatters.EventProcessor\n\toutputsMap map[string]struct{} // used when the cmd is collector\n}\n\n// config //\ntype config struct {\n\tName              string           `mapstructure:\"name,omitempty\"`\n\tAddress           string           `mapstructure:\"address,omitempty\"`\n\tTopics            string           `mapstructure:\"topics,omitempty\"`\n\tSASL              *types.SASL      `mapstructure:\"sasl,omitempty\"`\n\tTLS               *types.TLSConfig `mapstructure:\"tls,omitempty\"`\n\tGroupID           string           `mapstructure:\"group-id,omitempty\"`\n\tSessionTimeout    time.Duration    `mapstructure:\"session-timeout,omitempty\"`\n\tHeartbeatInterval time.Duration    `mapstructure:\"heartbeat-interval,omitempty\"`\n\tRecoveryWaitTime  time.Duration    `mapstructure:\"recovery-wait-time,omitempty\"`\n\tVersion           string           `mapstructure:\"version,omitempty\"`\n\tFormat            string           `mapstructure:\"format,omitempty\"`\n\tDebug             bool             `mapstructure:\"debug,omitempty\"`\n\tNumWorkers        int              `mapstructure:\"num-workers,omitempty\"`\n\tOutputs           []string         `mapstructure:\"outputs,omitempty\"`\n\tEventProcessors   []string         `mapstructure:\"event-processors,omitempty\"`\n\n\tkafkaVersion sarama.KafkaVersion\n}\n\nfunc (k *KafkaInput) Start(ctx 
context.Context, name string, cfg map[string]interface{}, opts ...inputs.Option) error {\n\tk.confLock.Lock()\n\tdefer k.confLock.Unlock()\n\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif newCfg.Name == \"\" {\n\t\tnewCfg.Name = name\n\t}\n\toptions := &inputs.InputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tk.store = options.Store\n\tk.pipeline = options.Pipeline\n\n\tk.setName(options.Name, newCfg)\n\tk.setLogger(options.Logger)\n\toutputs, outputsMap := k.getOutputs(options.Outputs, newCfg)\n\tk.outputs = outputs\n\tevps, err := k.buildEventProcessors(options.Logger, newCfg.EventProcessors)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = k.setDefaultsFor(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tk.cfg.Store(newCfg)\n\n\tdc := &dynConfig{\n\t\tevps:       evps,\n\t\toutputsMap: outputsMap,\n\t}\n\n\tk.dynCfg.Store(dc)\n\tk.ctx = ctx                // save context for worker restarts\n\tvar runCtx context.Context // create a run context for the workers\n\trunCtx, k.cfn = context.WithCancel(ctx)\n\tk.logger.Printf(\"input starting with config: %+v\", newCfg)\n\tk.wg.Add(newCfg.NumWorkers)\n\tfor i := 0; i < newCfg.NumWorkers; i++ {\n\t\tgo k.worker(runCtx, i)\n\t}\n\treturn nil\n}\n\nfunc (k *KafkaInput) Validate(cfg map[string]any) error {\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn k.setDefaultsFor(newCfg)\n}\n\n// Update updates the input configuration and restarts the workers if\n// necessary.\n// It works only when the command is collector (not subscribe).\nfunc (k *KafkaInput) Update(cfg map[string]any) error {\n\tk.confLock.Lock()\n\tdefer k.confLock.Unlock()\n\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tk.setDefaultsFor(newCfg)\n\tcurrCfg := 
k.cfg.Load()\n\n\trestartWorkers := needsWorkerRestart(currCfg, newCfg)\n\trebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0\n\t// build new dynamic config\n\tdc := &dynConfig{\n\t\toutputsMap: make(map[string]struct{}),\n\t}\n\tfor _, o := range newCfg.Outputs {\n\t\tdc.outputsMap[o] = struct{}{}\n\t}\n\n\tprevDC := k.dynCfg.Load()\n\n\tif rebuildProcessors {\n\t\tdc.evps, err = k.buildEventProcessors(k.logger, newCfg.EventProcessors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\n\tk.dynCfg.Store(dc)\n\tk.cfg.Store(newCfg)\n\n\tif restartWorkers {\n\t\trunCtx, cancel := context.WithCancel(k.ctx)\n\t\tnewWG := new(sync.WaitGroup)\n\t\t// save old pointers\n\t\toldCancel := k.cfn\n\t\toldWG := k.wg\n\t\t// swap\n\t\tk.cfn = cancel\n\t\tk.wg = newWG\n\n\t\tk.wg.Add(newCfg.NumWorkers)\n\t\tfor i := 0; i < newCfg.NumWorkers; i++ {\n\t\t\tgo k.worker(runCtx, i)\n\t\t}\n\t\t// cancel old workers and loops\n\t\tif oldCancel != nil {\n\t\t\toldCancel()\n\t\t}\n\t\tif oldWG != nil {\n\t\t\toldWG.Wait()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (k *KafkaInput) UpdateProcessor(name string, pcfg map[string]any) error {\n\tk.confLock.Lock()\n\tdefer k.confLock.Unlock()\n\n\tcfg := k.cfg.Load()\n\tdc := k.dynCfg.Load()\n\n\tnewEvps, changed, err := inputs.UpdateProcessorInSlice(\n\t\tk.logger,\n\t\tk.store,\n\t\tcfg.EventProcessors,\n\t\tdc.evps,\n\t\tname,\n\t\tpcfg,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif changed {\n\t\tnewDC := *dc\n\t\tnewDC.evps = newEvps\n\t\tk.dynCfg.Store(&newDC)\n\t\tk.logger.Printf(\"updated event processor %s\", name)\n\t}\n\treturn nil\n}\n\nfunc (k *KafkaInput) worker(ctx context.Context, idx int) {\n\tdefer k.wg.Done()\n\n\tworkerLogPrefix := fmt.Sprintf(\"worker-%d\", idx)\n\tk.logger.Printf(\"%s starting\", workerLogPrefix)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tk.logger.Printf(\"worker %d 
loading config\", idx)\n\t\tcfg := k.cfg.Load()\n\t\twCfg := *cfg\n\t\twCfg.Name = fmt.Sprintf(\"%s-%d\", wCfg.Name, idx)\n\t\t// scoped connection, subscription and cleanup\n\t\terr := k.doWork(ctx, &wCfg, workerLogPrefix, idx)\n\t\tif err != nil {\n\t\t\tk.logger.Printf(\"%s Kafka client failed: %v\", workerLogPrefix, err)\n\t\t}\n\n\t\t// backoff before retry\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-time.After(wCfg.RecoveryWaitTime):\n\t\t}\n\t}\n}\n\n// scoped connection, subscription and cleanup\nfunc (k *KafkaInput) doWork(ctx context.Context, wCfg *config, workerLogPrefix string, idx int) error {\n\tsaramaConfig, err := k.createConfig(wCfg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create Kafka config: %w\", err)\n\t}\n\tsaramaConfig.ClientID = fmt.Sprintf(\"%s-%d\", wCfg.Name, idx)\n\n\tconsumerGrp, err := sarama.NewConsumerGroup(strings.Split(wCfg.Address, \",\"), wCfg.GroupID, saramaConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create consumer group: %w\", err)\n\t}\n\tdefer consumerGrp.Close()\n\n\tcons := &consumer{\n\t\tready:   make(chan bool),\n\t\tmsgChan: make(chan *sarama.ConsumerMessage),\n\t}\n\tstopConsume := make(chan struct{})\n\tgo func() {\n\t\tvar err error\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-stopConsume:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\terr = consumerGrp.Consume(ctx, strings.Split(wCfg.Topics, \",\"), cons)\n\t\t\tif err != nil {\n\t\t\t\tif wCfg.Debug {\n\t\t\t\t\tk.logger.Printf(\"%s failed to start consumer, topics=%q, group=%q : %v\", workerLogPrefix, wCfg.Topics, wCfg.GroupID, err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcons.ready = make(chan bool)\n\t\t}\n\t}()\n\t// wait for the consumer to be ready\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil\n\tcase <-cons.ready:\n\t\tk.logger.Printf(\"%s kafka consumer ready\", workerLogPrefix)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\tcase m := 
<-cons.msgChan:\n\t\t\tif len(m.Value) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// load current config for dynamic fields like Format\n\t\t\tcfg := k.cfg.Load()\n\t\t\tif cfg.Debug {\n\t\t\t\tk.logger.Printf(\"%s client=%s received msg, topic=%s, partition=%d, key=%q, length=%d, value=%s\", workerLogPrefix, saramaConfig.ClientID, m.Topic, m.Partition, string(m.Key), len(m.Value), string(m.Value))\n\t\t\t}\n\n\t\t\tdc := k.dynCfg.Load()\n\t\t\tswitch cfg.Format {\n\t\t\tcase \"event\":\n\t\t\t\tm.Value = bytes.TrimSpace(m.Value)\n\t\t\t\tevMsgs := make([]*formatters.EventMsg, 1)\n\t\t\t\tvar err error\n\t\t\t\tswitch {\n\t\t\t\tcase len(m.Value) == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tcase m.Value[0] == openSquareBracket[0]:\n\t\t\t\t\terr = json.Unmarshal(m.Value, &evMsgs)\n\t\t\t\tcase m.Value[0] == openCurlyBrace[0]:\n\t\t\t\t\tevMsgs[0] = &formatters.EventMsg{}\n\t\t\t\t\terr = json.Unmarshal(m.Value, evMsgs[0])\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tif cfg.Debug {\n\t\t\t\t\t\tk.logger.Printf(\"%s failed to unmarshal event msg: %v\", workerLogPrefix, err)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, p := range dc.evps {\n\t\t\t\t\tevMsgs = p.Apply(evMsgs...)\n\t\t\t\t}\n\n\t\t\t\tif k.pipeline != nil {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\t\treturn nil\n\t\t\t\t\tcase k.pipeline <- &pipeline.Msg{\n\t\t\t\t\t\tEvents:  evMsgs,\n\t\t\t\t\t\tOutputs: dc.outputsMap,\n\t\t\t\t\t}:\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tk.logger.Printf(\"pipeline channel is full, dropping event\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, o := range k.outputs {\n\t\t\t\t\tfor _, ev := range evMsgs {\n\t\t\t\t\t\to.WriteEvent(ctx, ev)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase \"proto\":\n\t\t\t\tprotoMsg := new(gnmi.SubscribeResponse)\n\t\t\t\tif err := proto.Unmarshal(m.Value, protoMsg); err != nil {\n\t\t\t\t\tif cfg.Debug {\n\t\t\t\t\t\tk.logger.Printf(\"%s failed to unmarshal proto msg: %v\", workerLogPrefix, 
err)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"m.Key: %s\\n\", string(m.Key))\n\t\t\t\tmeta := k.partitionKeyToMeta(m.Key)\n\t\t\t\tfmt.Printf(\"meta: %+v\\n\", meta)\n\t\t\t\tif k.pipeline != nil {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\t\treturn nil\n\t\t\t\t\tcase k.pipeline <- &pipeline.Msg{\n\t\t\t\t\t\tMsg:     protoMsg,\n\t\t\t\t\t\tMeta:    meta,\n\t\t\t\t\t\tOutputs: dc.outputsMap,\n\t\t\t\t\t}:\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tk.logger.Printf(\"pipeline channel is full, dropping message\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, o := range k.outputs {\n\t\t\t\t\to.Write(ctx, protoMsg, meta)\n\t\t\t\t}\n\t\t\t}\n\t\tcase err := <-consumerGrp.Errors():\n\t\t\tcfg := k.cfg.Load()\n\t\t\tk.logger.Printf(\"%s client=%s, consumer-group=%s error: %v\", workerLogPrefix, saramaConfig.ClientID, cfg.GroupID, err)\n\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn nil\n\t\t\tcase <-time.After(cfg.RecoveryWaitTime):\n\t\t\t}\n\t\t\tclose(stopConsume)\n\t\t\t// restart worker in case of error\n\t\t\tgo k.doWork(ctx, cfg, workerLogPrefix, idx)\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (k *KafkaInput) Close() error {\n\tif k.cfn != nil {\n\t\tk.cfn()\n\t}\n\tif k.wg != nil {\n\t\tk.wg.Wait()\n\t}\n\treturn nil\n}\n\nconst (\n\tpartitionKeySeparator = \":::\"\n)\n\nfunc (k *KafkaInput) partitionKeyToMeta(key []byte) outputs.Meta {\n\tif len(key) == 0 {\n\t\treturn outputs.Meta{}\n\t}\n\tparts := strings.SplitN(string(key), partitionKeySeparator, 2)\n\tif len(parts) != 2 {\n\t\treturn outputs.Meta{}\n\t}\n\treturn outputs.Meta{\n\t\t\"source\":            parts[0],\n\t\t\"subscription-name\": parts[1],\n\t}\n}\n\nfunc (k *KafkaInput) setLogger(logger *log.Logger) {\n\tif logger != nil {\n\t\tk.logger = log.New(logger.Writer(), loggingPrefix, logger.Flags())\n\t\tsarama.Logger = k.logger\n\t}\n}\n\nfunc (k *KafkaInput) getOutputs(outs map[string]outputs.Output, cfg *config) ([]outputs.Output, map[string]struct{}) 
{\n\toutputs := make([]outputs.Output, 0)\n\n\tif len(cfg.Outputs) == 0 {\n\t\tfor _, o := range outs {\n\t\t\toutputs = append(outputs, o)\n\t\t}\n\t\treturn outputs, nil\n\t}\n\toutputsMap := make(map[string]struct{})\n\tfor _, name := range cfg.Outputs {\n\t\toutputsMap[name] = struct{}{} // for collector\n\t\tif o, ok := outs[name]; ok {  // for subscribe\n\t\t\toutputs = append(outputs, o)\n\t\t}\n\t}\n\treturn outputs, outputsMap\n}\n\nfunc (k *KafkaInput) setName(name string, cfg *config) {\n\tsb := strings.Builder{}\n\tif name != \"\" {\n\t\tsb.WriteString(name)\n\t\tsb.WriteString(\"-\")\n\t}\n\tsb.WriteString(cfg.Name)\n\tsb.WriteString(\"-kafka-cons\")\n\tcfg.Name = sb.String()\n}\n\nfunc (k *KafkaInput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := pkgutils.GetConfigMaps(k.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn formatters.MakeEventProcessors(\n\t\tlogger,\n\t\teventProcessors,\n\t\tps,\n\t\ttcs,\n\t\tacts,\n\t)\n}\n\n// helper funcs\n\nfunc (k *KafkaInput) setDefaultsFor(cfg *config) error {\n\tvar err error\n\tif cfg.Version != \"\" {\n\t\tcfg.kafkaVersion, err = sarama.ParseKafkaVersion(cfg.Version)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tcfg.kafkaVersion = defaultVersion\n\n\t}\n\tif cfg.Format == \"\" {\n\t\tcfg.Format = defaultFormat\n\t}\n\tif !(strings.ToLower(cfg.Format) == \"event\" || strings.ToLower(cfg.Format) == \"proto\") {\n\t\treturn fmt.Errorf(\"unsupported input format\")\n\t}\n\tcfg.Format = strings.ToLower(cfg.Format)\n\tif cfg.Topics == \"\" {\n\t\tcfg.Topics = defaultTopic\n\t}\n\tif cfg.Address == \"\" {\n\t\tcfg.Address = defaultAddress\n\t}\n\tif cfg.NumWorkers <= 0 {\n\t\tcfg.NumWorkers = defaultNumWorkers\n\t}\n\tif cfg.SessionTimeout <= 2*time.Millisecond {\n\t\tcfg.SessionTimeout = defaultSessionTimeout\n\t}\n\tif cfg.HeartbeatInterval <= 1*time.Millisecond {\n\t\tcfg.HeartbeatInterval = 
defaultHeartbeatInterval\n\t}\n\tif cfg.GroupID == \"\" {\n\t\tcfg.GroupID = defaultGroupID\n\t}\n\tif cfg.RecoveryWaitTime <= 0 {\n\t\tcfg.RecoveryWaitTime = defaultRecoveryWaitTime\n\t}\n\tif cfg.Name == \"\" {\n\t\tcfg.Name = \"gnmic-\" + uuid.New().String()\n\t}\n\tif cfg.SASL == nil {\n\t\treturn nil\n\t}\n\tcfg.SASL.Mechanism = strings.ToUpper(cfg.SASL.Mechanism)\n\tswitch cfg.SASL.Mechanism {\n\tcase \"\":\n\t\tcfg.SASL.Mechanism = \"PLAIN\"\n\tcase \"OAUTHBEARER\":\n\t\tif cfg.SASL.TokenURL == \"\" {\n\t\t\treturn errors.New(\"missing token-url for kafka SASL mechanism OAUTHBEARER\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (k *KafkaInput) createConfig(cfg *config) (*sarama.Config, error) {\n\tsaramaCfg := sarama.NewConfig()\n\tsaramaCfg.Version = cfg.kafkaVersion\n\tsaramaCfg.Consumer.Return.Errors = true\n\tsaramaCfg.Consumer.Group.Session.Timeout = cfg.SessionTimeout\n\tsaramaCfg.Consumer.Group.Heartbeat.Interval = cfg.HeartbeatInterval\n\tsaramaCfg.Consumer.Group.Rebalance.Strategy = sarama.NewBalanceStrategyRange()\n\t// SASL_PLAINTEXT or SASL_SSL\n\tif cfg.SASL != nil {\n\t\tsaramaCfg.Net.SASL.Enable = true\n\t\tsaramaCfg.Net.SASL.User = cfg.SASL.User\n\t\tsaramaCfg.Net.SASL.Password = cfg.SASL.Password\n\t\tsaramaCfg.Net.SASL.Mechanism = sarama.SASLMechanism(cfg.SASL.Mechanism)\n\t\tswitch saramaCfg.Net.SASL.Mechanism {\n\t\tcase sarama.SASLTypeSCRAMSHA256:\n\t\t\tsaramaCfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient {\n\t\t\t\treturn &XDGSCRAMClient{HashGeneratorFcn: SHA256}\n\t\t\t}\n\t\tcase sarama.SASLTypeSCRAMSHA512:\n\t\t\tsaramaCfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient {\n\t\t\t\treturn &XDGSCRAMClient{HashGeneratorFcn: SHA512}\n\t\t\t}\n\t\tcase sarama.SASLTypeOAuth:\n\t\t\tsaramaCfg.Net.SASL.TokenProvider = pkgutils.NewTokenProvider(saramaCfg.Net.SASL.User, saramaCfg.Net.SASL.Password, cfg.SASL.TokenURL)\n\t\t}\n\t}\n\t// SSL or SASL_SSL\n\tif cfg.TLS != nil {\n\t\tvar err 
error\n\t\tsaramaCfg.Net.TLS.Enable = true\n\t\tsaramaCfg.Net.TLS.Config, err = utils.NewTLSConfig(\n\t\t\tcfg.TLS.CaFile,\n\t\t\tcfg.TLS.CertFile,\n\t\t\tcfg.TLS.KeyFile,\n\t\t\t\"\",\n\t\t\tcfg.TLS.SkipVerify,\n\t\t\tfalse)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn saramaCfg, nil\n}\n\nfunc needsWorkerRestart(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.NumWorkers != nw.NumWorkers ||\n\t\told.Address != nw.Address ||\n\t\told.Topics != nw.Topics ||\n\t\told.GroupID != nw.GroupID ||\n\t\told.SessionTimeout != nw.SessionTimeout ||\n\t\told.HeartbeatInterval != nw.HeartbeatInterval ||\n\t\told.RecoveryWaitTime != nw.RecoveryWaitTime ||\n\t\told.kafkaVersion != nw.kafkaVersion ||\n\t\t!old.TLS.Equal(nw.TLS) ||\n\t\t!saslEq(old.SASL, nw.SASL)\n}\n\nfunc saslEq(a, b *types.SASL) bool {\n\tif a == nil && b == nil {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn a.User == b.User &&\n\t\ta.Password == b.Password &&\n\t\tstrings.EqualFold(a.Mechanism, b.Mechanism) &&\n\t\ta.TokenURL == b.TokenURL\n}\n\n// consumer\n// ref: https://github.com/Shopify/sarama/blob/master/examples/consumergroup/main.go\n// consumer represents a Sarama consumer group consumer\ntype consumer struct {\n\tready   chan bool\n\tmsgChan chan *sarama.ConsumerMessage\n}\n\n// Setup is run at the beginning of a new session, before ConsumeClaim\nfunc (consumer *consumer) Setup(sarama.ConsumerGroupSession) error {\n\t// Mark the consumer as ready\n\tclose(consumer.ready)\n\treturn nil\n}\n\n// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited\nfunc (consumer *consumer) Cleanup(sarama.ConsumerGroupSession) error {\n\treturn nil\n}\n\n// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().\nfunc (consumer *consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {\n\tfor message := range 
claim.Messages() {\n\t\tconsumer.msgChan <- message\n\t\tsession.MarkMessage(message, \"\")\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/inputs/kafka_input/kafka_scram_client.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage kafka_input\n\nimport (\n\t\"crypto/sha256\"\n\t\"crypto/sha512\"\n\t\"hash\"\n\n\t\"github.com/xdg/scram\"\n)\n\nvar SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() }\nvar SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() }\n\ntype XDGSCRAMClient struct {\n\t*scram.Client\n\t*scram.ClientConversation\n\tscram.HashGeneratorFcn\n}\n\nfunc (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) {\n\tx.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tx.ClientConversation = x.Client.NewConversation()\n\treturn nil\n}\n\nfunc (x *XDGSCRAMClient) Step(challenge string) (response string, err error) {\n\tresponse, err = x.ClientConversation.Step(challenge)\n\treturn\n}\n\nfunc (x *XDGSCRAMClient) Done() bool {\n\treturn x.ClientConversation.Done()\n}\n"
  },
  {
    "path": "pkg/inputs/nats_input/nats_input.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage nats_input\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/google/uuid\"\n\t\"github.com/nats-io/nats.go\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/inputs\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\t\"github.com/openconfig/gnmic/pkg/pipeline\"\n\tgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tloggingPrefix           = \"[nats_input] \"\n\tnatsReconnectBufferSize = 100 * 1024 * 1024\n\tdefaultAddress          = \"localhost:4222\"\n\tnatsConnectWait         = 2 * time.Second\n\tdefaultFormat           = \"event\"\n\tdefaultSubject          = \"telemetry\"\n\tdefaultNumWorkers       = 1\n\tdefaultBufferSize       = 100\n)\n\nfunc init() {\n\tinputs.Register(\"nats\", func() inputs.Input {\n\t\treturn &natsInput{\n\t\t\tconfLock: new(sync.RWMutex),\n\t\t\tcfg:      new(atomic.Pointer[config]),\n\t\t\tdynCfg:   new(atomic.Pointer[dynConfig]),\n\t\t\tlogger:   log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\twg:       new(sync.WaitGroup),\n\t\t}\n\t})\n}\n\n// natsInput //\ntype natsInput struct {\n\t// ensure only one Update or UpdateProcessor 
operation\n\t// are performed at a time\n\tconfLock *sync.RWMutex\n\n\tinputs.BaseInput\n\tcfg    *atomic.Pointer[config]\n\tdynCfg *atomic.Pointer[dynConfig]\n\n\tctx    context.Context\n\tcfn    context.CancelFunc\n\tlogger *log.Logger\n\n\twg       *sync.WaitGroup\n\toutputs  []outputs.Output // used when the cmd is subscribe\n\tstore    store.Store[any]\n\tpipeline chan *pipeline.Msg\n}\n\ntype dynConfig struct {\n\tevps       []formatters.EventProcessor\n\toutputsMap map[string]struct{} // used when the cmd is collector\n}\n\n// config //\ntype config struct {\n\tName            string           `mapstructure:\"name,omitempty\"`\n\tAddress         string           `mapstructure:\"address,omitempty\"`\n\tSubject         string           `mapstructure:\"subject,omitempty\"`\n\tQueue           string           `mapstructure:\"queue,omitempty\"`\n\tUsername        string           `mapstructure:\"username,omitempty\"`\n\tPassword        string           `mapstructure:\"password,omitempty\"`\n\tConnectTimeWait time.Duration    `mapstructure:\"connect-time-wait,omitempty\"`\n\tTLS             *types.TLSConfig `mapstructure:\"tls,omitempty\" json:\"tls,omitempty\"`\n\tFormat          string           `mapstructure:\"format,omitempty\"`\n\tDebug           bool             `mapstructure:\"debug,omitempty\"`\n\tNumWorkers      int              `mapstructure:\"num-workers,omitempty\"`\n\tBufferSize      int              `mapstructure:\"buffer-size,omitempty\"`\n\tOutputs         []string         `mapstructure:\"outputs,omitempty\"`\n\tEventProcessors []string         `mapstructure:\"event-processors,omitempty\"`\n}\n\n// Init //\nfunc (n *natsInput) Start(ctx context.Context, name string, cfg map[string]any, opts ...inputs.Option) error {\n\tn.confLock.Lock()\n\tdefer n.confLock.Unlock()\n\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif newCfg.Name == \"\" {\n\t\tnewCfg.Name = 
name\n\t}\n\tn.logger.SetPrefix(fmt.Sprintf(\"%s%s\", loggingPrefix, newCfg.Name))\n\toptions := &inputs.InputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tn.store = options.Store\n\tn.pipeline = options.Pipeline\n\n\tn.setName(options.Name, newCfg)\n\tn.setLogger(options.Logger)\n\toutputs, outputsMap := n.getOutputs(options.Outputs, newCfg)\n\tn.outputs = outputs\n\tevps, err := n.buildEventProcessors(options.Logger, newCfg.EventProcessors)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = n.setDefaultsFor(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn.cfg.Store(newCfg)\n\n\tdc := &dynConfig{\n\t\tevps:       evps,\n\t\toutputsMap: outputsMap,\n\t}\n\n\tn.dynCfg.Store(dc)\n\tn.ctx = ctx                // save context for worker restarts\n\tvar runCtx context.Context // create a run context for the workers\n\trunCtx, n.cfn = context.WithCancel(ctx)\n\tn.logger.Printf(\"input starting with config: %+v\", newCfg)\n\tn.wg.Add(newCfg.NumWorkers)\n\tfor i := 0; i < newCfg.NumWorkers; i++ {\n\t\tgo n.worker(runCtx, i)\n\t}\n\treturn nil\n}\n\nfunc (n *natsInput) Validate(cfg map[string]any) error {\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn n.setDefaultsFor(newCfg)\n}\n\n// Update updates the input configuration and restarts the workers if\n// necessary.\n// It works only when the command is collector (not subscribe).\nfunc (n *natsInput) Update(cfg map[string]any) error {\n\tn.confLock.Lock()\n\tdefer n.confLock.Unlock()\n\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn.setDefaultsFor(newCfg)\n\tcurrCfg := n.cfg.Load()\n\n\trestartWorkers := needsWorkerRestart(currCfg, newCfg)\n\trebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0\n\t// build new dynamic config\n\tdc := &dynConfig{\n\t\toutputsMap: 
make(map[string]struct{}),\n\t}\n\tfor _, o := range newCfg.Outputs {\n\t\tdc.outputsMap[o] = struct{}{}\n\t}\n\n\tprevDC := n.dynCfg.Load()\n\n\tif rebuildProcessors {\n\t\tdc.evps, err = n.buildEventProcessors(n.logger, newCfg.EventProcessors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\n\tn.dynCfg.Store(dc)\n\tn.cfg.Store(newCfg)\n\n\tif restartWorkers {\n\t\trunCtx, cancel := context.WithCancel(n.ctx)\n\t\tnewWG := new(sync.WaitGroup)\n\t\t// save old pointers\n\t\toldCancel := n.cfn\n\t\toldWG := n.wg\n\t\t// swap\n\t\tn.cfn = cancel\n\t\tn.wg = newWG\n\n\t\tn.wg.Add(newCfg.NumWorkers)\n\t\tfor i := 0; i < newCfg.NumWorkers; i++ {\n\t\t\tgo n.worker(runCtx, i)\n\t\t}\n\t\t// cancel old workers and loops\n\t\tif oldCancel != nil {\n\t\t\toldCancel()\n\t\t}\n\t\tif oldWG != nil {\n\t\t\toldWG.Wait()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (n *natsInput) UpdateProcessor(name string, pcfg map[string]any) error {\n\tn.confLock.Lock()\n\tdefer n.confLock.Unlock()\n\n\tcfg := n.cfg.Load()\n\tdc := n.dynCfg.Load()\n\n\tnewEvps, changed, err := inputs.UpdateProcessorInSlice(\n\t\tn.logger,\n\t\tn.store,\n\t\tcfg.EventProcessors,\n\t\tdc.evps,\n\t\tname,\n\t\tpcfg,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif changed {\n\t\tnewDC := *dc\n\t\tnewDC.evps = newEvps\n\t\tn.dynCfg.Store(&newDC)\n\t\tn.logger.Printf(\"updated event processor %s\", name)\n\t}\n\treturn nil\n}\n\nfunc needsWorkerRestart(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.NumWorkers != nw.NumWorkers ||\n\t\told.BufferSize != nw.BufferSize ||\n\t\told.Address != nw.Address ||\n\t\told.Subject != nw.Subject ||\n\t\told.Queue != nw.Queue ||\n\t\told.Username != nw.Username ||\n\t\told.Password != nw.Password ||\n\t\t!old.TLS.Equal(nw.TLS) ||\n\t\told.ConnectTimeWait != nw.ConnectTimeWait\n}\n\nfunc (n *natsInput) worker(ctx context.Context, idx int) {\n\tdefer n.wg.Done()\n\n\tworkerLogPrefix 
:= fmt.Sprintf(\"worker-%d\", idx)\n\tn.logger.Printf(\"%s starting\", workerLogPrefix)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tn.logger.Printf(\"worker %d loading config\", idx)\n\t\tcfg := n.cfg.Load()\n\t\twCfg := *cfg\n\t\twCfg.Name = fmt.Sprintf(\"%s-%d\", wCfg.Name, idx)\n\t\tfmt.Printf(\"worker %d starting with config: %+v\", idx, wCfg)\n\t\t// scoped connection, subscription and cleanup\n\t\terr := n.doWork(ctx, &wCfg, workerLogPrefix)\n\t\tif err != nil {\n\t\t\tn.logger.Printf(\"%s NATS client failed: %v\", workerLogPrefix, err)\n\t\t}\n\n\t\t// backoff before retry\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-time.After(wCfg.ConnectTimeWait):\n\t\t}\n\t}\n}\n\n// scoped connection, subscription and cleanup\nfunc (n *natsInput) doWork(ctx context.Context, wCfg *config, workerLogPrefix string) error {\n\tnc, err := n.createNATSConn(wCfg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create NATS connection: %w\", err)\n\t}\n\tdefer nc.Close()\n\n\tmsgChan := make(chan *nats.Msg, wCfg.BufferSize)\n\n\tsub, err := nc.ChanQueueSubscribe(wCfg.Subject, wCfg.Queue, msgChan)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create subscription: %w\", err)\n\t}\n\tdefer sub.Unsubscribe()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\tcase m, ok := <-msgChan:\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"msg channel closed\")\n\t\t\t}\n\t\t\tif len(m.Data) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// load current config for dynamic fields like Format\n\t\t\tcfg := n.cfg.Load()\n\t\t\tif cfg.Debug {\n\t\t\t\tn.logger.Printf(\"received msg, subject=%s, queue=%s, len=%d, data=%s\",\n\t\t\t\t\tm.Subject, m.Sub.Queue, len(m.Data), string(m.Data))\n\t\t\t}\n\n\t\t\tdc := n.dynCfg.Load()\n\t\t\tswitch cfg.Format {\n\t\t\tcase \"event\":\n\t\t\t\tvar evMsgs []*formatters.EventMsg\n\t\t\t\tif err := json.Unmarshal(m.Data, &evMsgs); err != nil {\n\t\t\t\t\tif cfg.Debug 
{\n\t\t\t\t\t\tn.logger.Printf(\"%s failed to unmarshal event msg: %v\", workerLogPrefix, err)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, p := range dc.evps {\n\t\t\t\t\tevMsgs = p.Apply(evMsgs...)\n\t\t\t\t}\n\n\t\t\t\tif n.pipeline != nil {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\t\treturn nil\n\t\t\t\t\tcase n.pipeline <- &pipeline.Msg{\n\t\t\t\t\t\tEvents:  evMsgs,\n\t\t\t\t\t\tOutputs: dc.outputsMap,\n\t\t\t\t\t}:\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tn.logger.Printf(\"pipeline channel is full, dropping event\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, o := range n.outputs {\n\t\t\t\t\tfor _, ev := range evMsgs {\n\t\t\t\t\t\to.WriteEvent(ctx, ev)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase \"proto\":\n\t\t\t\tprotoMsg := new(gnmi.SubscribeResponse)\n\t\t\t\tif err := proto.Unmarshal(m.Data, protoMsg); err != nil {\n\t\t\t\t\tn.logger.Printf(\"failed to unmarshal proto msg: %v\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tmeta := outputs.Meta{}\n\t\t\t\tparts := strings.SplitN(m.Subject, \".\", 3)\n\t\t\t\tif len(parts) == 3 {\n\t\t\t\t\tmeta[\"source\"] = strings.ReplaceAll(parts[1], \"-\", \".\")\n\t\t\t\t\tmeta[\"subscription-name\"] = parts[2]\n\t\t\t\t}\n\n\t\t\t\tif n.pipeline != nil {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\t\treturn nil\n\t\t\t\t\tcase n.pipeline <- &pipeline.Msg{\n\t\t\t\t\t\tMsg:     protoMsg,\n\t\t\t\t\t\tMeta:    meta,\n\t\t\t\t\t\tOutputs: dc.outputsMap,\n\t\t\t\t\t}:\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tn.logger.Printf(\"pipeline channel is full, dropping message\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, o := range n.outputs {\n\t\t\t\t\to.Write(ctx, protoMsg, meta)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Close //\nfunc (n *natsInput) Close() error {\n\tif n.cfn != nil {\n\t\tn.cfn()\n\t}\n\tif n.wg != nil {\n\t\tn.wg.Wait()\n\t}\n\treturn nil\n}\n\n// SetLogger //\nfunc (n *natsInput) setLogger(logger *log.Logger) {\n\tif logger != nil && n.logger != nil 
{\n\t\tn.logger.SetOutput(logger.Writer())\n\t\tn.logger.SetFlags(logger.Flags())\n\t}\n}\n\n// SetOutputs //\nfunc (n *natsInput) getOutputs(outs map[string]outputs.Output, cfg *config) ([]outputs.Output, map[string]struct{}) {\n\toutputs := make([]outputs.Output, 0)\n\n\tif len(cfg.Outputs) == 0 {\n\t\tfor _, o := range outs {\n\t\t\toutputs = append(outputs, o)\n\t\t}\n\t\treturn outputs, nil\n\t}\n\toutputsMap := make(map[string]struct{})\n\tfor _, name := range cfg.Outputs {\n\t\toutputsMap[name] = struct{}{} // for collector\n\t\tif o, ok := outs[name]; ok {  // for subscribe\n\t\t\toutputs = append(outputs, o)\n\t\t}\n\t}\n\treturn outputs, outputsMap\n}\n\nfunc (n *natsInput) setName(name string, cfg *config) {\n\tsb := strings.Builder{}\n\tif name != \"\" {\n\t\tsb.WriteString(name)\n\t\tsb.WriteString(\"-\")\n\t}\n\tsb.WriteString(cfg.Name)\n\tsb.WriteString(\"-nats-sub\")\n\tcfg.Name = sb.String()\n}\n\nfunc (n *natsInput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := gutils.GetConfigMaps(n.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn formatters.MakeEventProcessors(\n\t\tlogger,\n\t\teventProcessors,\n\t\tps,\n\t\ttcs,\n\t\tacts,\n\t)\n}\n\n// helper functions\n\nfunc (n *natsInput) setDefaultsFor(cfg *config) error {\n\tif cfg.Format == \"\" {\n\t\tcfg.Format = defaultFormat\n\t}\n\tif !(strings.ToLower(cfg.Format) == \"event\" || strings.ToLower(cfg.Format) == \"proto\") {\n\t\treturn fmt.Errorf(\"unsupported input format\")\n\t}\n\tcfg.Format = strings.ToLower(cfg.Format)\n\tif cfg.Name == \"\" {\n\t\tcfg.Name = \"gnmic-\" + uuid.New().String()\n\t}\n\tif cfg.Subject == \"\" {\n\t\tcfg.Subject = defaultSubject\n\t}\n\tif cfg.Address == \"\" {\n\t\tcfg.Address = defaultAddress\n\t}\n\tif cfg.ConnectTimeWait <= 0 {\n\t\tcfg.ConnectTimeWait = natsConnectWait\n\t}\n\tif cfg.Queue == \"\" {\n\t\tcfg.Queue = cfg.Name\n\t}\n\tif cfg.NumWorkers <= 0 
{\n\t\tcfg.NumWorkers = defaultNumWorkers\n\t}\n\tif cfg.BufferSize <= 0 {\n\t\tcfg.BufferSize = defaultBufferSize\n\t}\n\treturn nil\n}\n\nfunc (n *natsInput) createNATSConn(c *config) (*nats.Conn, error) {\n\topts := []nats.Option{\n\t\tnats.Name(c.Name),\n\t\tnats.SetCustomDialer(n),\n\t\tnats.ReconnectWait(c.ConnectTimeWait),\n\t\tnats.ReconnectBufSize(natsReconnectBufferSize),\n\t\tnats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) {\n\t\t\tn.logger.Printf(\"NATS error: %v\", err)\n\t\t}),\n\t\tnats.DisconnectHandler(func(*nats.Conn) {\n\t\t\tn.logger.Println(\"Disconnected from NATS\")\n\t\t}),\n\t\tnats.ClosedHandler(func(*nats.Conn) {\n\t\t\tn.logger.Println(\"NATS connection is closed\")\n\t\t}),\n\t}\n\tif c.Username != \"\" && c.Password != \"\" {\n\t\topts = append(opts, nats.UserInfo(c.Username, c.Password))\n\t}\n\tif c.TLS != nil {\n\t\ttlsConfig, err := utils.NewTLSConfig(\n\t\t\tc.TLS.CaFile, c.TLS.CertFile, c.TLS.KeyFile, \"\", c.TLS.SkipVerify,\n\t\t\tfalse)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif tlsConfig != nil {\n\t\t\topts = append(opts, nats.Secure(tlsConfig))\n\t\t}\n\t}\n\tnc, err := nats.Connect(c.Address, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn nc, nil\n}\n\n// Dial //\nfunc (n *natsInput) Dial(network, address string) (net.Conn, error) {\n\tctx, cancel := context.WithCancel(n.ctx)\n\tdefer cancel()\n\n\tfor {\n\t\tn.logger.Printf(\"attempting to connect to %s\", address)\n\t\tif ctx.Err() != nil {\n\t\t\treturn nil, ctx.Err()\n\t\t}\n\t\tcfg := n.cfg.Load()\n\t\tselect {\n\t\tcase <-n.ctx.Done():\n\t\t\treturn nil, n.ctx.Err()\n\t\tdefault:\n\t\t\td := &net.Dialer{}\n\t\t\tif conn, err := d.DialContext(ctx, network, address); err == nil {\n\t\t\t\tn.logger.Printf(\"successfully connected to NATS server %s\", address)\n\t\t\t\treturn conn, nil\n\t\t\t}\n\t\t\ttime.Sleep(cfg.ConnectTimeWait)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/loaders/all/all.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage all\n\nimport (\n\t_ \"github.com/openconfig/gnmic/pkg/loaders/consul_loader\"\n\t_ \"github.com/openconfig/gnmic/pkg/loaders/docker_loader\"\n\t_ \"github.com/openconfig/gnmic/pkg/loaders/file_loader\"\n\t_ \"github.com/openconfig/gnmic/pkg/loaders/http_loader\"\n)\n"
  },
  {
    "path": "pkg/loaders/consul_loader/consul_loader.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage consul_loader\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"gopkg.in/yaml.v2\"\n\n\t\"github.com/hashicorp/consul/api\"\n\t\"github.com/mitchellh/mapstructure\"\n\n\t\"github.com/openconfig/gnmic/pkg/actions\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\tgfile \"github.com/openconfig/gnmic/pkg/file\"\n\t\"github.com/openconfig/gnmic/pkg/loaders\"\n)\n\nconst (\n\tloggingPrefix  = \"[consul_loader] \"\n\tloaderType     = \"consul\"\n\tdefaultAddress = \"localhost:8500\"\n\tdefaultPrefix  = \"gnmic/config/targets\"\n\t//\n\tdefaultWatchTimeout  = 1 * time.Minute\n\tdefaultActionTimeout = 30 * time.Second\n)\n\nvar templateFunctions = template.FuncMap{\"join\": strings.Join}\n\nfunc init() {\n\tloaders.Register(loaderType, func() loaders.TargetLoader {\n\t\treturn &consulLoader{\n\t\t\tcfg:         &cfg{},\n\t\t\tm:           new(sync.Mutex),\n\t\t\tlastTargets: make(map[string]map[string]*types.TargetConfig),\n\t\t\tlogger:      log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t}\n\t})\n}\n\ntype consulLoader struct {\n\tcfg *cfg\n\t// decoder        *consulstructure.Decoder\n\tclient *api.Client\n\tm      *sync.Mutex\n\t// map of targets per service\n\tlastTargets    map[string]map[string]*types.TargetConfig\n\ttargetConfigFn func(*types.TargetConfig) 
error\n\tlogger         *log.Logger\n\t//\n\tvars          map[string]interface{}\n\tactionsConfig map[string]map[string]interface{}\n\taddActions    []actions.Action\n\tdelActions    []actions.Action\n\tnumActions    int\n}\n\ntype cfg struct {\n\t// Consul server address\n\tAddress string `mapstructure:\"address,omitempty\" json:\"address,omitempty\"`\n\t// Consul datacenter name, defaults to dc1\n\tDatacenter string `mapstructure:\"datacenter,omitempty\" json:\"datacenter,omitempty\"`\n\t// Consul username\n\tUsername string `mapstructure:\"username,omitempty\" json:\"username,omitempty\"`\n\t// Consul Password\n\tPassword string `mapstructure:\"password,omitempty\" json:\"password,omitempty\"`\n\t// Consul token\n\tToken string `mapstructure:\"token,omitempty\" json:\"token,omitempty\"`\n\t// enable debug\n\tDebug bool `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\t// KV based target config loading\n\tKeyPrefix string `mapstructure:\"key-prefix,omitempty\" json:\"key-prefix,omitempty\"`\n\t// Service based target config loading\n\tServices []*serviceDef `mapstructure:\"services,omitempty\" json:\"services,omitempty\"`\n\t// if true, registers consulLoader prometheus metrics with the provided\n\t// prometheus registry\n\tEnableMetrics bool `mapstructure:\"enable-metrics,omitempty\" json:\"enable-metrics,omitempty\"`\n\t// variables definitions to be passed to the actions\n\tVars map[string]interface{}\n\t// variable file, values in this file will be overwritten by\n\t// the ones defined in Vars\n\tVarsFile string `mapstructure:\"vars-file,omitempty\" json:\"vars-file,omitempty\"`\n\t// list of Actions to run on new target discovery\n\tOnAdd []string `mapstructure:\"on-add,omitempty\" json:\"on-add,omitempty\"`\n\t// list of Actions to run on target removal\n\tOnDelete []string `mapstructure:\"on-delete,omitempty\" json:\"on-delete,omitempty\"`\n\t// timeout for the actions, this applies for all actions as a whole (on-add + on-delete),\n\t// not 
to each action individually.\n\tActionsTimeout time.Duration `mapstructure:\"actions-timeout,omitempty\" json:\"actions-timeout,omitempty\"`\n}\n\ntype serviceDef struct {\n\tName   string                 `mapstructure:\"name,omitempty\" json:\"name,omitempty\"`\n\tTags   []string               `mapstructure:\"tags,omitempty\" json:\"tags,omitempty\"`\n\tConfig map[string]interface{} `mapstructure:\"config,omitempty\" json:\"config,omitempty\"`\n\n\ttags               map[string]struct{}\n\ttargetNameTemplate *template.Template\n\ttargetTagsTemplate map[string]*template.Template\n}\n\nfunc (c *consulLoader) Init(ctx context.Context, cfg map[string]interface{}, logger *log.Logger, opts ...loaders.Option) error {\n\terr := loaders.DecodeConfig(cfg, c.cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = c.setDefaults()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(c)\n\t}\n\tif logger != nil {\n\t\tc.logger.SetOutput(logger.Writer())\n\t\tc.logger.SetFlags(logger.Flags())\n\t}\n\n\tfor _, se := range c.cfg.Services {\n\t\tse.tags = make(map[string]struct{})\n\t\tfor _, t := range se.Tags {\n\t\t\tse.tags[t] = struct{}{}\n\t\t}\n\t}\n\t// parse tempaltes if present\n\tfor i, se := range c.cfg.Services {\n\t\tif se.Config == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif name, ok := se.Config[\"name\"].(string); ok {\n\t\t\tnameTemplate, err := template.New(fmt.Sprintf(\"targetName-%d\", i)).Funcs(templateFunctions).Option(\"missingkey=zero\").Parse(name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tse.targetNameTemplate = nameTemplate\n\t\t}\n\t\tif eventTags, ok := se.Config[\"event-tags\"].(map[string]any); ok {\n\t\t\tse.targetTagsTemplate = make(map[string]*template.Template)\n\t\t\tfor tagName, tagTemplateString := range eventTags {\n\t\t\t\ttagTemplate, err := template.New(fmt.Sprintf(\"tagTemplate-%s-%d\", tagName, i)).Funcs(templateFunctions).Option(\"missingkey=zero\").Parse(fmt.Sprintf(\"%v\", 
tagTemplateString))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tse.targetTagsTemplate[tagName] = tagTemplate\n\t\t\t}\n\t\t}\n\t}\n\n\terr = c.readVars(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, actName := range c.cfg.OnAdd {\n\t\tif cfg, ok := c.actionsConfig[actName]; ok {\n\t\t\ta, err := c.initializeAction(cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tc.addActions = append(c.addActions, a)\n\t\t\tcontinue\n\t\t}\n\t\treturn fmt.Errorf(\"unknown action name %q\", actName)\n\n\t}\n\tfor _, actName := range c.cfg.OnDelete {\n\t\tif cfg, ok := c.actionsConfig[actName]; ok {\n\t\t\ta, err := c.initializeAction(cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tc.delActions = append(c.delActions, a)\n\t\t\tcontinue\n\t\t}\n\t\treturn fmt.Errorf(\"unknown action name %q\", actName)\n\t}\n\tc.numActions = len(c.addActions) + len(c.delActions)\n\tc.logger.Printf(\"initialized consul loader: %+v\", c.cfg)\n\treturn nil\n}\n\nfunc (c *consulLoader) Start(ctx context.Context) chan *loaders.TargetOperation {\n\topChan := make(chan *loaders.TargetOperation)\n\tvar err error\nCLIENT:\n\terr = c.initClient()\n\tif err != nil {\n\t\tc.logger.Printf(\"Failed to create a Consul client:%v\", err)\n\t\tconsulLoaderWatchError.WithLabelValues(loaderType, fmt.Sprintf(\"%v\", err)).Add(1)\n\t\ttime.Sleep(2 * time.Second)\n\t\tgoto CLIENT\n\t}\n\tsChan := make(chan []*api.ServiceEntry)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase ses, ok := <-sChan:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ttcs := make(map[string]*types.TargetConfig)\n\t\t\t\tsrvName := \"\"\n\t\t\t\tfor _, se := range ses {\n\t\t\t\t\tsrvName = se.Service.Service\n\t\t\t\t\ttc, err := c.serviceEntryToTargetConfig(se)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tc.logger.Printf(\"Failed to convert service entry %+v to a target config: %v\", se, 
err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\ttcs[tc.Name] = tc\n\t\t\t\t}\n\n\t\t\t\tc.updateTargets(ctx, srvName, tcs, opChan)\n\t\t\t}\n\t\t}\n\t}()\n\tfor _, s := range c.cfg.Services {\n\t\tgo func(s *serviceDef) {\n\t\t\terr := c.startServicesWatch(ctx, s.Name, s.Tags, sChan, time.Minute)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"service %q watch stopped: %v\", s.Name, err)\n\t\t\t}\n\t\t}(s)\n\t}\n\treturn opChan\n}\n\nfunc (c *consulLoader) RunOnce(ctx context.Context) (map[string]*types.TargetConfig, error) {\n\tif err := c.initClient(); err != nil {\n\t\treturn nil, err\n\t}\n\tresult := make(map[string]*types.TargetConfig)\n\trsChan := make(chan *api.ServiceEntry)\n\twg := new(sync.WaitGroup)\n\n\t// fan-out queries\n\tfor _, s := range c.cfg.Services {\n\t\twg.Add(1)\n\t\tgo func(s *serviceDef) {\n\t\t\tdefer wg.Done()\n\t\t\tses, _, err := c.client.Health().ServiceMultipleTags(s.Name, s.Tags, true, &api.QueryOptions{})\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"failed to get service %q instances: %v\", s.Name, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, se := range ses {\n\t\t\t\tselect {\n\t\t\t\tcase rsChan <- se:\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(s)\n\t}\n\n\t// closer\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(rsChan)\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase se, ok := <-rsChan:\n\t\t\tif !ok {\n\t\t\t\treturn result, nil\n\t\t\t}\n\t\t\ttc, err := c.serviceEntryToTargetConfig(se)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"failed to convert service %+v to target config: %v\", se, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif tc != nil {\n\t\t\t\tresult[tc.Name] = tc\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\treturn result, ctx.Err()\n\t\t}\n\t}\n}\n\n//\n\nfunc (c *consulLoader) initClient() error {\n\tvar err error\n\tif c.client != nil {\n\t\t_, err = c.client.Agent().Self()\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\t// create a new client\n\tclientConfig := 
&api.Config{\n\t\tAddress:    c.cfg.Address,\n\t\tScheme:     \"http\",\n\t\tDatacenter: c.cfg.Datacenter,\n\t\tToken:      c.cfg.Token,\n\t}\n\tif c.cfg.Username != \"\" && c.cfg.Password != \"\" {\n\t\tclientConfig.HttpAuth = &api.HttpBasicAuth{\n\t\t\tUsername: c.cfg.Username,\n\t\t\tPassword: c.cfg.Password,\n\t\t}\n\t}\n\tc.client, err = api.NewClient(clientConfig)\n\treturn err\n}\n\nfunc (c *consulLoader) setDefaults() error {\n\tif c.cfg.Address == \"\" {\n\t\tc.cfg.Address = defaultAddress\n\t}\n\tif c.cfg.Datacenter == \"\" {\n\t\tc.cfg.Datacenter = \"dc1\"\n\t}\n\tif c.cfg.KeyPrefix == \"\" && len(c.cfg.Services) == 0 {\n\t\tc.cfg.KeyPrefix = defaultPrefix\n\t}\n\tif c.cfg.ActionsTimeout <= 0 {\n\t\tc.cfg.ActionsTimeout = defaultActionTimeout\n\t}\n\treturn nil\n}\n\nfunc (c *consulLoader) startServicesWatch(ctx context.Context, serviceName string, tags []string, sChan chan<- []*api.ServiceEntry, watchTimeout time.Duration) error {\n\tif watchTimeout <= 0 {\n\t\twatchTimeout = defaultWatchTimeout\n\t}\n\tvar index uint64\n\tqOpts := &api.QueryOptions{\n\t\tWaitIndex: index,\n\t\tWaitTime:  watchTimeout,\n\t}\n\tvar err error\n\t// long blocking watch\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t\tif c.cfg.Debug {\n\t\t\t\tc.logger.Printf(\"(re)starting watch service=%q, index=%d\", serviceName, qOpts.WaitIndex)\n\t\t\t}\n\t\t\tindex, err = c.watch(qOpts.WithContext(ctx), serviceName, tags, sChan)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"service %q watch failed: %v\", serviceName, err)\n\t\t\t}\n\t\t\tif index == 1 {\n\t\t\t\tqOpts.WaitIndex = index\n\t\t\t\ttime.Sleep(2 * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif index > qOpts.WaitIndex {\n\t\t\t\tqOpts.WaitIndex = index\n\t\t\t}\n\t\t\t// reset WaitIndex if the returned index decreases\n\t\t\t// https://www.consul.io/api-docs/features/blocking#implementation-details\n\t\t\tif index < qOpts.WaitIndex {\n\t\t\t\tqOpts.WaitIndex = 
0\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *consulLoader) watch(qOpts *api.QueryOptions, serviceName string, tags []string, sChan chan<- []*api.ServiceEntry) (uint64, error) {\n\tse, meta, err := c.client.Health().ServiceMultipleTags(serviceName, tags, true, qOpts)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif meta.LastIndex == qOpts.WaitIndex {\n\t\tc.logger.Printf(\"service=%q did not change\", serviceName)\n\t\treturn meta.LastIndex, nil\n\t}\n\tif len(se) == 0 {\n\t\treturn 1, nil\n\t}\n\tsChan <- se\n\treturn meta.LastIndex, nil\n}\n\nfunc (c *consulLoader) serviceEntryToTargetConfig(se *api.ServiceEntry) (*types.TargetConfig, error) {\n\ttc := new(types.TargetConfig)\n\tif se.Service == nil {\n\t\treturn tc, nil\n\t}\n\nSRV:\n\tfor _, sd := range c.cfg.Services {\n\t\t// match service name\n\t\tif se.Service.Service != sd.Name {\n\t\t\tcontinue\n\t\t}\n\n\t\t// match service tags\n\t\tif len(sd.tags) > 0 {\n\t\t\tfor requiredTag := range sd.tags {\n\t\t\t\tif !slices.Contains(se.Service.Tags, requiredTag) {\n\t\t\t\t\tcontinue SRV\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// decode config if present\n\t\tif sd.Config != nil {\n\t\t\terr := mapstructure.Decode(sd.Config, tc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\ttc.Address = se.Service.Address\n\t\tif tc.Address == \"\" {\n\t\t\ttc.Address = se.Node.Address\n\t\t}\n\t\ttc.Address = net.JoinHostPort(tc.Address, strconv.Itoa(se.Service.Port))\n\n\t\tvar buffer bytes.Buffer\n\n\t\ttc.Name = se.Service.ID\n\n\t\tif sd.targetNameTemplate != nil {\n\t\t\tbuffer.Reset()\n\t\t\terr := sd.targetNameTemplate.Execute(&buffer, se.Service)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Println(\"Could not execute nameTemplate\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttc.Name = buffer.String()\n\t\t}\n\n\t\t// Create Event tags from Consul via templates\n\t\tif len(sd.targetTagsTemplate) > 0 {\n\t\t\teventTags := make(map[string]string)\n\t\t\tfor tagName, tagTemplate := range sd.targetTagsTemplate 
{\n\t\t\t\tbuffer.Reset()\n\t\t\t\terr := tagTemplate.Execute(&buffer, se.Service)\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.logger.Println(\"Could not execute tagTemplate:\", tagName)\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\teventTags[tagName] = buffer.String()\n\t\t\t}\n\t\t\ttc.EventTags = eventTags\n\t\t}\n\t\treturn tc, nil\n\t}\n\n\treturn nil, errors.New(\"unable to find a match in Consul service(s)\")\n}\n\nfunc (c *consulLoader) updateTargets(ctx context.Context, srvName string, tcs map[string]*types.TargetConfig, opChan chan *loaders.TargetOperation) {\n\ttargetOp, err := c.runActions(ctx, tcs, loaders.Diff(c.lastTargets[srvName], tcs))\n\tif err != nil {\n\t\tc.logger.Printf(\"failed to run actions: %v\", err)\n\t\treturn\n\t}\n\tnumAdds := len(targetOp.Add)\n\tnumDels := len(targetOp.Del)\n\tif c.cfg.Debug {\n\t\tc.logger.Printf(\"updating service %s with targets=%v\", srvName, tcs)\n\t\tc.logger.Printf(\"updating service %s with op=%v\", srvName, targetOp)\n\t}\n\tdefer func() {\n\t\tconsulLoaderLoadedTargets.WithLabelValues(loaderType).Set(float64(numAdds))\n\t\tconsulLoaderDeletedTargets.WithLabelValues(loaderType).Set(float64(numDels))\n\t}()\n\n\tif numAdds+numDels == 0 {\n\t\treturn\n\t}\n\tc.m.Lock()\n\tif _, ok := c.lastTargets[srvName]; !ok {\n\t\tc.lastTargets[srvName] = make(map[string]*types.TargetConfig)\n\t}\n\t// do delete first since change is delete+add\n\tfor _, del := range targetOp.Del {\n\t\tdelete(c.lastTargets[srvName], del)\n\t}\n\tfor _, add := range targetOp.Add {\n\t\tc.lastTargets[srvName][add.Name] = add\n\t}\n\tc.m.Unlock()\n\n\topChan <- targetOp\n}\n\n//\n\nfunc (c *consulLoader) readVars(ctx context.Context) error {\n\tif c.cfg.VarsFile == \"\" {\n\t\tc.vars = c.cfg.Vars\n\t\treturn nil\n\t}\n\tb, err := gfile.ReadFile(ctx, c.cfg.VarsFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := make(map[string]interface{})\n\terr = yaml.Unmarshal(b, &v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.vars = utils.MergeMaps(v, 
c.cfg.Vars)\n\treturn nil\n}\n\nfunc (c *consulLoader) initializeAction(cfg map[string]interface{}) (actions.Action, error) {\n\tif len(cfg) == 0 {\n\t\treturn nil, errors.New(\"missing action definition\")\n\t}\n\tif actType, ok := cfg[\"type\"]; ok {\n\t\tswitch actType := actType.(type) {\n\t\tcase string:\n\t\t\tif in, ok := actions.Actions[actType]; ok {\n\t\t\t\tact := in()\n\t\t\t\terr := act.Init(cfg, actions.WithLogger(c.logger), actions.WithTargets(nil))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn act, nil\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"unknown action type %q\", actType)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unexpected action field type %T\", actType)\n\t\t}\n\t}\n\treturn nil, errors.New(\"missing type field under action\")\n}\n\nfunc (c *consulLoader) runActions(ctx context.Context, tcs map[string]*types.TargetConfig, targetOp *loaders.TargetOperation) (*loaders.TargetOperation, error) {\n\tif c.numActions == 0 {\n\t\treturn targetOp, nil\n\t}\n\tvar err error\n\t// some actions are defined\n\tfor _, tc := range tcs {\n\t\terr = c.targetConfigFn(tc)\n\t\tif err != nil {\n\t\t\tc.logger.Printf(\"failed running target config fn on target %q\", tc.Name)\n\t\t}\n\t}\n\n\t// run target config func and build map of targets configs\n\tfor i, tAdd := range targetOp.Add {\n\t\terr = c.targetConfigFn(tAdd)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttargetOp.Add[i] = tAdd\n\t}\n\n\topChan := make(chan *loaders.TargetOperation)\n\tdoneCh := make(chan struct{})\n\tresult := &loaders.TargetOperation{\n\t\tAdd: make(map[string]*types.TargetConfig, len(targetOp.Add)),\n\t\tDel: make([]string, 0, len(targetOp.Del)),\n\t}\n\tctx, cancel := context.WithTimeout(ctx, c.cfg.ActionsTimeout)\n\tdefer cancel()\n\t// start operation gathering goroutine\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase op, ok := <-opChan:\n\t\t\t\tif !ok 
{\n\t\t\t\t\tclose(doneCh)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfor n, t := range op.Add {\n\t\t\t\t\tresult.Add[n] = t\n\t\t\t\t}\n\t\t\t\tresult.Del = append(result.Del, op.Del...)\n\t\t\t}\n\t\t}\n\t}()\n\t// create waitGroup and add the number of target operations to it\n\twg := new(sync.WaitGroup)\n\twg.Add(len(targetOp.Add) + len(targetOp.Del))\n\t// run OnAdd actions\n\tfor n, tAdd := range targetOp.Add {\n\t\tgo func(n string, tc *types.TargetConfig) {\n\t\t\tdefer wg.Done()\n\t\t\terr := c.runOnAddActions(ctx, tc.Name, tcs)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"failed running OnAdd actions: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\topChan <- &loaders.TargetOperation{Add: map[string]*types.TargetConfig{n: tc}}\n\t\t}(n, tAdd)\n\t}\n\t// run OnDelete actions\n\tfor _, tDel := range targetOp.Del {\n\t\tgo func(name string) {\n\t\t\tdefer wg.Done()\n\t\t\terr := c.runOnDeleteActions(ctx, name, tcs)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"failed running OnDelete actions: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\topChan <- &loaders.TargetOperation{Del: []string{name}}\n\t\t}(tDel)\n\t}\n\twg.Wait()\n\tclose(opChan)\n\t<-doneCh //wait for gathering goroutine to finish\n\treturn result, nil\n}\n\nfunc (c *consulLoader) runOnAddActions(ctx context.Context, tName string, tcs map[string]*types.TargetConfig) error {\n\taCtx := &actions.Context{\n\t\tInput:   tName,\n\t\tEnv:     make(map[string]any),\n\t\tVars:    c.vars,\n\t\tTargets: tcs,\n\t}\n\tfor _, act := range c.addActions {\n\t\tc.logger.Printf(\"running action %q for target %q\", act.NName(), tName)\n\t\tres, err := act.Run(ctx, aCtx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"action %q for target %q failed: %v\", act.NName(), tName, err)\n\t\t}\n\n\t\taCtx.Env[act.NName()] = utils.Convert(res)\n\t\tif c.cfg.Debug {\n\t\t\tc.logger.Printf(\"action %q, target %q result: %+v\", act.NName(), tName, res)\n\t\t\tb, _ := json.MarshalIndent(aCtx, \"\", \"  
\")\n\t\t\tc.logger.Printf(\"action %q context:\\n%s\", act.NName(), string(b))\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *consulLoader) runOnDeleteActions(ctx context.Context, tName string, _ map[string]*types.TargetConfig) error {\n\tenv := make(map[string]interface{})\n\tfor _, act := range c.delActions {\n\t\tres, err := act.Run(ctx, &actions.Context{Input: tName, Env: env, Vars: c.vars})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"action %q for target %q failed: %v\", act.NName(), tName, err)\n\t\t}\n\t\tenv[act.NName()] = res\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/loaders/consul_loader/consul_loader_metrics.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage consul_loader\n\nimport \"github.com/prometheus/client_golang/prometheus\"\n\nvar consulLoaderLoadedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"consul_loader\",\n\tName:      \"number_of_loaded_targets\",\n\tHelp:      \"Number of new targets successfully loaded\",\n}, []string{\"loader_type\"})\n\nvar consulLoaderDeletedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"consul_loader\",\n\tName:      \"number_of_deleted_targets\",\n\tHelp:      \"Number of targets successfully deleted\",\n}, []string{\"loader_type\"})\n\nvar consulLoaderWatchError = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"consul_loader\",\n\tName:      \"number_of_watch_errors\",\n\tHelp:      \"Number of watch errors\",\n}, []string{\"loader_type\", \"error\"})\n\nfunc initMetrics() {\n\tconsulLoaderLoadedTargets.WithLabelValues(loaderType).Set(0)\n\tconsulLoaderDeletedTargets.WithLabelValues(loaderType).Set(0)\n\tconsulLoaderWatchError.WithLabelValues(loaderType, \"\").Add(0)\n}\n\nfunc registerMetrics(reg *prometheus.Registry) error {\n\tinitMetrics()\n\tvar err error\n\tif err = reg.Register(consulLoaderLoadedTargets); err != nil {\n\t\treturn err\n\t}\n\tif err = reg.Register(consulLoaderDeletedTargets); err != nil {\n\t\treturn err\n\t}\n\treturn reg.Register(consulLoaderWatchError)\n}\n"
  },
  {
    "path": "pkg/loaders/consul_loader/consul_loader_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage consul_loader\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"log\"\n\t\"testing\"\n\n\t\"github.com/hashicorp/consul/api\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n)\n\n// Test the specific bug scenario described in issue #706\n// This test reproduces the exact problem: services with extra metadata tags\n// were being silently filtered out by the old logic\nfunc TestIssue706_ServicesWithExtraTagsFiltered(t *testing.T) {\n\tcl := &consulLoader{\n\t\tlogger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags),\n\t\tcfg: &cfg{\n\t\t\tServices: []*serviceDef{\n\t\t\t\t{\n\t\t\t\t\tName: \"test-service\",\n\t\t\t\t\tTags: []string{\"gnmic\", \"network-device\"},\n\t\t\t\t\ttags: map[string]struct{}{\n\t\t\t\t\t\t\"gnmic\":          {},\n\t\t\t\t\t\t\"network-device\": {},\n\t\t\t\t\t},\n\t\t\t\t\tConfig: map[string]interface{}{\n\t\t\t\t\t\t\"name\": \"test-target\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\terr := cl.Init(context.Background(), nil, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected Init to succeed, but got error: %v\", err)\n\t}\n\t// Service with extra metadata tags - this should NOT be filtered out\n\tserviceEntry := &api.ServiceEntry{\n\t\tService: &api.AgentService{\n\t\t\tID:      \"test-service-1\",\n\t\t\tService: \"test-service\",\n\t\t\tTags:    []string{\"gnmic\", \"network-device\", \"vendor:arista\", \"environment:production\"},\n\t\t\tAddress: \"192.168.1.100\",\n\t\t\tPort:    57400,\n\t\t},\n\t\tNode: &api.Node{\n\t\t\tAddress: 
\"192.168.1.100\",\n\t\t},\n\t}\n\n\tresult, err := cl.serviceEntryToTargetConfig(serviceEntry)\n\n\tif err != nil {\n\t\tt.Fatalf(\"Expected service with extra tags to be accepted, but got error: %v\", err)\n\t}\n\n\tif result == nil {\n\t\tt.Fatal(\"Expected service with extra tags to be accepted, but got nil result\")\n\t}\n\n\tif result.Name != \"test-target\" {\n\t\tt.Errorf(\"Expected target name 'test-target', got: %s\", result.Name)\n\t}\n\n\tif result.Address != \"192.168.1.100:57400\" {\n\t\tt.Errorf(\"Expected address '192.168.1.100:57400', got: %s\", result.Address)\n\t}\n}\n\n// Test case that would demonstrate the old buggy behavior\n// This test explicitly documents what the old code was doing wrong\nfunc TestOldBuggyLogicWouldReject(t *testing.T) {\n\t// Simulate what the OLD buggy logic was doing:\n\t// for _, t := range se.Service.Tags {\n\t//     if _, ok := sd.tags[t]; !ok {\n\t//         goto SRV  // Reject service because of extra tag\n\t//     }\n\t// }\n\n\trequiredTags := map[string]struct{}{\n\t\t\"gnmic\":          {},\n\t\t\"network-device\": {},\n\t}\n\n\tserviceTags := []string{\"gnmic\", \"network-device\", \"vendor:arista\", \"environment:production\"}\n\n\t// This is what the OLD code was doing (buggy logic)\n\toldLogicWouldReject := false\n\tfor _, serviceTag := range serviceTags {\n\t\tif _, ok := requiredTags[serviceTag]; !ok {\n\t\t\toldLogicWouldReject = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// The old logic would incorrectly reject this service\n\tif !oldLogicWouldReject {\n\t\tt.Error(\"This test is invalid - the old buggy logic should have rejected this service\")\n\t}\n\n\t// But the NEW logic should accept it (all required tags are present)\n\tnewLogicShouldAccept := true\n\tfor requiredTag := range requiredTags {\n\t\tfound := false\n\t\tfor _, serviceTag := range serviceTags {\n\t\t\tif serviceTag == requiredTag {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tnewLogicShouldAccept = 
false\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !newLogicShouldAccept {\n\t\tt.Error(\"The new logic should accept this service since all required tags are present\")\n\t}\n\n\tt.Logf(\"✓ Old logic would incorrectly reject: %v\", oldLogicWouldReject)\n\tt.Logf(\"✓ New logic correctly accepts: %v\", newLogicShouldAccept)\n}\n"
  },
  {
    "path": "pkg/loaders/consul_loader/options.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage consul_loader\n\nimport (\n\t\"github.com/prometheus/client_golang/prometheus\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\nfunc (c *consulLoader) RegisterMetrics(reg *prometheus.Registry) {\n\tif !c.cfg.EnableMetrics {\n\t\treturn\n\t}\n\tif reg == nil {\n\t\tc.logger.Printf(\"ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`\")\n\t\treturn\n\t}\n\tif err := registerMetrics(reg); err != nil {\n\t\tc.logger.Printf(\"failed to register metrics: %v\", err)\n\t}\n}\n\nfunc (c *consulLoader) WithActions(acts map[string]map[string]interface{}) {\n\tc.actionsConfig = acts\n}\n\nfunc (c *consulLoader) WithTargetsDefaults(fn func(tc *types.TargetConfig) error) {\n\tc.targetConfigFn = fn\n}\n"
  },
  {
    "path": "pkg/loaders/docker_loader/docker_loader.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage docker_loader\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"gopkg.in/yaml.v2\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/filters\"\n\t\"github.com/docker/docker/api/types/network\"\n\tdClient \"github.com/docker/docker/client\"\n\t\"github.com/mitchellh/mapstructure\"\n\t\"github.com/openconfig/gnmic/pkg/actions\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\tgfile \"github.com/openconfig/gnmic/pkg/file\"\n\t\"github.com/openconfig/gnmic/pkg/loaders\"\n)\n\nconst (\n\tloggingPrefix = \"[docker_loader] \"\n\twatchInterval = 30 * time.Second\n\tloaderType    = \"docker\"\n)\n\nfunc init() {\n\tloaders.Register(loaderType, func() loaders.TargetLoader {\n\t\treturn &dockerLoader{\n\t\t\tcfg:         new(cfg),\n\t\t\twg:          new(sync.WaitGroup),\n\t\t\tm:           new(sync.Mutex),\n\t\t\tlastTargets: make(map[string]*types.TargetConfig),\n\t\t\tlogger:      log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t}\n\t})\n}\n\ntype dockerLoader struct {\n\tcfg    *cfg\n\tclient *dClient.Client\n\twg     *sync.WaitGroup\n\n\tm              *sync.Mutex\n\tlastTargets    map[string]*types.TargetConfig\n\ttargetConfigFn func(*types.TargetConfig) error\n\tlogger         *log.Logger\n\tfl             []*targetFilterComp\n\t//\n\tvars          
map[string]interface{}\n\tactionsConfig map[string]map[string]interface{}\n\taddActions    []actions.Action\n\tdelActions    []actions.Action\n\tnumActions    int\n}\n\ntype targetFilterComp struct {\n\tfl   []filters.Args\n\tnt   filters.Args\n\tport string\n\tcfg  map[string]interface{}\n}\n\ntype cfg struct {\n\t// address of docker daemon API\n\tAddress string `json:\"address,omitempty\" mapstructure:\"address,omitempty\"`\n\t// interval between docker daemon queries\n\tInterval time.Duration `json:\"interval,omitempty\" mapstructure:\"interval,omitempty\"`\n\t// timeout of docker daemon queries\n\tTimeout time.Duration `json:\"timeout,omitempty\" mapstructure:\"timeout,omitempty\"`\n\t// docker filter to apply on queried docker containers\n\tFilters []*targetFilter `json:\"filters,omitempty\" mapstructure:\"filters,omitempty\"`\n\t// time to wait before the first docker filter query\n\tStartDelay time.Duration `json:\"start-delay,omitempty\" mapstructure:\"start-delay,omitempty\"`\n\t// enable debug mode for more logging messages\n\tDebug bool `json:\"debug,omitempty\" mapstructure:\"debug,omitempty\"`\n\t// if true, registers dockerLoader prometheus metrics with the provided\n\t// prometheus registry\n\tEnableMetrics bool `json:\"enable-metrics,omitempty\" mapstructure:\"enable-metrics,omitempty\"`\n\t// variables definitions to be passed to the actions\n\tVars map[string]interface{}\n\t// variable file, values in this file will be overwritten by\n\t// the ones defined in Vars\n\tVarsFile string `mapstructure:\"vars-file,omitempty\"`\n\t// list of Actions to run on new target discovery\n\tOnAdd []string `json:\"on-add,omitempty\" mapstructure:\"on-add,omitempty\"`\n\t// list of Actions to run on target removal\n\tOnDelete []string `json:\"on-delete,omitempty\" mapstructure:\"on-delete,omitempty\"`\n}\n\ntype targetFilter struct {\n\tContainers []map[string]string    `json:\"containers,omitempty\" mapstructure:\"containers,omitempty\"`\n\tNetwork    
map[string]string      `json:\"network,omitempty\" mapstructure:\"network,omitempty\"`\n\tPort       string                 `json:\"port,omitempty\" mapstructure:\"port,omitempty\"`\n\tConfig     map[string]interface{} `json:\"config,omitempty\" mapstructure:\"config,omitempty\"`\n}\n\nfunc (d *dockerLoader) Init(ctx context.Context, cfg map[string]interface{}, logger *log.Logger, opts ...loaders.Option) error {\n\terr := loaders.DecodeConfig(cfg, d.cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.setDefaults()\n\tfor _, opt := range opts {\n\t\topt(d)\n\t}\n\td.fl = make([]*targetFilterComp, 0, len(d.cfg.Filters))\n\tfor _, fm := range d.cfg.Filters {\n\t\t// network filter\n\t\tnflt := filters.NewArgs()\n\t\tfor k, v := range fm.Network {\n\t\t\tnflt.Add(k, v)\n\t\t}\n\t\t// container filters\n\t\tcflt := make([]filters.Args, 0, len(fm.Containers))\n\t\tfor _, sfm := range fm.Containers {\n\t\t\tflt := filters.NewArgs(filters.KeyValuePair{\n\t\t\t\tKey:   \"status\",\n\t\t\t\tValue: \"running\",\n\t\t\t})\n\t\t\tfor k, v := range sfm {\n\t\t\t\tif strings.Contains(k, \"=\") {\n\t\t\t\t\tks := strings.SplitN(k, \"=\", 2)\n\t\t\t\t\tflt.Add(ks[0], strings.Join(append(ks[1:], v), \"=\"))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tflt.Add(k, v)\n\t\t\t}\n\t\t\tcflt = append(cflt, flt)\n\t\t}\n\t\t// target filters\n\t\td.fl = append(d.fl, &targetFilterComp{\n\t\t\tfl:   cflt,\n\t\t\tnt:   nflt,\n\t\t\tport: fm.Port,\n\t\t\tcfg:  fm.Config,\n\t\t})\n\t}\n\n\tif logger != nil {\n\t\td.logger.SetOutput(logger.Writer())\n\t\td.logger.SetFlags(logger.Flags())\n\t}\n\n\td.client, err = d.createDockerClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tping, err := d.client.Ping(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.readVars(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, actName := range d.cfg.OnAdd {\n\t\tif cfg, ok := d.actionsConfig[actName]; ok {\n\t\t\tfmt.Println(cfg)\n\t\t\ta, err := d.initializeAction(cfg)\n\t\t\tif err != nil 
{\n\t\t\t\treturn err\n\t\t\t}\n\t\t\td.addActions = append(d.addActions, a)\n\t\t\tcontinue\n\t\t}\n\t\treturn fmt.Errorf(\"unknown action name %q\", actName)\n\n\t}\n\tfor _, actName := range d.cfg.OnDelete {\n\t\tif cfg, ok := d.actionsConfig[actName]; ok {\n\t\t\ta, err := d.initializeAction(cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\td.delActions = append(d.delActions, a)\n\t\t\tcontinue\n\t\t}\n\t\treturn fmt.Errorf(\"unknown action name %q\", actName)\n\t}\n\td.numActions = len(d.addActions) + len(d.delActions)\n\td.logger.Printf(\"connected to docker daemon: %+v\", ping)\n\td.logger.Printf(\"initialized loader type %q: %s\", loaderType, d)\n\treturn nil\n}\n\nfunc (d *dockerLoader) setDefaults() {\n\tif d.cfg.Interval <= 0 {\n\t\td.cfg.Interval = watchInterval\n\t}\n\tif d.cfg.Timeout <= 0 || d.cfg.Timeout >= d.cfg.Interval {\n\t\td.cfg.Timeout = d.cfg.Interval / 2\n\t}\n\tif len(d.cfg.Filters) == 0 {\n\t\td.cfg.Filters = []*targetFilter{\n\t\t\t{\n\t\t\t\tContainers: []map[string]string{\n\t\t\t\t\t{\"status\": \"running\"},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n}\n\nfunc (d *dockerLoader) createDockerClient() (*dClient.Client, error) {\n\tvar opts []dClient.Opt\n\tif d.cfg.Address == \"\" {\n\t\topts = []dClient.Opt{\n\t\t\tdClient.FromEnv,\n\t\t\tdClient.WithTimeout(d.cfg.Timeout),\n\t\t}\n\t} else {\n\t\topts = []dClient.Opt{\n\t\t\tdClient.WithAPIVersionNegotiation(),\n\t\t\tdClient.WithHost(d.cfg.Address),\n\t\t\tdClient.WithTimeout(d.cfg.Timeout),\n\t\t}\n\t}\n\treturn dClient.NewClientWithOpts(opts...)\n}\n\nfunc (d *dockerLoader) Start(ctx context.Context) chan *loaders.TargetOperation {\n\topChan := make(chan *loaders.TargetOperation)\n\tticker := time.NewTicker(d.cfg.Interval)\n\tgo func() {\n\t\tdefer close(opChan)\n\t\tdefer ticker.Stop()\n\t\ttime.Sleep(d.cfg.StartDelay)\n\t\t// first run\n\t\td.update(ctx, opChan)\n\t\t// periodic runs\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\td.logger.Printf(\"%q context 
done: %v\", loaderType, ctx.Err())\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\td.update(ctx, opChan)\n\t\t\t}\n\t\t}\n\t}()\n\treturn opChan\n}\n\nfunc (d *dockerLoader) RunOnce(ctx context.Context) (map[string]*types.TargetConfig, error) {\n\td.logger.Printf(\"querying %q targets\", loaderType)\n\treadTargets, err := d.getTargets(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif d.cfg.Debug {\n\t\td.logger.Printf(\"docker loader discovered %d target(s)\", len(readTargets))\n\t}\n\treturn readTargets, nil\n}\n\n// update runs the docker loader once and updates the added/remove target to the opChan\nfunc (d *dockerLoader) update(ctx context.Context, opChan chan *loaders.TargetOperation) {\n\treadTargets, err := d.RunOnce(ctx)\n\tif err != nil {\n\t\td.logger.Printf(\"failed to read targets from docker daemon: %v\", err)\n\t\treturn\n\t}\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tdefault:\n\t\td.updateTargets(ctx, readTargets, opChan)\n\t}\n}\n\nfunc (d *dockerLoader) getTargets(ctx context.Context) (map[string]*types.TargetConfig, error) {\n\td.wg = new(sync.WaitGroup)\n\td.wg.Add(len(d.fl))\n\treadTargets := make(map[string]*types.TargetConfig)\n\tm := new(sync.Mutex)\n\terrChan := make(chan error, len(d.fl))\n\n\tstart := time.Now()\n\t// https://github.com/golang/go/issues/60048\n\tdefer func() {\n\t\tdockerLoaderListRequestDuration.WithLabelValues(loaderType).\n\t\t\tSet(float64(time.Since(start).Nanoseconds()))\n\t}()\n\n\tfor _, targetFilter := range d.fl {\n\t\tgo func(fl *targetFilterComp) {\n\t\t\tdockerLoaderListRequestsTotal.WithLabelValues(loaderType).Add(1)\n\t\t\tdefer d.wg.Done()\n\t\t\t// get networks\n\t\t\tnrs, err := d.client.NetworkList(ctx, network.ListOptions{\n\t\t\t\tFilters: fl.nt,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\terrChan <- fmt.Errorf(\"failed getting networks list using filter %+v: %v\", fl.nt, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// get containers for each defined filter\n\t\t\tfor _, cfl := range fl.fl 
{\n\t\t\t\tconts, err := d.client.ContainerList(ctx, container.ListOptions{\n\t\t\t\t\tFilters: cfl,\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- fmt.Errorf(\"failed getting containers list using filter %+v: %v\", cfl, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, cont := range conts {\n\t\t\t\t\td.logger.Printf(\"building target from container %q\", cont.Names)\n\t\t\t\t\ttc := new(types.TargetConfig)\n\t\t\t\t\tif fl.cfg != nil {\n\t\t\t\t\t\terr = mapstructure.Decode(fl.cfg, tc)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\td.logger.Printf(\"failed to decode config map: %v\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t// set target name\n\t\t\t\t\ttc.Name = cont.ID\n\t\t\t\t\tif len(cont.Names) > 0 {\n\t\t\t\t\t\ttc.Name = strings.TrimLeft(cont.Names[0], \"/\")\n\t\t\t\t\t}\n\t\t\t\t\t// discover target address and port\n\t\t\t\t\tswitch strings.ToLower(cont.HostConfig.NetworkMode) {\n\t\t\t\t\tcase \"host\":\n\t\t\t\t\t\tif d.cfg.Address == \"\" || strings.HasPrefix(d.cfg.Address, \"unix://\") {\n\t\t\t\t\t\t\ttc.Address = \"localhost\"\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\ttc.Address, _, err = net.SplitHostPort(d.cfg.Address)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\terrChan <- err\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif fl.port != \"\" {\n\t\t\t\t\t\t\tif !strings.Contains(fl.port, \"=\") {\n\t\t\t\t\t\t\t\ttc.Address = fmt.Sprintf(\"%s:%s\", tc.Address, fl.port)\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tportLabel := strings.Replace(fl.port, \"label=\", \"\", 1)\n\t\t\t\t\t\t\t\tif p, ok := cont.Labels[portLabel]; ok {\n\t\t\t\t\t\t\t\t\ttc.Address = fmt.Sprintf(\"%s:%s\", tc.Address, p)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tif strings.HasPrefix(d.cfg.Address, \"unix:///\") {\n\t\t\t\t\t\t\tfor _, nr := range nrs {\n\t\t\t\t\t\t\t\tif n, ok := cont.NetworkSettings.Networks[nr.Name]; ok {\n\t\t\t\t\t\t\t\t\tif n.IPAddress != \"\" 
{\n\t\t\t\t\t\t\t\t\t\ttc.Address = n.IPAddress\n\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\ttc.Address = n.GlobalIPv6Address\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif tc.Address == \"\" {\n\t\t\t\t\t\t\t\td.logger.Printf(\"%q no address found\", tc.Name)\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif fl.port != \"\" {\n\t\t\t\t\t\t\t\tif !strings.Contains(fl.port, \"=\") {\n\t\t\t\t\t\t\t\t\ttc.Address = fmt.Sprintf(\"%s:%s\", tc.Address, fl.port)\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tportLabel := strings.Replace(fl.port, \"label=\", \"\", 1)\n\t\t\t\t\t\t\t\t\tif p, ok := cont.Labels[portLabel]; ok {\n\t\t\t\t\t\t\t\t\t\ttc.Address = fmt.Sprintf(\"%s:%s\", tc.Address, p)\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// get port from config/label\n\t\t\t\t\t\t\tport := getPortNumber(cont.Labels, fl.port)\n\t\t\t\t\t\t\t// check if port is exposed, find the public port and build the target address\n\t\t\t\t\t\t\tfor _, p := range cont.Ports {\n\t\t\t\t\t\t\t\t// the container private port matches the port from the docker label\n\t\t\t\t\t\t\t\tif p.PrivatePort == port && p.Type == \"tcp\" {\n\t\t\t\t\t\t\t\t\tipAddr := p.IP\n\t\t\t\t\t\t\t\t\tif ipAddr == \"0.0.0.0\" || ipAddr == \"::\" {\n\t\t\t\t\t\t\t\t\t\tif d.cfg.Address == \"\" {\n\t\t\t\t\t\t\t\t\t\t\t// if docker daemon is empty use localhost as target address\n\t\t\t\t\t\t\t\t\t\t\tipAddr = \"localhost\"\n\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\t// derive target address from daemon address if not empty\n\t\t\t\t\t\t\t\t\t\t\tu, err := url.Parse(d.cfg.Address)\n\t\t\t\t\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\t\t\t\td.logger.Printf(\"failed to parse docker daemon address\")\n\t\t\t\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\tipAddr, _, _ = net.SplitHostPort(u.Host)\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tif 
ipAddr != \"\" && p.PublicPort != 0 {\n\t\t\t\t\t\t\t\t\t\ttc.Address = fmt.Sprintf(\"%s:%d\", ipAddr, p.PublicPort)\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t// if an address was not found using the exposed ports\n\t\t\t\t\t\t\t// select the bridge address, and use the port from label if not zero\n\t\t\t\t\t\t\tif tc.Address == \"\" {\n\t\t\t\t\t\t\t\tfor _, nr := range nrs {\n\t\t\t\t\t\t\t\t\tif n, ok := cont.NetworkSettings.Networks[nr.Name]; ok {\n\t\t\t\t\t\t\t\t\t\tif n.IPAddress != \"\" {\n\t\t\t\t\t\t\t\t\t\t\ttc.Address = n.IPAddress\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\ttc.Address = n.GlobalIPv6Address\n\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif tc.Address == \"\" {\n\t\t\t\t\t\t\t\t\td.logger.Printf(\"%q no address found\", tc.Name)\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif port != 0 {\n\t\t\t\t\t\t\t\t\ttc.Address = fmt.Sprintf(\"%s:%d\", tc.Address, port)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t//\n\t\t\t\t\tif d.cfg.Debug {\n\t\t\t\t\t\td.logger.Printf(\"discovered target config %s with filter: %v\", tc, cfl)\n\t\t\t\t\t}\n\t\t\t\t\tm.Lock()\n\t\t\t\t\treadTargets[tc.Name] = tc\n\t\t\t\t\tm.Unlock()\n\t\t\t\t}\n\t\t\t}\n\t\t}(targetFilter)\n\t}\n\tvar errors = make([]error, 0)\n\tgo func() {\n\t\tfor err := range errChan {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}()\n\td.wg.Wait()\n\tclose(errChan)\n\tif len(errors) > 0 {\n\t\tfor _, err := range errors {\n\t\t\tdockerLoaderFailedListRequests.WithLabelValues(loaderType, fmt.Sprintf(\"%v\", err)).Add(1)\n\t\t\td.logger.Printf(\"%v\", err)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"there was %d error(s)\", len(errors))\n\t}\n\treturn readTargets, nil\n}\n\nfunc (d *dockerLoader) diff(m map[string]*types.TargetConfig) *loaders.TargetOperation {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\tresult := loaders.Diff(d.lastTargets, m)\n\tfor _, t := 
range result.Add {\n\t\tif _, ok := d.lastTargets[t.Name]; !ok {\n\t\t\td.lastTargets[t.Name] = t\n\t\t}\n\t}\n\tfor _, n := range result.Del {\n\t\tdelete(d.lastTargets, n)\n\t}\n\tdockerLoaderLoadedTargets.WithLabelValues(loaderType).Set(float64(len(result.Add)))\n\tdockerLoaderDeletedTargets.WithLabelValues(loaderType).Set(float64(len(result.Del)))\n\tif d.cfg.Debug {\n\t\tb, err := json.MarshalIndent(result, \"\", \"  \")\n\t\tif err != nil {\n\t\t\td.logger.Printf(\"discovery diff result: %v\", result)\n\t\t} else {\n\t\t\td.logger.Printf(\"discovery diff result:\\n%s\", string(b))\n\t\t}\n\t}\n\treturn result\n}\n\nfunc (d *dockerLoader) String() string {\n\tb, err := json.Marshal(d.cfg)\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"%+v\", d.cfg)\n\t}\n\treturn string(b)\n}\n\nfunc (d *dockerLoader) updateTargets(ctx context.Context, tcs map[string]*types.TargetConfig, opChan chan *loaders.TargetOperation) {\n\tvar err error\n\tfor _, tc := range tcs {\n\t\terr = d.targetConfigFn(tc)\n\t\tif err != nil {\n\t\t\td.logger.Printf(\"failed running target config fn on target %q\", tc.Name)\n\t\t}\n\t}\n\ttargetOp, err := d.runActions(ctx, tcs, d.diff(tcs))\n\tif err != nil {\n\t\td.logger.Printf(\"failed to run actions: %v\", err)\n\t\treturn\n\t}\n\tnumAdds := len(targetOp.Add)\n\tnumDels := len(targetOp.Del)\n\tdefer func() {\n\t\tdockerLoaderLoadedTargets.WithLabelValues(loaderType).Set(float64(numAdds))\n\t\tdockerLoaderDeletedTargets.WithLabelValues(loaderType).Set(float64(numDels))\n\t}()\n\tif numAdds+numDels == 0 {\n\t\treturn\n\t}\n\td.m.Lock()\n\t// do deletes first since change is delete+add\n\tfor _, del := range targetOp.Del {\n\t\tdelete(d.lastTargets, del)\n\t}\n\tfor _, add := range targetOp.Add {\n\t\td.lastTargets[add.Name] = add\n\t}\n\td.m.Unlock()\n\topChan <- targetOp\n}\n\nfunc (d *dockerLoader) readVars(ctx context.Context) error {\n\tif d.cfg.VarsFile == \"\" {\n\t\td.vars = d.cfg.Vars\n\t\treturn nil\n\t}\n\tb, err := gfile.ReadFile(ctx, 
d.cfg.VarsFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := make(map[string]interface{})\n\terr = yaml.Unmarshal(b, &v)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.vars = utils.MergeMaps(v, d.cfg.Vars)\n\treturn nil\n}\n\nfunc (d *dockerLoader) initializeAction(cfg map[string]interface{}) (actions.Action, error) {\n\tif len(cfg) == 0 {\n\t\treturn nil, errors.New(\"missing action definition\")\n\t}\n\tif actType, ok := cfg[\"type\"]; ok {\n\t\tswitch actType := actType.(type) {\n\t\tcase string:\n\t\t\tif in, ok := actions.Actions[actType]; ok {\n\t\t\t\tact := in()\n\t\t\t\terr := act.Init(cfg, actions.WithLogger(d.logger), actions.WithTargets(nil))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn act, nil\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"unknown action type %q\", actType)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unexpected action field type %T\", actType)\n\t\t}\n\t}\n\treturn nil, errors.New(\"missing type field under action\")\n}\n\nfunc (d *dockerLoader) runActions(ctx context.Context, tcs map[string]*types.TargetConfig, targetOp *loaders.TargetOperation) (*loaders.TargetOperation, error) {\n\tif d.numActions == 0 {\n\t\treturn targetOp, nil\n\t}\n\topChan := make(chan *loaders.TargetOperation)\n\t// some actions are defined,\n\tdoneCh := make(chan struct{})\n\tresult := &loaders.TargetOperation{\n\t\tAdd: make(map[string]*types.TargetConfig, len(targetOp.Add)),\n\t\tDel: make([]string, 0, len(targetOp.Del)),\n\t}\n\tctx, cancel := context.WithTimeout(ctx, d.cfg.Interval)\n\tdefer cancel()\n\t// start gathering goroutine\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tclose(doneCh)\n\t\t\t\treturn\n\t\t\tcase op, ok := <-opChan:\n\t\t\t\tif !ok {\n\t\t\t\t\tclose(doneCh)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfor n, t := range op.Add {\n\t\t\t\t\tresult.Add[n] = t\n\t\t\t\t}\n\t\t\t\tresult.Del = append(result.Del, op.Del...)\n\t\t\t}\n\t\t}\n\t}()\n\t// create waitGroup and add the 
number of target operations to it\n\twg := new(sync.WaitGroup)\n\twg.Add(len(targetOp.Add) + len(targetOp.Del))\n\t// run OnAdd actions\n\tfor n, tAdd := range targetOp.Add {\n\t\tgo func(n string, tc *types.TargetConfig) {\n\t\t\tdefer wg.Done()\n\t\t\terr := d.runOnAddActions(ctx, tc.Name, tcs)\n\t\t\tif err != nil {\n\t\t\t\td.logger.Printf(\"failed running OnAdd actions: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\topChan <- &loaders.TargetOperation{Add: map[string]*types.TargetConfig{n: tc}}\n\t\t}(n, tAdd)\n\t}\n\t// run OnDelete actions\n\tfor _, tDel := range targetOp.Del {\n\t\tgo func(name string) {\n\t\t\tdefer wg.Done()\n\t\t\terr := d.runOnDeleteActions(ctx, name)\n\t\t\tif err != nil {\n\t\t\t\td.logger.Printf(\"failed running OnDelete actions: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\topChan <- &loaders.TargetOperation{Del: []string{name}}\n\t\t}(tDel)\n\t}\n\twg.Wait()\n\tclose(opChan)\n\t<-doneCh //wait for gathering goroutine to finish\n\treturn result, nil\n}\n\nfunc (d *dockerLoader) runOnAddActions(ctx context.Context, tName string, tcs map[string]*types.TargetConfig) error {\n\taCtx := &actions.Context{\n\t\tInput:   tName,\n\t\tEnv:     make(map[string]interface{}),\n\t\tVars:    d.vars,\n\t\tTargets: tcs,\n\t}\n\tfor _, act := range d.addActions {\n\t\td.logger.Printf(\"running action %q for target %q\", act.NName(), tName)\n\t\tres, err := act.Run(ctx, aCtx)\n\t\tif err != nil {\n\t\t\t// delete target from known targets map\n\t\t\td.m.Lock()\n\t\t\tdelete(d.lastTargets, tName)\n\t\t\td.m.Unlock()\n\t\t\treturn fmt.Errorf(\"action %q for target %q failed: %v\", act.NName(), tName, err)\n\t\t}\n\n\t\taCtx.Env[act.NName()] = utils.Convert(res)\n\t\tif d.cfg.Debug {\n\t\t\td.logger.Printf(\"action %q, target %q result: %+v\", act.NName(), tName, res)\n\t\t\tb, _ := json.MarshalIndent(aCtx, \"\", \"  \")\n\t\t\td.logger.Printf(\"action %q context:\\n%s\", act.NName(), string(b))\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *dockerLoader) 
runOnDeleteActions(ctx context.Context, tName string) error {\n\tenv := make(map[string]interface{})\n\tfor _, act := range d.delActions {\n\t\tres, err := act.Run(ctx, &actions.Context{Input: tName, Env: env, Vars: d.vars})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"action %q for target %q failed: %v\", act.NName(), tName, err)\n\t\t}\n\t\tenv[act.NName()] = res\n\t}\n\treturn nil\n}\n\n/// helpers\n\nfunc getPortNumber(labels map[string]string, p string) uint16 {\n\tvar port uint16\n\tif p != \"\" {\n\t\tif !strings.Contains(p, \"=\") {\n\t\t\tp, _ := strconv.Atoi(p)\n\t\t\tport = uint16(p)\n\t\t} else {\n\t\t\ts := labels[strings.Replace(p, \"label=\", \"\", 1)]\n\t\t\tp, _ := strconv.Atoi(s)\n\t\t\tport = uint16(p)\n\t\t}\n\t}\n\treturn port\n}\n"
  },
  {
    "path": "pkg/loaders/docker_loader/docker_loader_metrics.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage docker_loader\n\nimport \"github.com/prometheus/client_golang/prometheus\"\n\nvar dockerLoaderLoadedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"docker_loader\",\n\tName:      \"number_of_loaded_targets\",\n\tHelp:      \"Number of new targets successfully loaded\",\n}, []string{\"loader_type\"})\n\nvar dockerLoaderDeletedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"docker_loader\",\n\tName:      \"number_of_deleted_targets\",\n\tHelp:      \"Number of targets successfully deleted\",\n}, []string{\"loader_type\"})\n\nvar dockerLoaderFailedListRequests = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"docker_loader\",\n\tName:      \"number_of_failed_docker_list\",\n\tHelp:      \"Number of times a docker list failed\",\n}, []string{\"loader_type\", \"error\"})\n\nvar dockerLoaderListRequestsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"docker_loader\",\n\tName:      \"number_of_docker_list_total\",\n\tHelp:      \"Number of times the loader sent a docker list request\",\n}, []string{\"loader_type\"})\n\nvar dockerLoaderListRequestDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"docker_loader\",\n\tName:      \"docker_list_duration_ns\",\n\tHelp:      \"Duration of docker list request in ns\",\n}, []string{\"loader_type\"})\n\nfunc initMetrics() 
{\n\tdockerLoaderLoadedTargets.WithLabelValues(loaderType).Set(0)\n\tdockerLoaderDeletedTargets.WithLabelValues(loaderType).Set(0)\n\tdockerLoaderFailedListRequests.WithLabelValues(loaderType, \"\").Add(0)\n\tdockerLoaderListRequestsTotal.WithLabelValues(loaderType).Add(0)\n\tdockerLoaderListRequestDuration.WithLabelValues(loaderType).Set(0)\n}\n\nfunc registerMetrics(reg *prometheus.Registry) error {\n\tif reg == nil {\n\t\treturn nil\n\t}\n\tinitMetrics()\n\tvar err error\n\tif err = reg.Register(dockerLoaderLoadedTargets); err != nil {\n\t\treturn err\n\t}\n\tif err = reg.Register(dockerLoaderDeletedTargets); err != nil {\n\t\treturn err\n\t}\n\tif err = reg.Register(dockerLoaderFailedListRequests); err != nil {\n\t\treturn err\n\t}\n\tif err = reg.Register(dockerLoaderListRequestsTotal); err != nil {\n\t\treturn err\n\t}\n\tif err = reg.Register(dockerLoaderListRequestDuration); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/loaders/docker_loader/options.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage docker_loader\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc (d *dockerLoader) RegisterMetrics(reg *prometheus.Registry) {\n\tif !d.cfg.EnableMetrics {\n\t\treturn\n\t}\n\tif reg == nil {\n\t\td.logger.Printf(\"ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`\")\n\t\treturn\n\t}\n\tif err := registerMetrics(reg); err != nil {\n\t\td.logger.Printf(\"failed to register metrics: %v\", err)\n\t}\n}\n\nfunc (d *dockerLoader) WithActions(acts map[string]map[string]interface{}) {\n\td.actionsConfig = acts\n}\n\nfunc (d *dockerLoader) WithTargetsDefaults(fn func(tc *types.TargetConfig) error) {\n\td.targetConfigFn = fn\n}\n"
  },
  {
    "path": "pkg/loaders/file_loader/file_loader.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage file_loader\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"gopkg.in/yaml.v2\"\n\n\t\"github.com/openconfig/gnmic/pkg/actions\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\tgfile \"github.com/openconfig/gnmic/pkg/file\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/loaders\"\n)\n\nconst (\n\tloggingPrefix = \"[file_loader] \"\n\twatchInterval = 30 * time.Second\n\tloaderType    = \"file\"\n)\n\nfunc init() {\n\tloaders.Register(loaderType, func() loaders.TargetLoader {\n\t\treturn &fileLoader{\n\t\t\tcfg:         &cfg{},\n\t\t\tm:           new(sync.RWMutex),\n\t\t\tlastTargets: make(map[string]*types.TargetConfig),\n\t\t\tlogger:      log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t}\n\t})\n}\n\n// fileLoader implements the loaders.Loader interface.\n// it reads a configured file (local, ftp, sftp, http) periodically,\n// expects the file to contain a dictionary of types.TargetConfig.\n// It then adds new targets to gNMIc's targets and deletes the removed ones.\ntype fileLoader struct {\n\tcfg            *cfg\n\tm              *sync.RWMutex\n\tlastTargets    map[string]*types.TargetConfig\n\ttargetConfigFn func(*types.TargetConfig) error\n\tlogger         *log.Logger\n\t//\n\ttpl           *template.Template\n\tvars          
map[string]interface{}\n\tactionsConfig map[string]map[string]interface{}\n\taddActions    []actions.Action\n\tdelActions    []actions.Action\n\tnumActions    int\n}\n\ntype cfg struct {\n\t// path to the file, if remote,\n\t// must include the proper protocol prefix ftp://, sftp://, http://\n\tPath string `json:\"path,omitempty\" mapstructure:\"path,omitempty\"`\n\t// the interval at which the file will be re-read to load new targets\n\t// or delete removed ones.\n\tInterval time.Duration `json:\"interval,omitempty\" mapstructure:\"interval,omitempty\"`\n\t// a Go text template that can be used to transform the targets format read from the file to match\n\t// gNMIc's expected format.\n\tTemplate string `json:\"template,omitempty\" mapstructure:\"template,omitempty\"`\n\t// time to wait before the first file read\n\tStartDelay time.Duration `json:\"start-delay,omitempty\" mapstructure:\"start-delay,omitempty\"`\n\t// if true, registers fileLoader prometheus metrics with the provided\n\t// prometheus registry\n\tEnableMetrics bool `json:\"enable-metrics,omitempty\" mapstructure:\"enable-metrics,omitempty\"`\n\t// enable Debug\n\tDebug bool `json:\"debug,omitempty\" mapstructure:\"debug,omitempty\"`\n\t// variables definitions to be passed to the actions\n\tVars map[string]interface{} `json:\"vars,omitempty\" mapstructure:\"vars,omitempty\"`\n\t// variable file, values in this file will be overwritten by\n\t// the ones defined in Vars\n\tVarsFile string `json:\"vars-file,omitempty\" mapstructure:\"vars-file,omitempty\"`\n\t// list of Actions to run on new target discovery\n\tOnAdd []string `json:\"on-add,omitempty\" mapstructure:\"on-add,omitempty\"`\n\t// list of Actions to run on target removal\n\tOnDelete []string `json:\"on-delete,omitempty\" mapstructure:\"on-delete,omitempty\"`\n}\n\nfunc (f *fileLoader) Init(ctx context.Context, cfg map[string]interface{}, logger *log.Logger, opts ...loaders.Option) error {\n\terr := loaders.DecodeConfig(cfg, f.cfg)\n\tif err 
!= nil {\n\t\treturn err\n\t}\n\tfor _, o := range opts {\n\t\to(f)\n\t}\n\tif f.cfg.Path == \"\" {\n\t\treturn errors.New(\"missing file path\")\n\t}\n\tif f.cfg.Interval <= 0 {\n\t\tf.cfg.Interval = watchInterval\n\t}\n\tif logger != nil {\n\t\tf.logger.SetOutput(logger.Writer())\n\t\tf.logger.SetFlags(logger.Flags())\n\t}\n\tif f.cfg.Template != \"\" {\n\t\tf.tpl, err = gtemplate.CreateTemplate(\"file-loader-template\", f.cfg.Template)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr = f.readVars(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, actName := range f.cfg.OnAdd {\n\t\tif cfg, ok := f.actionsConfig[actName]; ok {\n\t\t\ta, err := f.initializeAction(cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tf.addActions = append(f.addActions, a)\n\t\t\tcontinue\n\t\t}\n\t\treturn fmt.Errorf(\"unknown action name %q\", actName)\n\n\t}\n\tfor _, actName := range f.cfg.OnDelete {\n\t\tif cfg, ok := f.actionsConfig[actName]; ok {\n\t\t\ta, err := f.initializeAction(cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tf.delActions = append(f.delActions, a)\n\t\t\tcontinue\n\t\t}\n\t\treturn fmt.Errorf(\"unknown action name %q\", actName)\n\t}\n\tf.numActions = len(f.addActions) + len(f.delActions)\n\tf.logger.Printf(\"initialized loader type %q: %s\", loaderType, f)\n\treturn nil\n}\n\nfunc (f *fileLoader) String() string {\n\tb, err := json.Marshal(f.cfg)\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"%+v\", f.cfg)\n\t}\n\treturn string(b)\n}\n\nfunc (f *fileLoader) Start(ctx context.Context) chan *loaders.TargetOperation {\n\topChan := make(chan *loaders.TargetOperation)\n\tticker := time.NewTicker(f.cfg.Interval)\n\tgo func() {\n\t\tdefer close(opChan)\n\t\tdefer ticker.Stop()\n\t\ttime.Sleep(f.cfg.StartDelay)\n\t\tf.update(ctx, opChan)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tf.logger.Printf(\"%q context done: %v\", loaderType, ctx.Err())\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tf.update(ctx, 
opChan)\n\t\t\t}\n\t\t}\n\t}()\n\treturn opChan\n}\n\nfunc (f *fileLoader) RunOnce(ctx context.Context) (map[string]*types.TargetConfig, error) {\n\treadTargets, err := f.getTargets(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif f.cfg.Debug {\n\t\tf.logger.Printf(\"file loader discovered %d target(s)\", len(readTargets))\n\t}\n\treturn readTargets, nil\n}\n\nfunc (f *fileLoader) update(ctx context.Context, opChan chan *loaders.TargetOperation) {\n\treadTargets, err := f.RunOnce(ctx)\n\tif _, ok := err.(*os.PathError); ok {\n\t\tf.logger.Printf(\"path err: %v\", err)\n\t\treturn\n\t}\n\tif err != nil {\n\t\tf.logger.Printf(\"failed to read targets file: %v\", err)\n\t\treturn\n\t}\n\tselect {\n\t// check if the context is done before\n\t// updating the targets to the channel\n\tcase <-ctx.Done():\n\t\tf.logger.Printf(\"context done: %v\", ctx.Err())\n\t\treturn\n\tdefault:\n\t\tf.updateTargets(ctx, readTargets, opChan)\n\t}\n}\n\nfunc (f *fileLoader) getTargets(ctx context.Context) (map[string]*types.TargetConfig, error) {\n\tfileLoaderFileReadTotal.WithLabelValues(loaderType).Add(1)\n\tstart := time.Now()\n\t// read file bytes based on the path prefix\n\tctx, cancel := context.WithTimeout(ctx, f.cfg.Interval/2)\n\tdefer cancel()\n\tb, err := gfile.ReadFile(ctx, f.cfg.Path)\n\tfileLoaderFileReadDuration.WithLabelValues(loaderType).Set(float64(time.Since(start).Nanoseconds()))\n\tif err != nil {\n\t\tfileLoaderFailedFileRead.WithLabelValues(loaderType, fmt.Sprintf(\"%v\", err)).Add(1)\n\t\treturn nil, err\n\t}\n\tif f.tpl != nil {\n\t\tvar input interface{}\n\t\terr = json.Unmarshal(b, &input)\n\t\tif err != nil {\n\t\t\tfileLoaderFailedFileRead.WithLabelValues(loaderType, fmt.Sprintf(\"%v\", err)).Add(1)\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf := new(bytes.Buffer)\n\t\terr = f.tpl.Execute(buf, input)\n\t\tif err != nil {\n\t\t\tfileLoaderFailedFileRead.WithLabelValues(loaderType, fmt.Sprintf(\"%v\", err)).Add(1)\n\t\t\treturn nil, err\n\t\t}\n\t\tb = 
buf.Bytes()\n\t}\n\tresult := make(map[string]*types.TargetConfig)\n\t// unmarshal the bytes into a map of targetConfigs\n\terr = yaml.Unmarshal(b, result)\n\tif err != nil {\n\t\tfileLoaderFailedFileRead.WithLabelValues(loaderType, fmt.Sprintf(\"%v\", err)).Add(1)\n\t\treturn nil, err\n\t}\n\t// properly initialize address and name if not set\n\tfor n, t := range result {\n\t\tif t == nil && n != \"\" {\n\t\t\tresult[n] = &types.TargetConfig{\n\t\t\t\tName:    n,\n\t\t\t\tAddress: n,\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif t.Name == \"\" {\n\t\t\tt.Name = n\n\t\t}\n\t\tif t.Address == \"\" {\n\t\t\tt.Address = n\n\t\t}\n\t}\n\tif f.cfg.Debug {\n\t\tf.logger.Printf(\"result: %s\", result)\n\t}\n\treturn result, nil\n}\n\nfunc (f *fileLoader) updateTargets(ctx context.Context, tcs map[string]*types.TargetConfig, opChan chan *loaders.TargetOperation) {\n\tvar err error\n\tif f.targetConfigFn != nil {\n\t\tfor _, tc := range tcs {\n\t\t\terr = f.targetConfigFn(tc)\n\t\t\tif err != nil {\n\t\t\t\tf.logger.Printf(\"failed running target config fn on target %q\", tc.Name)\n\t\t\t}\n\t\t}\n\t}\n\ttargetOp, err := f.runActions(ctx, tcs, loaders.Diff(f.lastTargets, tcs))\n\tif err != nil {\n\t\tf.logger.Printf(\"failed to run actions: %v\", err)\n\t\treturn\n\t}\n\tnumAdds := len(targetOp.Add)\n\tnumDels := len(targetOp.Del)\n\tdefer func() {\n\t\tfileLoaderLoadedTargets.WithLabelValues(loaderType).Set(float64(numAdds))\n\t\tfileLoaderDeletedTargets.WithLabelValues(loaderType).Set(float64(numDels))\n\t}()\n\tif numAdds+numDels == 0 {\n\t\treturn\n\t}\n\tf.m.Lock()\n\t// do delete first since change is delete+add\n\tfor _, del := range targetOp.Del {\n\t\tdelete(f.lastTargets, del)\n\t}\n\tfor _, add := range targetOp.Add {\n\t\tf.lastTargets[add.Name] = add\n\t}\n\tf.m.Unlock()\n\topChan <- targetOp\n}\n\nfunc (f *fileLoader) readVars(ctx context.Context) error {\n\tif f.cfg.VarsFile == \"\" {\n\t\tf.vars = f.cfg.Vars\n\t\treturn nil\n\t}\n\tb, err := gfile.ReadFile(ctx, 
f.cfg.VarsFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := make(map[string]interface{})\n\terr = yaml.Unmarshal(b, &v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.vars = utils.MergeMaps(v, f.cfg.Vars)\n\treturn nil\n}\n\nfunc (f *fileLoader) initializeAction(cfg map[string]interface{}) (actions.Action, error) {\n\tif len(cfg) == 0 {\n\t\treturn nil, errors.New(\"missing action definition\")\n\t}\n\tif actType, ok := cfg[\"type\"]; ok {\n\t\tswitch actType := actType.(type) {\n\t\tcase string:\n\t\t\tif in, ok := actions.Actions[actType]; ok {\n\t\t\t\tact := in()\n\t\t\t\terr := act.Init(cfg, actions.WithLogger(f.logger), actions.WithTargets(nil))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn act, nil\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"unknown action type %q\", actType)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unexpected action field type %T\", actType)\n\t\t}\n\t}\n\treturn nil, errors.New(\"missing type field under action\")\n}\n\nfunc (f *fileLoader) runActions(ctx context.Context, tcs map[string]*types.TargetConfig, targetOp *loaders.TargetOperation) (*loaders.TargetOperation, error) {\n\tif f.numActions == 0 {\n\t\treturn targetOp, nil\n\t}\n\topChan := make(chan *loaders.TargetOperation)\n\t// some actions are defined,\n\tdoneCh := make(chan struct{})\n\tresult := &loaders.TargetOperation{\n\t\tAdd: make(map[string]*types.TargetConfig, len(targetOp.Add)),\n\t\tDel: make([]string, 0, len(targetOp.Del)),\n\t}\n\tctx, cancel := context.WithTimeout(ctx, f.cfg.Interval)\n\tdefer cancel()\n\t// start gathering goroutine\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tclose(doneCh)\n\t\t\t\treturn\n\t\t\tcase op, ok := <-opChan:\n\t\t\t\tif !ok {\n\t\t\t\t\tclose(doneCh)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfor n, t := range op.Add {\n\t\t\t\t\tresult.Add[n] = t\n\t\t\t\t}\n\t\t\t\tresult.Del = append(result.Del, op.Del...)\n\t\t\t}\n\t\t}\n\t}()\n\t// create waitGroup and add the 
number of target operations to it\n\twg := new(sync.WaitGroup)\n\twg.Add(len(targetOp.Add) + len(targetOp.Del))\n\t// run OnAdd actions\n\tfor n, tAdd := range targetOp.Add {\n\t\tgo func(n string, tc *types.TargetConfig) {\n\t\t\tdefer wg.Done()\n\t\t\terr := f.runOnAddActions(ctx, tc.Name, tcs)\n\t\t\tif err != nil {\n\t\t\t\tf.logger.Printf(\"failed running OnAdd actions: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\topChan <- &loaders.TargetOperation{Add: map[string]*types.TargetConfig{n: tc}}\n\t\t}(n, tAdd)\n\t}\n\t// run OnDelete actions\n\tfor _, tDel := range targetOp.Del {\n\t\tgo func(name string) {\n\t\t\tdefer wg.Done()\n\t\t\terr := f.runOnDeleteActions(ctx, name, tcs)\n\t\t\tif err != nil {\n\t\t\t\tf.logger.Printf(\"failed running OnDelete actions: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\topChan <- &loaders.TargetOperation{Del: []string{name}}\n\t\t}(tDel)\n\t}\n\twg.Wait()\n\tclose(opChan)\n\t<-doneCh //wait for gathering goroutine to finish\n\treturn result, nil\n}\n\nfunc (d *fileLoader) runOnAddActions(ctx context.Context, tName string, tcs map[string]*types.TargetConfig) error {\n\taCtx := &actions.Context{\n\t\tInput:   tName,\n\t\tEnv:     make(map[string]interface{}),\n\t\tVars:    d.vars,\n\t\tTargets: tcs,\n\t}\n\tfor _, act := range d.addActions {\n\t\td.logger.Printf(\"running action %q for target %q\", act.NName(), tName)\n\t\tres, err := act.Run(ctx, aCtx)\n\t\tif err != nil {\n\t\t\t// delete target from known targets map\n\t\t\td.m.Lock()\n\t\t\tdelete(d.lastTargets, tName)\n\t\t\td.m.Unlock()\n\t\t\treturn fmt.Errorf(\"action %q for target %q failed: %v\", act.NName(), tName, err)\n\t\t}\n\n\t\taCtx.Env[act.NName()] = utils.Convert(res)\n\t\tif d.cfg.Debug {\n\t\t\td.logger.Printf(\"action %q, target %q result: %+v\", act.NName(), tName, res)\n\t\t\tb, _ := json.MarshalIndent(aCtx, \"\", \"  \")\n\t\t\td.logger.Printf(\"action %q context:\\n%s\", act.NName(), string(b))\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *fileLoader) 
runOnDeleteActions(ctx context.Context, tName string, tcs map[string]*types.TargetConfig) error {\n\tenv := make(map[string]interface{})\n\tfor _, act := range d.delActions {\n\t\tres, err := act.Run(ctx, &actions.Context{Input: tName, Env: env, Vars: d.vars})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"action %q for target %q failed: %v\", act.NName(), tName, err)\n\t\t}\n\t\tenv[act.NName()] = res\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/loaders/file_loader/file_loader_metrics.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage file_loader\n\nimport \"github.com/prometheus/client_golang/prometheus\"\n\nvar fileLoaderLoadedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"file_loader\",\n\tName:      \"number_of_loaded_targets\",\n\tHelp:      \"Number of new targets successfully loaded\",\n}, []string{\"loader_type\"})\n\nvar fileLoaderDeletedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"file_loader\",\n\tName:      \"number_of_deleted_targets\",\n\tHelp:      \"Number of targets successfully deleted\",\n}, []string{\"loader_type\"})\n\nvar fileLoaderFailedFileRead = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"file_loader\",\n\tName:      \"number_of_failed_file_reads\",\n\tHelp:      \"Number of times gnmic failed to read the file\",\n}, []string{\"loader_type\", \"error\"})\n\nvar fileLoaderFileReadTotal = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"file_loader\",\n\tName:      \"number_of_file_read_attempts_total\",\n\tHelp:      \"Number of times the loader attempted to read the file\",\n}, []string{\"loader_type\"})\n\nvar fileLoaderFileReadDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"file_loader\",\n\tName:      \"file_read_duration_ns\",\n\tHelp:      \"Duration of file read in ns\",\n}, []string{\"loader_type\"})\n\nfunc initMetrics() 
{\n\tfileLoaderLoadedTargets.WithLabelValues(loaderType).Set(0)\n\tfileLoaderDeletedTargets.WithLabelValues(loaderType).Set(0)\n\tfileLoaderFailedFileRead.WithLabelValues(loaderType, \"\").Add(0)\n\tfileLoaderFileReadTotal.WithLabelValues(loaderType).Add(0)\n\tfileLoaderFileReadDuration.WithLabelValues(loaderType).Set(0)\n}\n\nfunc registerMetrics(reg *prometheus.Registry) error {\n\tinitMetrics()\n\tvar err error\n\tif err = reg.Register(fileLoaderLoadedTargets); err != nil {\n\t\treturn err\n\t}\n\tif err = reg.Register(fileLoaderDeletedTargets); err != nil {\n\t\treturn err\n\t}\n\tif err = reg.Register(fileLoaderFailedFileRead); err != nil {\n\t\treturn err\n\t}\n\tif err = reg.Register(fileLoaderFileReadTotal); err != nil {\n\t\treturn err\n\t}\n\tif err = reg.Register(fileLoaderFileReadDuration); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/loaders/file_loader/options.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage file_loader\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc (f *fileLoader) RegisterMetrics(reg *prometheus.Registry) {\n\tif !f.cfg.EnableMetrics {\n\t\treturn\n\t}\n\tif reg == nil {\n\t\tf.logger.Printf(\"ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`\")\n\t\treturn\n\t}\n\tif err := registerMetrics(reg); err != nil {\n\t\tf.logger.Printf(\"failed to register metrics: %v\", err)\n\t}\n}\n\nfunc (f *fileLoader) WithActions(acts map[string]map[string]interface{}) {\n\tf.actionsConfig = acts\n}\n\nfunc (f *fileLoader) WithTargetsDefaults(fn func(tc *types.TargetConfig) error) {\n\tf.targetConfigFn = fn\n}\n"
  },
  {
    "path": "pkg/loaders/http_loader/http_loader.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage http_loader\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"sync\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"gopkg.in/yaml.v2\"\n\n\t\"github.com/go-resty/resty/v2\"\n\n\t\"github.com/openconfig/gnmic/pkg/actions\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\tgfile \"github.com/openconfig/gnmic/pkg/file\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/loaders\"\n)\n\nconst (\n\tloggingPrefix   = \"[http_loader] \"\n\tloaderType      = \"http\"\n\tdefaultInterval = 1 * time.Minute\n\tdefaultTimeout  = 50 * time.Second\n)\n\nfunc init() {\n\tloaders.Register(loaderType, func() loaders.TargetLoader {\n\t\treturn &httpLoader{\n\t\t\tcfg:         &cfg{},\n\t\t\tm:           new(sync.RWMutex),\n\t\t\tlastTargets: make(map[string]*types.TargetConfig),\n\t\t\tlogger:      log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t}\n\t})\n}\n\ntype httpLoader struct {\n\tcfg            *cfg\n\tm              *sync.RWMutex\n\tlastTargets    map[string]*types.TargetConfig\n\ttargetConfigFn func(*types.TargetConfig) error\n\tlogger         *log.Logger\n\t//\n\ttpl           *template.Template\n\tvars          map[string]interface{}\n\tactionsConfig map[string]map[string]interface{}\n\taddActions    []actions.Action\n\tdelActions    []actions.Action\n\tnumActions    int\n}\n\ntype cfg struct {\n\t// the server URL, must include http or https as 
a prefix\n\tURL string `json:\"url,omitempty\" mapstructure:\"url,omitempty\"`\n\t// server query interval\n\tInterval time.Duration `json:\"interval,omitempty\" mapstructure:\"interval,omitempty\"`\n\t// query timeout\n\tTimeout time.Duration `json:\"timeout,omitempty\" mapstructure:\"timeout,omitempty\"`\n\t// TLS config\n\tTLS *types.TLSConfig `json:\"tls,omitempty\" mapstructure:\"tls,omitempty\"`\n\t// SkipVerify bool   `json:\"skip-verify,omitempty\" mapstructure:\"skip-verify,omitempty\"`\n\t// CAFile     string `json:\"ca-file,omitempty\" mapstructure:\"ca-file,omitempty\"`\n\t// CertFile   string `json:\"cert-file,omitempty\" mapstructure:\"cert-file,omitempty\"`\n\t// KeyFile    string `json:\"key-file,omitempty\" mapstructure:\"key-file,omitempty\"`\n\t// HTTP basicAuth\n\tUsername string `json:\"username,omitempty\" mapstructure:\"username,omitempty\"`\n\tPassword string `json:\"password,omitempty\" mapstructure:\"password,omitempty\"`\n\t// Oauth2\n\tToken string `json:\"token,omitempty\" mapstructure:\"token,omitempty\"`\n\t// the auth scheme. 
The default auth scheme is `Bearer``.\n\tAuthScheme string `json:\"auth-scheme,omitempty\" mapstructure:\"auth-scheme,omitempty\"`\n\t// a Go text template that can be used to transform the targets format\n\t// read from the remote http server to match gNMIc's expected format.\n\tTemplate string `json:\"template,omitempty\" mapstructure:\"template,omitempty\"`\n\t// a Go text template that can be used to transform the targets format\n\t// read from the remote http server to match gNMIc's expected format.\n\tTemplateFile string `json:\"template-file,omitempty\" mapstructure:\"template-file,omitempty\"`\n\t// time to wait before the first http query\n\tStartDelay time.Duration `json:\"start-delay,omitempty\" mapstructure:\"start-delay,omitempty\"`\n\t// if true, registers httpLoader prometheus metrics with the provided\n\t// prometheus registry\n\tEnableMetrics bool `json:\"enable-metrics,omitempty\" mapstructure:\"enable-metrics,omitempty\"`\n\t// enable Debug\n\tDebug bool `json:\"debug,omitempty\" mapstructure:\"debug,omitempty\"`\n\t// variables definitions to be passed to the actions\n\tVars map[string]interface{}\n\t// variable file, values in this file will be overwritten by\n\t// the ones defined in Vars\n\tVarsFile string `mapstructure:\"vars-file,omitempty\"`\n\t// list of Actions to run on new target discovery\n\tOnAdd []string `json:\"on-add,omitempty\" mapstructure:\"on-add,omitempty\"`\n\t// list of Actions to run on target removal\n\tOnDelete []string `json:\"on-delete,omitempty\" mapstructure:\"on-delete,omitempty\"`\n}\n\nfunc (h *httpLoader) Init(ctx context.Context, cfg map[string]interface{}, logger *log.Logger, opts ...loaders.Option) error {\n\terr := loaders.DecodeConfig(cfg, h.cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = h.setDefaults()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, o := range opts {\n\t\to(h)\n\t}\n\tif logger != nil {\n\t\th.logger.SetOutput(logger.Writer())\n\t\th.logger.SetFlags(logger.Flags())\n\t}\n\tif 
h.cfg.Template != \"\" {\n\t\th.tpl, err = gtemplate.CreateTemplate(\"http-loader-template\", h.cfg.Template)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif h.cfg.TemplateFile != \"\" {\n\t\th.tpl, err = gtemplate.CreateFileTemplate(h.cfg.TemplateFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr = h.readVars(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, actName := range h.cfg.OnAdd {\n\t\tif cfg, ok := h.actionsConfig[actName]; ok {\n\t\t\ta, err := h.initializeAction(cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\th.addActions = append(h.addActions, a)\n\t\t\tcontinue\n\t\t}\n\t\treturn fmt.Errorf(\"unknown action name %q\", actName)\n\n\t}\n\tfor _, actName := range h.cfg.OnDelete {\n\t\tif cfg, ok := h.actionsConfig[actName]; ok {\n\t\t\ta, err := h.initializeAction(cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\th.delActions = append(h.delActions, a)\n\t\t\tcontinue\n\t\t}\n\t\treturn fmt.Errorf(\"unknown action name %q\", actName)\n\t}\n\th.numActions = len(h.addActions) + len(h.delActions)\n\treturn nil\n}\n\nfunc (h *httpLoader) Start(ctx context.Context) chan *loaders.TargetOperation {\n\topChan := make(chan *loaders.TargetOperation)\n\tticker := time.NewTicker(h.cfg.Interval)\n\tgo func() {\n\t\tdefer close(opChan)\n\t\tdefer ticker.Stop()\n\t\ttime.Sleep(h.cfg.StartDelay)\n\t\th.update(ctx, opChan)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\th.logger.Printf(\"%q context done: %v\", loaderType, ctx.Err())\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\th.update(ctx, opChan)\n\t\t\t}\n\t\t}\n\t}()\n\treturn opChan\n}\n\nfunc (h *httpLoader) RunOnce(ctx context.Context) (map[string]*types.TargetConfig, error) {\n\treadTargets, err := h.getTargets()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif h.cfg.Debug {\n\t\th.logger.Printf(\"http loader discovered %d target(s)\", len(readTargets))\n\t}\n\treturn readTargets, nil\n}\n\nfunc (h *httpLoader) update(ctx context.Context, 
opChan chan *loaders.TargetOperation) {\n\treadTargets, err := h.getTargets()\n\tif err != nil {\n\t\th.logger.Printf(\"failed to read targets from HTTP server: %v\", err)\n\t\treturn\n\t}\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tdefault:\n\t\th.updateTargets(ctx, readTargets, opChan)\n\t}\n}\n\nfunc (h *httpLoader) setDefaults() error {\n\tif h.cfg.URL == \"\" {\n\t\treturn errors.New(\"missing URL\")\n\t}\n\tif h.cfg.Interval <= 0 {\n\t\th.cfg.Interval = defaultInterval\n\t}\n\tif h.cfg.Timeout <= 0 {\n\t\th.cfg.Timeout = defaultTimeout\n\t}\n\treturn nil\n}\n\nfunc (h *httpLoader) getTargets() (map[string]*types.TargetConfig, error) {\n\tc := resty.New()\n\tif h.cfg.TLS != nil {\n\t\ttlsCfg, err := utils.NewTLSConfig(h.cfg.TLS.CaFile, h.cfg.TLS.CertFile, h.cfg.TLS.KeyFile, \"\", h.cfg.TLS.SkipVerify, false)\n\t\tif err != nil {\n\t\t\thttpLoaderFailedGetRequests.WithLabelValues(loaderType, fmt.Sprintf(\"%v\", err)).Add(1)\n\t\t\treturn nil, err\n\t\t}\n\t\tif tlsCfg != nil {\n\t\t\tc = c.SetTLSClientConfig(tlsCfg)\n\t\t}\n\t}\n\tc.SetTimeout(h.cfg.Timeout)\n\tif h.cfg.Username != \"\" && h.cfg.Password != \"\" {\n\t\tc.SetBasicAuth(h.cfg.Username, h.cfg.Password)\n\t}\n\tif h.cfg.Token != \"\" {\n\t\tc.SetAuthToken(h.cfg.Token)\n\t}\n\tif h.cfg.AuthScheme != \"\" {\n\t\tc.SetAuthScheme(h.cfg.AuthScheme)\n\t}\n\tstart := time.Now()\n\thttpLoaderGetRequestsTotal.WithLabelValues(loaderType).Add(1)\n\trsp, err := c.R().SetHeader(\"Accept\", \"application/json\").Get(h.cfg.URL)\n\tif err != nil {\n\t\thttpLoaderFailedGetRequests.WithLabelValues(loaderType, fmt.Sprintf(\"%v\", err)).Add(1)\n\t\treturn nil, err\n\t}\n\thttpLoaderGetRequestDuration.WithLabelValues(loaderType).Set(float64(time.Since(start).Nanoseconds()))\n\tif rsp.StatusCode() != 200 {\n\t\thttpLoaderFailedGetRequests.WithLabelValues(loaderType, rsp.Status())\n\t\treturn nil, fmt.Errorf(\"failed request, code=%d\", rsp.StatusCode())\n\t}\n\tb := rsp.Body()\n\tif h.tpl != nil {\n\t\tvar input 
interface{}\n\t\terr = json.Unmarshal(b, &input)\n\t\tif err != nil {\n\t\t\thttpLoaderFailedGetRequests.WithLabelValues(loaderType, fmt.Sprintf(\"%v\", err)).Add(1)\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf := new(bytes.Buffer)\n\t\terr = h.tpl.Execute(buf, input)\n\t\tif err != nil {\n\t\t\thttpLoaderFailedGetRequests.WithLabelValues(loaderType, fmt.Sprintf(\"%v\", err)).Add(1)\n\t\t\treturn nil, err\n\t\t}\n\t\tb = buf.Bytes()\n\t}\n\n\tresult := make(map[string]*types.TargetConfig)\n\t// unmarshal the bytes into a map of targetConfigs\n\terr = json.Unmarshal(b, &result)\n\tif err != nil {\n\t\thttpLoaderFailedGetRequests.WithLabelValues(loaderType, fmt.Sprintf(\"%v\", err)).Add(1)\n\t\treturn nil, err\n\t}\n\t// properly initialize address and name if not set\n\tfor n, t := range result {\n\t\tif t == nil && n != \"\" {\n\t\t\tresult[n] = &types.TargetConfig{\n\t\t\t\tName:    n,\n\t\t\t\tAddress: n,\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif t.Name == \"\" {\n\t\t\tt.Name = n\n\t\t}\n\t\tif t.Address == \"\" {\n\t\t\tt.Address = n\n\t\t}\n\t}\n\tif h.cfg.Debug {\n\t\th.logger.Printf(\"result: %+v\", result)\n\t}\n\treturn result, nil\n}\n\nfunc (h *httpLoader) updateTargets(ctx context.Context, tcs map[string]*types.TargetConfig, opChan chan *loaders.TargetOperation) {\n\tvar err error\n\tfor _, tc := range tcs {\n\t\terr = h.targetConfigFn(tc)\n\t\tif err != nil {\n\t\t\th.logger.Printf(\"failed running target config fn on target %q\", tc.Name)\n\t\t}\n\t}\n\ttargetOp, err := h.runActions(ctx, tcs, loaders.Diff(h.lastTargets, tcs))\n\tif err != nil {\n\t\th.logger.Printf(\"failed to run actions: %v\", err)\n\t\treturn\n\t}\n\tnumAdds := len(targetOp.Add)\n\tnumDels := len(targetOp.Del)\n\tdefer func() {\n\t\thttpLoaderLoadedTargets.WithLabelValues(loaderType).Set(float64(numAdds))\n\t\thttpLoaderDeletedTargets.WithLabelValues(loaderType).Set(float64(numDels))\n\t}()\n\tif numAdds+numDels == 0 {\n\t\treturn\n\t}\n\th.m.Lock()\n\t// do delete first, since target 
change\n\t// consists of delete and add\n\tfor _, n := range targetOp.Del {\n\t\tdelete(h.lastTargets, n)\n\t}\n\tfor n, t := range targetOp.Add {\n\t\tif _, ok := h.lastTargets[n]; !ok {\n\t\t\th.lastTargets[n] = t\n\t\t}\n\t}\n\th.m.Unlock()\n\topChan <- targetOp\n}\n\nfunc (h *httpLoader) readVars(ctx context.Context) error {\n\tif h.cfg.VarsFile == \"\" {\n\t\th.vars = h.cfg.Vars\n\t\treturn nil\n\t}\n\tctx, cancel := context.WithTimeout(ctx, h.cfg.Interval)\n\tdefer cancel()\n\tb, err := gfile.ReadFile(ctx, h.cfg.VarsFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := make(map[string]interface{})\n\terr = yaml.Unmarshal(b, &v)\n\tif err != nil {\n\t\treturn err\n\t}\n\th.vars = utils.MergeMaps(v, h.cfg.Vars)\n\treturn nil\n}\n\nfunc (h *httpLoader) initializeAction(cfg map[string]interface{}) (actions.Action, error) {\n\tif len(cfg) == 0 {\n\t\treturn nil, errors.New(\"missing action definition\")\n\t}\n\tif actType, ok := cfg[\"type\"]; ok {\n\t\tswitch actType := actType.(type) {\n\t\tcase string:\n\t\t\tif in, ok := actions.Actions[actType]; ok {\n\t\t\t\tact := in()\n\t\t\t\terr := act.Init(cfg, actions.WithLogger(h.logger), actions.WithTargets(nil))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn act, nil\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"unknown action type %q\", actType)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unexpected action field type %T\", actType)\n\t\t}\n\t}\n\treturn nil, errors.New(\"missing type field under action\")\n}\n\nfunc (f *httpLoader) runActions(ctx context.Context, tcs map[string]*types.TargetConfig, targetOp *loaders.TargetOperation) (*loaders.TargetOperation, error) {\n\tif f.numActions == 0 {\n\t\treturn targetOp, nil\n\t}\n\tresult := &loaders.TargetOperation{\n\t\tAdd: make(map[string]*types.TargetConfig, len(targetOp.Add)),\n\t\tDel: make([]string, 0, len(targetOp.Del)),\n\t}\n\tvar resultMu sync.Mutex\n\tctx, cancel := context.WithTimeout(ctx, f.cfg.Interval)\n\tdefer 
cancel()\n\t// create waitGroup and add the number of target operations to it\n\twgDelete := new(sync.WaitGroup)\n\twgDelete.Add(len(targetOp.Del))\n\t// run OnDelete actions first, since change==delete+add\n\tfor _, tDel := range targetOp.Del {\n\t\tgo func(name string) {\n\t\t\tdefer wgDelete.Done()\n\t\t\terr := f.runOnDeleteActions(ctx, name, tcs)\n\t\t\tif err != nil {\n\t\t\t\tf.logger.Printf(\"failed running OnDelete actions: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresultMu.Lock()\n\t\t\tresult.Del = append(result.Del, name)\n\t\t\tresultMu.Unlock()\n\t\t}(tDel)\n\t}\n\twgDelete.Wait()\n\n\twgAdd := new(sync.WaitGroup)\n\twgAdd.Add(len(targetOp.Add))\n\n\t// run OnAdd actions\n\tfor n, tAdd := range targetOp.Add {\n\t\tgo func(n string, tc *types.TargetConfig) {\n\t\t\tdefer wgAdd.Done()\n\t\t\terr := f.runOnAddActions(ctx, tc.Name, tcs)\n\t\t\tif err != nil {\n\t\t\t\tf.logger.Printf(\"failed running OnAdd actions: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresultMu.Lock()\n\t\t\tresult.Add[n] = tc\n\t\t\tresultMu.Unlock()\n\t\t}(n, tAdd)\n\t}\n\n\twgAdd.Wait()\n\treturn result, nil\n}\n\nfunc (d *httpLoader) runOnAddActions(ctx context.Context, tName string, tcs map[string]*types.TargetConfig) error {\n\taCtx := &actions.Context{\n\t\tInput:   tName,\n\t\tEnv:     make(map[string]interface{}),\n\t\tVars:    d.vars,\n\t\tTargets: tcs,\n\t}\n\tfor _, act := range d.addActions {\n\t\td.logger.Printf(\"running action %q for target %q\", act.NName(), tName)\n\t\tres, err := act.Run(ctx, aCtx)\n\t\tif err != nil {\n\t\t\t// delete target from known targets map\n\t\t\td.m.Lock()\n\t\t\tdelete(d.lastTargets, tName)\n\t\t\td.m.Unlock()\n\t\t\treturn fmt.Errorf(\"action %q for target %q failed: %v\", act.NName(), tName, err)\n\t\t}\n\n\t\taCtx.Env[act.NName()] = utils.Convert(res)\n\t\tif d.cfg.Debug {\n\t\t\td.logger.Printf(\"action %q, target %q result: %+v\", act.NName(), tName, res)\n\t\t\tb, _ := json.MarshalIndent(aCtx, \"\", \"  
\")\n\t\t\td.logger.Printf(\"action %q context:\\n%s\", act.NName(), string(b))\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *httpLoader) runOnDeleteActions(ctx context.Context, tName string, _ map[string]*types.TargetConfig) error {\n\tenv := make(map[string]any)\n\tfor _, act := range d.delActions {\n\t\tres, err := act.Run(ctx, &actions.Context{Input: tName, Env: env, Vars: d.vars})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"action %q for target %q failed: %v\", act.NName(), tName, err)\n\t\t}\n\t\tenv[act.NName()] = res\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/loaders/http_loader/http_loader_metrics.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage http_loader\n\nimport \"github.com/prometheus/client_golang/prometheus\"\n\nvar httpLoaderLoadedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"http_loader\",\n\tName:      \"number_of_loaded_targets\",\n\tHelp:      \"Number of new targets successfully loaded\",\n}, []string{\"loader_type\"})\n\nvar httpLoaderDeletedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"http_loader\",\n\tName:      \"number_of_deleted_targets\",\n\tHelp:      \"Number of targets successfully deleted\",\n}, []string{\"loader_type\"})\n\nvar httpLoaderFailedGetRequests = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"http_loader\",\n\tName:      \"number_of_failed_http_requests\",\n\tHelp:      \"Number of times the http Get request failed\",\n}, []string{\"loader_type\", \"error\"})\n\nvar httpLoaderGetRequestsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"http_loader\",\n\tName:      \"number_of_http_requests_total\",\n\tHelp:      \"Number of times the loader sent an HTTP request\",\n}, []string{\"loader_type\"})\n\nvar httpLoaderGetRequestDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"http_loader\",\n\tName:      \"http_request_duration_ns\",\n\tHelp:      \"Duration of http request in ns\",\n}, []string{\"loader_type\"})\n\nfunc initMetrics() 
{\n\thttpLoaderLoadedTargets.WithLabelValues(loaderType).Set(0)\n\thttpLoaderDeletedTargets.WithLabelValues(loaderType).Set(0)\n\thttpLoaderFailedGetRequests.WithLabelValues(loaderType, \"\").Add(0)\n\thttpLoaderGetRequestsTotal.WithLabelValues(loaderType).Add(0)\n\thttpLoaderGetRequestDuration.WithLabelValues(loaderType).Set(0)\n}\n\nfunc registerMetrics(reg *prometheus.Registry) error {\n\tinitMetrics()\n\tvar err error\n\tif err = reg.Register(httpLoaderLoadedTargets); err != nil {\n\t\treturn err\n\t}\n\tif err = reg.Register(httpLoaderDeletedTargets); err != nil {\n\t\treturn err\n\t}\n\tif err = reg.Register(httpLoaderFailedGetRequests); err != nil {\n\t\treturn err\n\t}\n\tif err = reg.Register(httpLoaderGetRequestsTotal); err != nil {\n\t\treturn err\n\t}\n\tif err = reg.Register(httpLoaderGetRequestDuration); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/loaders/http_loader/http_loader_test.go",
    "content": "// © 2025 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage http_loader\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"slices\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"gopkg.in/yaml.v2\"\n\n\t\"github.com/openconfig/gnmic/pkg/actions\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/loaders\"\n)\n\n// fakeAction is a minimal implementation of actions.Action for testing.\ntype fakeAction struct {\n\tname  string\n\tdelay time.Duration\n\tfail  bool\n}\n\nfunc (f *fakeAction) Init(cfg map[string]interface{}, opts ...actions.Option) error {\n\tif v, ok := cfg[\"name\"].(string); ok {\n\t\tf.name = v\n\t}\n\treturn nil\n}\nfunc (f *fakeAction) Run(ctx context.Context, aCtx *actions.Context) (interface{}, error) {\n\tif f.delay > 0 {\n\t\tselect {\n\t\tcase <-time.After(f.delay):\n\t\tcase <-ctx.Done():\n\t\t}\n\t}\n\tif f.fail {\n\t\treturn nil, errors.New(\"forced failure\")\n\t}\n\treturn \"ok\", nil\n}\nfunc (f *fakeAction) NName() string                              { return f.name }\nfunc (f *fakeAction) WithTargets(map[string]*types.TargetConfig) {}\nfunc (f *fakeAction) WithLogger(*log.Logger)                     {}\n\nfunc newTestLoader(t *testing.T) *httpLoader {\n\tt.Helper()\n\treturn &httpLoader{\n\t\tcfg:            &cfg{Interval: 500 * time.Millisecond},\n\t\tm:              new(sync.RWMutex),\n\t\tlastTargets:    make(map[string]*types.TargetConfig),\n\t\ttargetConfigFn: func(tc *types.TargetConfig) 
error { return nil },\n\t\tlogger:         log.New(io.Discard, \"\", 0),\n\t}\n}\n\nfunc TestRunActions_AddAndDelete_NoDeadlock(t *testing.T) {\n\thl := newTestLoader(t)\n\t// ensure actions are present to exercise the concurrent paths\n\thl.addActions = []actions.Action{&fakeAction{name: \"add1\", delay: 10 * time.Millisecond}}\n\thl.delActions = []actions.Action{&fakeAction{name: \"del1\", delay: 10 * time.Millisecond}}\n\thl.numActions = len(hl.addActions) + len(hl.delActions)\n\n\ttcs := map[string]*types.TargetConfig{\n\t\t\"t-add\": {Name: \"t-add\", Address: \"10.0.0.1\"},\n\t\t\"t-del\": {Name: \"t-del\", Address: \"10.0.0.2\"},\n\t}\n\top := &loaders.TargetOperation{\n\t\tAdd: map[string]*types.TargetConfig{\n\t\t\t\"t-add\": tcs[\"t-add\"],\n\t\t},\n\t\tDel: []string{\"t-del\"},\n\t}\n\n\tctx := context.Background()\n\n\tdone := make(chan struct{})\n\tvar res *loaders.TargetOperation\n\tvar err error\n\tgo func() {\n\t\tres, err = hl.runActions(ctx, tcs, op)\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"runActions returned error: %v\", err)\n\t\t}\n\t\tif _, ok := res.Add[\"t-add\"]; !ok {\n\t\t\tt.Fatalf(\"expected add for 't-add', got: %+v\", res.Add)\n\t\t}\n\t\tif !slices.Contains(res.Del, \"t-del\") {\n\t\t\tt.Fatalf(\"expected delete for 't-del', got: %+v\", res.Del)\n\t\t}\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatal(\"runActions timed out (possible deadlock)\")\n\t}\n}\n\nfunc TestRunActions_ReplaceSameName_NoDeadlock(t *testing.T) {\n\thl := newTestLoader(t)\n\thl.addActions = []actions.Action{&fakeAction{name: \"add1\", delay: 10 * time.Millisecond}}\n\thl.delActions = []actions.Action{&fakeAction{name: \"del1\", delay: 10 * time.Millisecond}}\n\thl.numActions = len(hl.addActions) + len(hl.delActions)\n\n\toldTC := &types.TargetConfig{Name: \"t1\", Address: \"10.0.0.1\"}\n\tnewTC := &types.TargetConfig{Name: \"t1\", Address: \"10.0.0.1\"}\n\n\ttcs := 
map[string]*types.TargetConfig{\n\t\t\"t1\": newTC,\n\t}\n\top := &loaders.TargetOperation{\n\t\tAdd: map[string]*types.TargetConfig{\n\t\t\t\"t1\": newTC,\n\t\t},\n\t\tDel: []string{\"t1\"},\n\t}\n\t// seed lastTargets to emulate prior state (not directly used by runActions but mirrors scenario)\n\thl.lastTargets[\"t1\"] = oldTC\n\n\tctx := context.Background()\n\n\tdone := make(chan struct{})\n\tvar res *loaders.TargetOperation\n\tvar err error\n\tgo func() {\n\t\tres, err = hl.runActions(ctx, tcs, op)\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-done:\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"runActions returned error: %v\", err)\n\t\t}\n\t\tif _, ok := res.Add[\"t1\"]; !ok {\n\t\t\tt.Fatalf(\"expected add for 't1', got: %+v\", res.Add)\n\t\t}\n\t\tif !slices.Contains(res.Del, \"t1\") {\n\t\t\tt.Fatalf(\"expected delete for 't1', got: %+v\", res.Del)\n\t\t}\n\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatal(\"runActions timed out (possible deadlock)\")\n\t}\n}\n\nfunc TestSetDefaults(t *testing.T) {\n\t// missing URL\n\thl := newTestLoader(t)\n\thl.cfg.URL = \"\"\n\tif err := hl.setDefaults(); err == nil {\n\t\tt.Fatal(\"expected error for missing URL\")\n\t}\n\t// valid URL sets default interval/timeout\n\thl = newTestLoader(t)\n\thl.cfg.URL = \"http://localhost\"\n\thl.cfg.Interval = 0\n\thl.cfg.Timeout = 0\n\tif err := hl.setDefaults(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif hl.cfg.Interval <= 0 || hl.cfg.Timeout <= 0 {\n\t\tt.Fatal(\"expected defaults for interval and timeout to be set\")\n\t}\n}\n\nfunc TestReadVars_FromFileAndMerge(t *testing.T) {\n\t// create temp vars yaml\n\tdir := t.TempDir()\n\tvarsPath := filepath.Join(dir, \"vars.yaml\")\n\torig := map[string]interface{}{\"a\": 1, \"b\": map[string]interface{}{\"x\": \"y\"}}\n\tb, _ := yaml.Marshal(orig)\n\tif err := os.WriteFile(varsPath, b, 0600); err != nil {\n\t\tt.Fatalf(\"write vars file: %v\", err)\n\t}\n\thl := newTestLoader(t)\n\thl.cfg.VarsFile = 
varsPath\n\thl.cfg.Vars = map[string]interface{}{\"b\": map[string]interface{}{\"x\": \"z\", \"k\": \"v\"}, \"c\": 3}\n\tif err := hl.readVars(context.Background()); err != nil {\n\t\tt.Fatalf(\"readVars error: %v\", err)\n\t}\n\t// merged expectations: b.x overridden to z, b.k added, c added, a kept\n\tif fmt.Sprint(hl.vars[\"a\"]) != \"1\" {\n\t\tt.Fatalf(\"expected a=1, got %v\", hl.vars[\"a\"])\n\t}\n\tif m, ok := hl.vars[\"b\"].(map[string]interface{}); !ok || m[\"x\"] != \"z\" || m[\"k\"] != \"v\" {\n\t\tt.Fatalf(\"unexpected b: %#v\", hl.vars[\"b\"])\n\t}\n\tif fmt.Sprint(hl.vars[\"c\"]) != \"3\" {\n\t\tt.Fatalf(\"expected c=3, got %v\", hl.vars[\"c\"])\n\t}\n}\n\nfunc TestGetTargets_JSONAndTemplateAndNilEntries(t *testing.T) {\n\tplain := `{\"t1\": {\"name\":\"t1\",\"address\":\"1.1.1.1\"}, \"t2\": null}`\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t_, _ = w.Write([]byte(plain))\n\t}))\n\tdefer ts.Close()\n\n\t// no template\n\thl := newTestLoader(t)\n\thl.cfg.URL = ts.URL\n\tres, err := hl.getTargets()\n\tif err != nil {\n\t\tt.Fatalf(\"getTargets error: %v\", err)\n\t}\n\tif res[\"t1\"].Name != \"t1\" || res[\"t1\"].Address != \"1.1.1.1\" {\n\t\tt.Fatalf(\"unexpected t1: %#v\", res[\"t1\"])\n\t}\n\t// t2 is nil in input, should be auto-filled name/address\n\tif res[\"t2\"].Name != \"t2\" || res[\"t2\"].Address != \"t2\" {\n\t\tt.Fatalf(\"unexpected t2: %#v\", res[\"t2\"])\n\t}\n\n\t// template path: server returns array of objects -> map\n\tarr := `[{\"n\":\"a\",\"a\":\"10.0.0.1\"},{\"n\":\"b\"}]`\n\tts2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t_, _ = w.Write([]byte(arr))\n\t}))\n\tdefer ts2.Close()\n\thl2 := newTestLoader(t)\n\thl2.cfg.URL = ts2.URL\n\t// static template\n\thl2.cfg.Template = 
`{\"a\":{\"name\":\"a\",\"address\":\"10.0.0.1\"},\"b\":{\"name\":\"b\"}}`\n\t// initialize template via Init\n\tif err := hl2.Init(context.Background(), map[string]interface{}{\"url\": hl2.cfg.URL, \"template\": hl2.cfg.Template}, hl2.logger); err != nil {\n\t\tt.Fatalf(\"init with template: %v\", err)\n\t}\n\tres2, err := hl2.getTargets()\n\tif err != nil {\n\t\tt.Fatalf(\"getTargets with template error: %v\", err)\n\t}\n\tif res2[\"a\"].Name != \"a\" || res2[\"a\"].Address != \"10.0.0.1\" {\n\t\tt.Fatalf(\"unexpected a: %#v\", res2[\"a\"])\n\t}\n\t// address missing -> should default to name\n\tif res2[\"b\"].Name != \"b\" || res2[\"b\"].Address != \"b\" {\n\t\tt.Fatalf(\"unexpected b: %#v\", res2[\"b\"])\n\t}\n}\n\nfunc TestInitializeAction(t *testing.T) {\n\thl := newTestLoader(t)\n\t// register a temporary action type\n\torig := actions.Actions[\"fake\"]\n\tactions.Actions[\"fake\"] = func() actions.Action { return &fakeAction{} }\n\tdefer func() { actions.Actions[\"fake\"] = orig }()\n\t// success\n\ta, err := hl.initializeAction(map[string]interface{}{\"type\": \"fake\", \"name\": \"x\"})\n\tif err != nil || a == nil {\n\t\tt.Fatalf(\"expected success, got err=%v action=%v\", err, a)\n\t}\n\t// unknown type\n\ta, err = hl.initializeAction(map[string]interface{}{\"type\": \"does-not-exist\"})\n\tif err == nil || a != nil {\n\t\tt.Fatalf(\"expected error for unknown type\")\n\t}\n\t// missing type\n\ta, err = hl.initializeAction(map[string]interface{}{})\n\tif err == nil || a != nil {\n\t\tt.Fatalf(\"expected error for missing type\")\n\t}\n}\n\nfunc TestRunOnAddActions_ErrorRemovesTarget(t *testing.T) {\n\thl := newTestLoader(t)\n\thl.addActions = []actions.Action{&fakeAction{name: \"bad\", fail: true}}\n\thl.numActions = len(hl.addActions)\n\thl.lastTargets[\"t1\"] = &types.TargetConfig{Name: \"t1\"}\n\tctx := context.Background()\n\t// should return error and remove t1 from lastTargets\n\tif err := hl.runOnAddActions(ctx, \"t1\", 
map[string]*types.TargetConfig{\"t1\": {Name: \"t1\"}}); err == nil {\n\t\tt.Fatal(\"expected error from failing action\")\n\t}\n\thl.m.RLock()\n\t_, exists := hl.lastTargets[\"t1\"]\n\thl.m.RUnlock()\n\tif exists {\n\t\tt.Fatal(\"expected t1 to be removed from lastTargets\")\n\t}\n}\n\nfunc TestUpdateTargets_NoChange_NoOp(t *testing.T) {\n\thl := newTestLoader(t)\n\thl.numActions = 0\n\t// two identical targets in lastTargets and tcs\n\tt1 := &types.TargetConfig{Name: \"t1\", Address: \"1.1.1.1\"}\n\tt2 := &types.TargetConfig{Name: \"t2\", Address: \"2.2.2.2\"}\n\thl.lastTargets[\"t1\"] = &types.TargetConfig{Name: \"t1\", Address: \"1.1.1.1\"}\n\thl.lastTargets[\"t2\"] = &types.TargetConfig{Name: \"t2\", Address: \"2.2.2.2\"}\n\tcalled := 0\n\thl.targetConfigFn = func(tc *types.TargetConfig) error { called++; return nil }\n\tch := make(chan *loaders.TargetOperation, 1)\n\thl.updateTargets(context.Background(), map[string]*types.TargetConfig{\"t1\": t1, \"t2\": t2}, ch)\n\tselect {\n\tcase op := <-ch:\n\t\tt.Fatalf(\"unexpected op received: %+v\", op)\n\tdefault:\n\t\t// ok, no op expected\n\t}\n\tif called != 2 {\n\t\tt.Fatalf(\"expected targetConfigFn to be called twice, got %d\", called)\n\t}\n}\n\nfunc TestUpdateTargets_Add(t *testing.T) {\n\thl := newTestLoader(t)\n\thl.numActions = 0\n\tt1 := &types.TargetConfig{Name: \"t1\", Address: \"1.1.1.1\"}\n\tch := make(chan *loaders.TargetOperation, 1)\n\thl.updateTargets(context.Background(), map[string]*types.TargetConfig{\"t1\": t1}, ch)\n\tselect {\n\tcase op := <-ch:\n\t\tif len(op.Add) != 1 || len(op.Del) != 0 {\n\t\t\tt.Fatalf(\"unexpected op: %+v\", op)\n\t\t}\n\t\tif _, ok := hl.lastTargets[\"t1\"]; !ok {\n\t\t\tt.Fatal(\"expected t1 to be added to lastTargets\")\n\t\t}\n\tcase <-time.After(time.Second):\n\t\tt.Fatal(\"timed out waiting for op\")\n\t}\n}\n\nfunc TestUpdateTargets_Delete(t *testing.T) {\n\thl := newTestLoader(t)\n\thl.numActions = 0\n\thl.lastTargets[\"t1\"] = &types.TargetConfig{Name: 
\"t1\", Address: \"1.1.1.1\"}\n\tch := make(chan *loaders.TargetOperation, 1)\n\thl.updateTargets(context.Background(), map[string]*types.TargetConfig{}, ch)\n\tselect {\n\tcase op := <-ch:\n\t\tif len(op.Add) != 0 || len(op.Del) != 1 || op.Del[0] != \"t1\" {\n\t\t\tt.Fatalf(\"unexpected op: %+v\", op)\n\t\t}\n\t\tif _, ok := hl.lastTargets[\"t1\"]; ok {\n\t\t\tt.Fatal(\"expected t1 to be deleted from lastTargets\")\n\t\t}\n\tcase <-time.After(time.Second):\n\t\tt.Fatal(\"timed out waiting for op\")\n\t}\n}\n\nfunc TestUpdateTargets_Change_TriggersDelAndAdd(t *testing.T) {\n\thl := newTestLoader(t)\n\thl.numActions = 0\n\thl.lastTargets[\"t1\"] = &types.TargetConfig{Name: \"t1\", Address: \"1.1.1.1\"}\n\tnewT1 := &types.TargetConfig{Name: \"t1\", Address: \"1.1.1.2\"}\n\tch := make(chan *loaders.TargetOperation, 1)\n\thl.updateTargets(context.Background(), map[string]*types.TargetConfig{\"t1\": newT1}, ch)\n\tselect {\n\tcase op := <-ch:\n\t\tif len(op.Add) != 1 || len(op.Del) != 1 || op.Del[0] != \"t1\" {\n\t\t\tt.Fatalf(\"unexpected op: %+v\", op)\n\t\t}\n\t\tif lt, ok := hl.lastTargets[\"t1\"]; !ok || lt.Address != \"1.1.1.2\" {\n\t\t\tt.Fatalf(\"expected lastTargets to have updated address, got: %+v\", lt)\n\t\t}\n\tcase <-time.After(time.Second):\n\t\tt.Fatal(\"timed out waiting for op\")\n\t}\n}\n\nfunc TestUpdateTargets_Change_TriggersRename(t *testing.T) {\n\thl := newTestLoader(t)\n\thl.numActions = 0\n\thl.lastTargets[\"t1\"] = &types.TargetConfig{Name: \"t1\", Address: \"1.1.1.1\"}\n\tnewT2 := &types.TargetConfig{Name: \"t2\", Address: \"1.1.1.1\"}\n\tch := make(chan *loaders.TargetOperation, 1)\n\thl.updateTargets(context.Background(), map[string]*types.TargetConfig{\"t2\": newT2}, ch)\n\tselect {\n\tcase op := <-ch:\n\t\tif len(op.Add) != 1 || len(op.Del) != 1 || op.Del[0] != \"t1\" {\n\t\t\tt.Fatalf(\"unexpected op: %+v\", op)\n\t\t}\n\t\tif _, ok := hl.lastTargets[\"t2\"]; !ok {\n\t\t\tt.Fatal(\"expected t2 to be added to 
lastTargets\")\n\t\t}\n\tcase <-time.After(time.Second):\n\t\tt.Fatal(\"timed out waiting for op\")\n\t}\n}\n"
  },
  {
    "path": "pkg/loaders/http_loader/options.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage http_loader\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc (h *httpLoader) RegisterMetrics(reg *prometheus.Registry) {\n\tif !h.cfg.EnableMetrics {\n\t\treturn\n\t}\n\tif reg == nil {\n\t\th.logger.Printf(\"ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`\")\n\t\treturn\n\t}\n\tif err := registerMetrics(reg); err != nil {\n\t\th.logger.Printf(\"failed to register metrics: %v\", err)\n\t}\n}\n\nfunc (h *httpLoader) WithActions(acts map[string]map[string]interface{}) {\n\th.actionsConfig = acts\n}\n\nfunc (h *httpLoader) WithTargetsDefaults(fn func(tc *types.TargetConfig) error) {\n\th.targetConfigFn = fn\n}\n"
  },
  {
    "path": "pkg/loaders/loaders.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage loaders\n\nimport (\n\t\"context\"\n\t\"log\"\n\t\"maps\"\n\t\"reflect\"\n\n\t\"github.com/mitchellh/mapstructure\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\n// TargetLoader discovers a set of target configurations for gNMIc to run RPCs against.\n// RunOnce should return a map of target configs and is meant to be used with Unary RPCs.\n// Start runs a goroutine in the background that updates added/removed target configs on the\n// returned channel.\ntype TargetLoader interface {\n\t// Init initializes the target loader given the config, logger and options\n\tInit(ctx context.Context, cfg map[string]interface{}, l *log.Logger, opts ...Option) error\n\t// RunOnce runs the loader only once, returning a map of target configs\n\tRunOnce(ctx context.Context) (map[string]*types.TargetConfig, error)\n\t// Start starts the target loader, running periodic polls or a long watch.\n\t// It returns a channel of TargetOperation from which the function caller can\n\t// receive the added/removed target configs\n\tStart(context.Context) chan *TargetOperation\n\t// RegsiterMetrics registers the loader metrics with the provided registry\n\tRegisterMetrics(*prometheus.Registry)\n\t// WithActions passes the actions configuration to the target loader\n\tWithActions(map[string]map[string]interface{})\n\t// WithTargetsDefaults passes a callback function that sets the target config defaults\n\tWithTargetsDefaults(func(tc *types.TargetConfig) 
error)\n}\n\ntype Initializer func() TargetLoader\n\nvar Loaders = map[string]Initializer{}\n\nvar LoadersTypes = []string{\n\t\"file\",\n\t\"consul\",\n\t\"docker\",\n\t\"http\",\n}\n\nfunc Register(name string, initFn Initializer) {\n\tLoaders[name] = initFn\n}\n\ntype TargetOperation struct {\n\tAdd map[string]*types.TargetConfig\n\tDel []string\n}\n\nfunc DecodeConfig(src, dst interface{}) error {\n\tdecoder, err := mapstructure.NewDecoder(\n\t\t&mapstructure.DecoderConfig{\n\t\t\tDecodeHook: mapstructure.StringToTimeDurationHookFunc(),\n\t\t\tResult:     dst,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn decoder.Decode(src)\n}\n\nfunc Diff(currentMap, newMap map[string]*types.TargetConfig) *TargetOperation {\n\tresult := &TargetOperation{\n\t\tAdd: make(map[string]*types.TargetConfig, 0),\n\t\tDel: make([]string, 0),\n\t}\n\t// handle removed and added targets\n\tif len(currentMap) == 0 {\n\t\tmaps.Copy(result.Add, newMap)\n\t\treturn result\n\t}\n\tif len(newMap) == 0 {\n\t\tfor name := range currentMap {\n\t\t\tresult.Del = append(result.Del, name)\n\t\t}\n\t\treturn result\n\t}\n\tfor n, t := range newMap {\n\t\tif _, ok := currentMap[n]; !ok {\n\t\t\tresult.Add[n] = t\n\t\t}\n\t}\n\tfor n := range currentMap {\n\t\tif _, ok := newMap[n]; !ok {\n\t\t\tresult.Del = append(result.Del, n)\n\t\t}\n\t}\n\t// handle changes\n\tfor n, currentVal := range currentMap {\n\t\tnewVal, ok := newMap[n]\n\t\t// we don't have the target in the new config,\n\t\t// already handled above\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\t// if any target parameter changes, we need to remove\n\t\t// and re-add\n\t\t// the only case I see where we wouldn't necessarily need to restart the actual GRPC connection\n\t\t// is if Tags and EventTags changed, we could just apply the new tags internally (but right now it's done in the StartCollector phase)\n\t\tif !reflect.DeepEqual(currentVal, newVal) {\n\t\t\tresult.Add[n] = newVal\n\t\t\tresult.Del = append(result.Del, 
n)\n\t\t}\n\t}\n\treturn result\n}\n"
  },
  {
    "path": "pkg/loaders/loaders_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage loaders\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\nvar testSet = map[string]struct {\n\tm1, m2 map[string]*types.TargetConfig\n\toutput *TargetOperation\n}{\n\t\"t1\": {\n\t\tm1: nil,\n\t\tm2: nil,\n\t\toutput: &TargetOperation{\n\t\t\tAdd: make(map[string]*types.TargetConfig, 0),\n\t\t\tDel: make([]string, 0),\n\t\t},\n\t},\n\t\"t2\": {\n\t\tm1: nil,\n\t\tm2: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\"},\n\t\t},\n\t\toutput: &TargetOperation{\n\t\t\tAdd: map[string]*types.TargetConfig{\n\t\t\t\t\"target1\": {\n\t\t\t\t\tName: \"target1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDel: make([]string, 0),\n\t\t},\n\t},\n\t\"t3\": {\n\t\tm1: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\"},\n\t\t},\n\t\tm2: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\"},\n\t\t},\n\t\toutput: &TargetOperation{\n\t\t\tAdd: make(map[string]*types.TargetConfig, 0),\n\t\t\tDel: make([]string, 0),\n\t\t},\n\t},\n\t\"t4\": {\n\t\tm1: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\"},\n\t\t\t\"target2\": {Name: \"target2\"},\n\t\t},\n\t\tm2: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\"},\n\t\t\t\"target2\": {Name: \"target2\"},\n\t\t},\n\t\toutput: &TargetOperation{\n\t\t\tAdd: make(map[string]*types.TargetConfig, 0),\n\t\t\tDel: make([]string, 0),\n\t\t},\n\t},\n\t\"t5\": {\n\t\tm1: map[string]*types.TargetConfig{\n\t\t\t\"target1\": 
{Name: \"target1\"},\n\t\t},\n\t\tm2: nil,\n\t\toutput: &TargetOperation{\n\t\t\tAdd: make(map[string]*types.TargetConfig, 0),\n\t\t\tDel: []string{\"target1\"},\n\t\t},\n\t},\n\t\"t6\": {\n\t\tm1: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\"},\n\t\t},\n\t\tm2: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\"},\n\t\t\t\"target2\": {Name: \"target2\"},\n\t\t},\n\t\toutput: &TargetOperation{\n\t\t\tAdd: map[string]*types.TargetConfig{\n\t\t\t\t\"target2\": {\n\t\t\t\t\tName: \"target2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDel: make([]string, 0),\n\t\t},\n\t},\n\t\"t7\": {\n\t\tm1: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\"},\n\t\t},\n\t\tm2: map[string]*types.TargetConfig{\n\t\t\t\"target2\": {Name: \"target2\"},\n\t\t},\n\t\toutput: &TargetOperation{\n\t\t\tAdd: map[string]*types.TargetConfig{\n\t\t\t\t\"target2\": {\n\t\t\t\t\tName: \"target2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDel: []string{\"target1\"},\n\t\t},\n\t},\n\t\"t8\": {\n\t\tm1: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\"},\n\t\t},\n\t\tm2: map[string]*types.TargetConfig{\n\t\t\t\"target2\": {Name: \"target2\"},\n\t\t\t\"target3\": {Name: \"target3\"},\n\t\t},\n\t\toutput: &TargetOperation{\n\t\t\tAdd: map[string]*types.TargetConfig{\n\t\t\t\t\"target2\": {\n\t\t\t\t\tName: \"target2\",\n\t\t\t\t},\n\t\t\t\t\"target3\": {\n\t\t\t\t\tName: \"target3\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDel: []string{\"target1\"},\n\t\t},\n\t},\n\t\"t9\": {\n\t\tm1: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\"},\n\t\t\t\"target2\": {Name: \"target2\"},\n\t\t},\n\t\tm2: map[string]*types.TargetConfig{\n\t\t\t\"target2\": {Name: \"target2\"},\n\t\t\t\"target3\": {Name: \"target3\"},\n\t\t},\n\t\toutput: &TargetOperation{\n\t\t\tAdd: map[string]*types.TargetConfig{\n\t\t\t\t\"target3\": {\n\t\t\t\t\tName: \"target3\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDel: []string{\"target1\"},\n\t\t},\n\t},\n\t\"t10-target-change\": 
{\n\t\tm1: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Address: \"ip1\"},\n\t\t\t\"target2\": {Address: \"ip2\"},\n\t\t},\n\t\tm2: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Address: \"ip1\"},\n\t\t\t\"target2\": {Address: \"ip2new\"},\n\t\t},\n\t\toutput: &TargetOperation{\n\t\t\tAdd: map[string]*types.TargetConfig{\n\t\t\t\t\"target2\": {\n\t\t\t\t\tAddress: \"ip2new\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tDel: []string{\"target2\"},\n\t\t},\n\t},\n\t\"t11-tags-change\": {\n\t\tm1: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\", Tags: []string{\"a\"}},\n\t\t},\n\t\tm2: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\", Tags: []string{\"a\", \"b\"}},\n\t\t},\n\t\toutput: &TargetOperation{\n\t\t\tAdd: map[string]*types.TargetConfig{\n\t\t\t\t\"target1\": {Name: \"target1\", Tags: []string{\"a\", \"b\"}},\n\t\t\t},\n\t\t\tDel: []string{\"target1\"},\n\t\t},\n\t},\n\t\"t12-both-empty\": {\n\t\tm1: map[string]*types.TargetConfig{},\n\t\tm2: map[string]*types.TargetConfig{},\n\t\toutput: &TargetOperation{\n\t\t\tAdd: make(map[string]*types.TargetConfig, 0),\n\t\t\tDel: make([]string, 0),\n\t\t},\n\t},\n\t\"t13-slice-order-change\": {\n\t\tm1: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\", Tags: []string{\"a\", \"b\"}},\n\t\t},\n\t\tm2: map[string]*types.TargetConfig{\n\t\t\t\"target1\": {Name: \"target1\", Tags: []string{\"b\", \"a\"}},\n\t\t},\n\t\toutput: &TargetOperation{\n\t\t\tAdd: map[string]*types.TargetConfig{\n\t\t\t\t\"target1\": {Name: \"target1\", Tags: []string{\"b\", \"a\"}},\n\t\t\t},\n\t\t\tDel: []string{\"target1\"},\n\t\t},\n\t},\n}\n\nfunc TestGetInstancesTagsMatches(t *testing.T) {\n\tfor name, item := range testSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tres := Diff(item.m1, item.m2)\n\t\t\tt.Logf(\"exp value: %+v\", item.output)\n\t\t\tt.Logf(\"got value: %+v\", res)\n\t\t\tif len(item.output.Add) != len(res.Add) {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tif 
len(item.output.Del) != len(res.Del) {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tfor k, v1 := range item.output.Add {\n\t\t\t\tif v2, ok := res.Add[k]; ok {\n\t\t\t\t\tif v1.String() != v2.String() {\n\t\t\t\t\t\tt.Fail()\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tt.Fail()\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !cmp.Equal(item.output.Del, res.Del) {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/loaders/option.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage loaders\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\ntype Option func(TargetLoader)\n\nfunc WithRegistry(reg *prometheus.Registry) Option {\n\treturn func(l TargetLoader) {\n\t\tif reg == nil {\n\t\t\treturn\n\t\t}\n\t\tl.RegisterMetrics(reg)\n\t}\n}\n\nfunc WithActions(acts map[string]map[string]interface{}) Option {\n\treturn func(l TargetLoader) {\n\t\tif len(acts) == 0 {\n\t\t\treturn\n\t\t}\n\t\tl.WithActions(acts)\n\t}\n}\n\nfunc WithTargetsDefaults(fn func(tc *types.TargetConfig) error) Option {\n\treturn func(l TargetLoader) {\n\t\tl.WithTargetsDefaults(fn)\n\t}\n}\n"
  },
  {
    "path": "pkg/lockers/all/all.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage all\n\nimport (\n\t_ \"github.com/openconfig/gnmic/pkg/lockers/consul_locker\"\n\t_ \"github.com/openconfig/gnmic/pkg/lockers/k8s_locker\"\n\t_ \"github.com/openconfig/gnmic/pkg/lockers/redis_locker\"\n)\n"
  },
  {
    "path": "pkg/lockers/consul_locker/consul_locker.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage consul_locker\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/hashicorp/consul/api\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n)\n\nconst (\n\tdefaultSessionTTL = 10 * time.Second\n\tdefaultRetryTimer = 2 * time.Second\n\tdefaultDelay      = 5 * time.Second\n\tloggingPrefix     = \"[consul_locker] \"\n)\n\nfunc init() {\n\tlockers.Register(\"consul\", func() lockers.Locker {\n\t\treturn &ConsulLocker{\n\t\t\tCfg:             &config{},\n\t\t\tm:               new(sync.Mutex),\n\t\t\tacquiredlocks:   make(map[string]*locks),\n\t\t\tattemptinglocks: make(map[string]*locks),\n\t\t\tlogger:          log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\tservices:        make(map[string]context.CancelFunc),\n\t\t}\n\t})\n}\n\ntype ConsulLocker struct {\n\tCfg             *config\n\tclient          *api.Client\n\tlogger          *log.Logger\n\tm               *sync.Mutex\n\tacquiredlocks   map[string]*locks\n\tattemptinglocks map[string]*locks\n\tservices        map[string]context.CancelFunc\n}\n\ntype config struct {\n\tAddress     string        `mapstructure:\"address,omitempty\" json:\"address,omitempty\"`\n\tDatacenter  string        `mapstructure:\"datacenter,omitempty\" json:\"datacenter,omitempty\"`\n\tUsername    string        `mapstructure:\"username,omitempty\" json:\"username,omitempty\"`\n\tPassword    string        
`mapstructure:\"password,omitempty\" json:\"password,omitempty\"`\n\tToken       string        `mapstructure:\"token,omitempty\" json:\"token,omitempty\"`\n\tSessionTTL  time.Duration `mapstructure:\"session-ttl,omitempty\" json:\"session-ttl,omitempty\"`\n\tDelay       time.Duration `mapstructure:\"delay,omitempty\" json:\"delay,omitempty\"`\n\tRetryTimer  time.Duration `mapstructure:\"retry-timer,omitempty\" json:\"retry-timer,omitempty\"`\n\tRenewPeriod time.Duration `mapstructure:\"renew-period,omitempty\" json:\"renew-period,omitempty\"`\n\tDebug       bool          `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n}\n\ntype locks struct {\n\tsessionID string\n\tdoneChan  chan struct{}\n}\n\nfunc (c *ConsulLocker) Init(ctx context.Context, cfg map[string]interface{}, opts ...lockers.Option) error {\n\terr := lockers.DecodeConfig(cfg, c.Cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(c)\n\t}\n\terr = c.setDefaults()\n\tif err != nil {\n\t\treturn err\n\t}\n\tclientConfig := &api.Config{\n\t\tAddress:    c.Cfg.Address,\n\t\tScheme:     \"http\",\n\t\tDatacenter: c.Cfg.Datacenter,\n\t\tToken:      c.Cfg.Token,\n\t}\n\tif c.Cfg.Username != \"\" && c.Cfg.Password != \"\" {\n\t\tclientConfig.HttpAuth = &api.HttpBasicAuth{\n\t\t\tUsername: c.Cfg.Username,\n\t\t\tPassword: c.Cfg.Password,\n\t\t}\n\t}\n\tc.client, err = api.NewClient(clientConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\tb, _ := json.Marshal(c.Cfg)\n\tc.logger.Printf(\"initialized consul locker with cfg=%s\", string(b))\n\treturn nil\n}\n\nfunc (c *ConsulLocker) Lock(ctx context.Context, key string, val []byte) (bool, error) {\n\tvar err error\n\tvar acquired bool\n\twriteOpts := new(api.WriteOptions)\n\twriteOpts = writeOpts.WithContext(ctx)\n\tkvPair := &api.KVPair{Key: key, Value: val}\n\tdoneChan := make(chan struct{})\n\tdefer func() {\n\t\tc.m.Lock()\n\t\tdefer c.m.Unlock()\n\t\tdelete(c.attemptinglocks, key)\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase 
<-ctx.Done():\n\t\t\treturn false, ctx.Err()\n\t\tcase <-doneChan:\n\t\t\treturn false, lockers.ErrCanceled\n\t\tdefault:\n\t\t\tacquired = false\n\t\t\tkvPair.Session, _, err = c.client.Session().Create(\n\t\t\t\t&api.SessionEntry{\n\t\t\t\t\tBehavior:  \"delete\",\n\t\t\t\t\tTTL:       c.Cfg.SessionTTL.String(),\n\t\t\t\t\tLockDelay: c.Cfg.Delay,\n\t\t\t\t},\n\t\t\t\twriteOpts,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"failed creating session: %v\", err)\n\t\t\t\ttime.Sleep(c.Cfg.RetryTimer)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.m.Lock()\n\t\t\tc.attemptinglocks[key] = &locks{sessionID: kvPair.Session, doneChan: doneChan}\n\t\t\tc.m.Unlock()\n\t\t\tacquired, _, err = c.client.KV().Acquire(kvPair, writeOpts)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"failed acquiring lock to %q: %v\", kvPair.Key, err)\n\t\t\t\ttime.Sleep(c.Cfg.RetryTimer)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif acquired {\n\t\t\t\tc.m.Lock()\n\t\t\t\tc.acquiredlocks[key] = &locks{sessionID: kvPair.Session, doneChan: doneChan}\n\t\t\t\tc.m.Unlock()\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\tif c.Cfg.Debug {\n\t\t\t\tc.logger.Printf(\"failed acquiring lock to %q: already locked\", kvPair.Key)\n\t\t\t}\n\t\t\ttime.Sleep(c.Cfg.RetryTimer)\n\t\t}\n\t}\n}\n\nfunc (c *ConsulLocker) KeepLock(ctx context.Context, key string) (chan struct{}, chan error) {\n\twriteOpts := new(api.WriteOptions)\n\twriteOpts = writeOpts.WithContext(ctx)\n\n\tc.m.Lock()\n\tsessionID := \"\"\n\tdoneChan := make(chan struct{})\n\tif l, ok := c.acquiredlocks[key]; ok {\n\t\tsessionID = l.sessionID\n\t\tdoneChan = l.doneChan\n\t}\n\tc.m.Unlock()\n\terrChan := make(chan error)\n\tgo func() {\n\t\tif sessionID == \"\" {\n\t\t\terrChan <- fmt.Errorf(\"unknown key\")\n\t\t\tclose(doneChan)\n\t\t\treturn\n\t\t}\n\t\terr := c.client.Session().RenewPeriodic(c.Cfg.RenewPeriod.String(), sessionID, writeOpts, doneChan)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t}\n\t}()\n\n\treturn doneChan, errChan\n}\n\nfunc (c 
*ConsulLocker) Unlock(ctx context.Context, key string) error {\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\tif lock, ok := c.acquiredlocks[key]; ok {\n\t\tclose(lock.doneChan)\n\t\twrOpts := new(api.WriteOptions)\n\t\t_, err := c.client.KV().Delete(key, wrOpts.WithContext(ctx))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = c.client.Session().Destroy(lock.sessionID, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdelete(c.acquiredlocks, key)\n\t\treturn nil\n\t}\n\tif lock, ok := c.attemptinglocks[key]; ok {\n\t\tclose(lock.doneChan)\n\t\t_, err := c.client.Session().Destroy(lock.sessionID, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// remove the entry from the attempting-locks map (it was mistakenly\n\t\t// deleted from acquiredlocks before, leaking the attempting entry)\n\t\tdelete(c.attemptinglocks, key)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"unlock failed: unknown key %q\", key)\n}\n\n// Stop releases all acquired locks. The keys are snapshotted under the mutex\n// and unlocked after releasing it: sync.Mutex is not reentrant, so calling\n// Unlock (which locks c.m) while still holding c.m would deadlock.\nfunc (c *ConsulLocker) Stop() error {\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tc.m.Lock()\n\tkeys := make([]string, 0, len(c.acquiredlocks))\n\tfor k := range c.acquiredlocks {\n\t\tkeys = append(keys, k)\n\t}\n\tc.m.Unlock()\n\tfor _, k := range keys {\n\t\tc.Unlock(ctx, k)\n\t}\n\treturn nil\n}\n\nfunc (c *ConsulLocker) SetLogger(logger *log.Logger) {\n\tif logger != nil && c.logger != nil {\n\t\tc.logger.SetOutput(logger.Writer())\n\t\tc.logger.SetFlags(logger.Flags())\n\t}\n}\n\n// helpers\n\nfunc (c *ConsulLocker) setDefaults() error {\n\tif c.Cfg.SessionTTL <= 0 {\n\t\tc.Cfg.SessionTTL = defaultSessionTTL\n\t}\n\tif c.Cfg.RetryTimer <= 0 {\n\t\tc.Cfg.RetryTimer = defaultRetryTimer\n\t}\n\tif c.Cfg.RenewPeriod <= 0 || c.Cfg.RenewPeriod >= c.Cfg.SessionTTL {\n\t\tc.Cfg.RenewPeriod = c.Cfg.SessionTTL / 2\n\t}\n\tif c.Cfg.Delay < 0 {\n\t\tc.Cfg.Delay = defaultDelay\n\t}\n\tif c.Cfg.Delay > 60*time.Second {\n\t\tc.Cfg.Delay = 60 * time.Second\n\t}\n\treturn nil\n}\n\nfunc (c *ConsulLocker) String() string {\n\tb, err := json.Marshal(c.Cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n"
  },
  {
    "path": "pkg/lockers/consul_locker/consul_registration.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage consul_locker\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/hashicorp/consul/api\"\n\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n)\n\nconst defaultWatchTimeout = 1 * time.Minute\n\nfunc (c *ConsulLocker) Register(ctx context.Context, s *lockers.ServiceRegistration) error {\n\tservice := &api.AgentServiceRegistration{\n\t\tID:      s.ID,\n\t\tName:    s.Name,\n\t\tAddress: s.Address,\n\t\tPort:    s.Port,\n\t\tTags:    s.Tags,\n\t\tChecks: api.AgentServiceChecks{\n\t\t\t{\n\t\t\t\tTTL:                            s.TTL.String(),\n\t\t\t\tDeregisterCriticalServiceAfter: \"5s\",\n\t\t\t},\n\t\t},\n\t}\n\tsctx, cancel := context.WithCancel(ctx)\n\tc.m.Lock()\n\tc.services[s.ID] = cancel\n\tc.m.Unlock()\n\tttlCheckID := \"service:\" + s.ID\n\terr := c.client.Agent().ServiceRegister(service)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// keep service with ttl\n\terr = c.client.Agent().UpdateTTL(ttlCheckID, \"\", api.HealthPassing)\n\tif err != nil {\n\t\treturn err\n\t}\n\tticker := time.NewTicker(s.TTL / 2)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\terr = c.client.Agent().UpdateTTL(ttlCheckID, \"\", api.HealthPassing)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-sctx.Done():\n\t\t\terr = c.client.Agent().UpdateTTL(ttlCheckID, sctx.Err().Error(), api.HealthCritical)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (c *ConsulLocker) Deregister(s string) error 
{\n\tc.m.Lock()\n\tif cfn, ok := c.services[s]; ok {\n\t\tcfn()\n\t}\n\tc.m.Unlock()\n\treturn c.client.Agent().ServiceDeregister(s)\n}\n\nfunc (c *ConsulLocker) WatchServices(ctx context.Context, serviceName string, tags []string, sChan chan<- []*lockers.Service, watchTimeout time.Duration) error {\n\tif watchTimeout <= 0 {\n\t\twatchTimeout = defaultWatchTimeout\n\t}\n\tvar index uint64\n\tqOpts := &api.QueryOptions{\n\t\tWaitIndex: index,\n\t\tWaitTime:  watchTimeout,\n\t}\n\tvar err error\n\t// long blocking watch\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t\tif c.Cfg.Debug {\n\t\t\t\tc.logger.Printf(\"(re)starting watch service=%q, index=%d\", serviceName, qOpts.WaitIndex)\n\t\t\t}\n\t\t\tindex, err = c.watch(ctx, qOpts, serviceName, tags, sChan)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Printf(\"service %q watch failed: %v\", serviceName, err)\n\t\t\t}\n\t\t\tif index == 1 {\n\t\t\t\tqOpts.WaitIndex = index\n\t\t\t\ttime.Sleep(2 * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif index > qOpts.WaitIndex {\n\t\t\t\tqOpts.WaitIndex = index\n\t\t\t}\n\t\t\t// reset WaitIndex if the returned index decreases\n\t\t\t// https://www.consul.io/api-docs/features/blocking#implementation-details\n\t\t\tif index < qOpts.WaitIndex {\n\t\t\t\tqOpts.WaitIndex = 0\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *ConsulLocker) watch(ctx context.Context, qOpts *api.QueryOptions, serviceName string, tags []string, sChan chan<- []*lockers.Service) (uint64, error) {\n\tqOpts = qOpts.WithContext(ctx)\n\tse, meta, err := c.client.Health().ServiceMultipleTags(serviceName, tags, true, qOpts)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif meta == nil {\n\t\tmeta = new(api.QueryMeta)\n\t}\n\tif meta.LastIndex == qOpts.WaitIndex {\n\t\tc.logger.Printf(\"service=%q did not change, lastIndex=%d\", serviceName, meta.LastIndex)\n\t\treturn meta.LastIndex, nil\n\t}\n\tif len(se) == 0 {\n\t\treturn 1, nil\n\t}\n\tnewSrvs := make([]*lockers.Service, 0)\n\tfor 
_, srv := range se {\n\t\taddr := srv.Service.Address\n\t\tif addr == \"\" {\n\t\t\taddr = srv.Node.Address\n\t\t}\n\t\tnewSrvs = append(newSrvs, &lockers.Service{\n\t\t\tID:      srv.Service.ID,\n\t\t\tAddress: net.JoinHostPort(addr, strconv.Itoa(srv.Service.Port)),\n\t\t\tTags:    srv.Service.Tags,\n\t\t})\n\t}\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn 0, ctx.Err()\n\tcase sChan <- newSrvs:\n\t}\n\treturn meta.LastIndex, nil\n}\n\nfunc (c *ConsulLocker) GetServices(ctx context.Context, serviceName string, tags []string) ([]*lockers.Service, error) {\n\tse, _, err := c.client.Health().ServiceMultipleTags(serviceName, tags, true, &api.QueryOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewSrvs := make([]*lockers.Service, 0)\n\tfor _, srv := range se {\n\t\taddr := srv.Service.Address\n\t\tif addr == \"\" {\n\t\t\taddr = srv.Node.Address\n\t\t}\n\t\tnewSrvs = append(newSrvs, &lockers.Service{\n\t\t\tID:      srv.Service.ID,\n\t\t\tAddress: net.JoinHostPort(addr, strconv.Itoa(srv.Service.Port)),\n\t\t\tTags:    srv.Service.Tags,\n\t\t})\n\t}\n\treturn newSrvs, nil\n}\n\nfunc (c *ConsulLocker) IsLocked(ctx context.Context, k string) (bool, error) {\n\tqOpts := &api.QueryOptions{}\n\tkv, _, err := c.client.KV().Get(k, qOpts.WithContext(ctx))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif kv == nil {\n\t\treturn false, nil\n\t}\n\treturn kv.LockIndex > 0, nil\n}\n\nfunc (c *ConsulLocker) List(ctx context.Context, prefix string) (map[string]string, error) {\n\tqOpts := &api.QueryOptions{}\n\tkvs, _, err := c.client.KV().List(prefix, qOpts.WithContext(ctx))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif kvs == nil {\n\t\treturn nil, err\n\t}\n\trs := make(map[string]string)\n\tfor _, kv := range kvs {\n\t\trs[kv.Key] = string(kv.Value)\n\t}\n\treturn rs, nil\n}\n"
  },
  {
    "path": "pkg/lockers/k8s_locker/k8s_locker.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage k8s_locker\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tcoordinationv1 \"k8s.io/api/coordination/v1\"\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/rest\"\n\t\"k8s.io/utils/ptr\"\n\n\t\"github.com/google/uuid\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n)\n\nconst (\n\tdefaultLeaseDuration = 10 * time.Second\n\tdefaultRetryTimer    = 2 * time.Second\n\tloggingPrefix        = \"[k8s_locker] \"\n\tdefaultNamespace     = \"default\"\n\torigKeyName          = \"original-key\"\n)\n\nfunc init() {\n\tlockers.Register(\"k8s\", func() lockers.Locker {\n\t\treturn &k8sLocker{\n\t\t\tCfg:             &config{},\n\t\t\tm:               new(sync.RWMutex),\n\t\t\tacquiredlocks:   make(map[string]*lock),\n\t\t\tattemptinglocks: make(map[string]*lock),\n\t\t\tlogger:          log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t}\n\t})\n}\n\ntype k8sLocker struct {\n\tCfg             *config\n\tclientset       *kubernetes.Clientset\n\tlogger          *log.Logger\n\tm               *sync.RWMutex\n\tacquiredlocks   map[string]*lock\n\tattemptinglocks map[string]*lock\n\n\tidentity string // hostname\n}\n\ntype config struct {\n\tNamespace     string        `mapstructure:\"namespace,omitempty\" json:\"namespace,omitempty\"`\n\tLeaseDuration time.Duration 
`mapstructure:\"lease-duration,omitempty\" json:\"lease-duration,omitempty\"`\n\tRenewPeriod   time.Duration `mapstructure:\"renew-period,omitempty\" json:\"renew-period,omitempty\"`\n\tRetryTimer    time.Duration `mapstructure:\"retry-timer,omitempty\" json:\"retry-timer,omitempty\"`\n\tDebug         bool          `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n}\n\ntype lock struct {\n\tlease    *coordinationv1.Lease\n\tdoneChan chan struct{}\n}\n\nfunc (k *k8sLocker) Init(ctx context.Context, cfg map[string]interface{}, opts ...lockers.Option) error {\n\terr := lockers.DecodeConfig(cfg, k.Cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(k)\n\t}\n\terr = k.setDefaults()\n\tif err != nil {\n\t\treturn err\n\t}\n\tinClusterConfig, err := rest.InClusterConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tk.clientset, err = kubernetes.NewForConfig(inClusterConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\tk.identity = k.getIdentity()\n\treturn nil\n}\n\nfunc (k *k8sLocker) Lock(ctx context.Context, key string, val []byte) (bool, error) {\n\tnkey := strings.ReplaceAll(key, \"/\", \"-\")\n\tdoneChan := make(chan struct{})\n\tl := &coordinationv1.Lease{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tAnnotations: map[string]string{\n\t\t\t\torigKeyName: key,\n\t\t\t},\n\t\t\tName:      nkey,\n\t\t\tNamespace: k.Cfg.Namespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app\": \"gnmic\",\n\t\t\t\tnkey:  string(val),\n\t\t\t},\n\t\t},\n\t\tSpec: coordinationv1.LeaseSpec{\n\t\t\tHolderIdentity:       ptr.To(k.identity),\n\t\t\tLeaseDurationSeconds: ptr.To(int32(k.Cfg.LeaseDuration / time.Second)),\n\t\t},\n\t}\n\tk.m.Lock()\n\tk.attemptinglocks[nkey] = &lock{\n\t\tlease:    l,\n\t\tdoneChan: doneChan,\n\t}\n\tk.m.Unlock()\n\t// cleanup when done\n\tdefer func() {\n\t\tk.m.Lock()\n\t\tdefer k.m.Unlock()\n\t\tdelete(k.attemptinglocks, nkey)\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn false, 
ctx.Err()\n\t\tcase <-doneChan:\n\t\t\treturn false, lockers.ErrCanceled\n\t\tdefault:\n\t\t\tnow := metav1.NowMicro()\n\t\t\tvar ol *coordinationv1.Lease\n\t\t\tvar err error\n\t\t\t// get or create\n\t\t\tol, err = k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Get(ctx, nkey, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\tif !errors.IsNotFound(err) {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\t// create lease\n\t\t\t\tk.logger.Printf(\"lease %q not found, creating it: %+v\", nkey, l.String())\n\t\t\t\tl.Spec.AcquireTime = &now\n\t\t\t\tl.Spec.RenewTime = &now\n\t\t\t\tol, err = k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Create(ctx, l, metav1.CreateOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tk.m.Lock()\n\t\t\t\tk.acquiredlocks[nkey] = &lock{\n\t\t\t\t\tlease:    ol,\n\t\t\t\t\tdoneChan: doneChan,\n\t\t\t\t}\n\t\t\t\tk.m.Unlock()\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\t// obtained, compare\n\t\t\tif ol != nil && ol.Spec.HolderIdentity != nil && *ol.Spec.HolderIdentity != \"\" {\n\t\t\t\tif k.Cfg.Debug {\n\t\t\t\t\tk.logger.Printf(\"%q held by other instance: %v\", ol.Name, *ol.Spec.HolderIdentity != k.identity)\n\t\t\t\t\tk.logger.Printf(\"%q lease has renewTime: %v\", ol.Name, ol.Spec.RenewTime != nil)\n\t\t\t\t}\n\t\t\t\tif *ol.Spec.HolderIdentity != k.identity && ol.Spec.RenewTime != nil {\n\t\t\t\t\texpectedRenewTime := ol.Spec.RenewTime.Add(time.Duration(*ol.Spec.LeaseDurationSeconds) * time.Second)\n\t\t\t\t\tif k.Cfg.Debug {\n\t\t\t\t\t\tk.logger.Printf(\"%q existing lease renew time %v\", ol.Name, ol.Spec.RenewTime)\n\t\t\t\t\t\tk.logger.Printf(\"%q expected lease renew time %v\", ol.Name, expectedRenewTime)\n\t\t\t\t\t\tk.logger.Printf(\"%q renew time passed: %v\", ol.Name, expectedRenewTime.Before(now.Time))\n\t\t\t\t\t}\n\t\t\t\t\tif !expectedRenewTime.Before(now.Time) {\n\t\t\t\t\t\tif k.Cfg.Debug {\n\t\t\t\t\t\t\tk.logger.Printf(\"%q is currently held by %s\", ol.Name, 
*ol.Spec.HolderIdentity)\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttime.Sleep(k.Cfg.RenewPeriod)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tk.logger.Printf(\"taking over lease %q\", nkey)\n\t\t\t// update the lease\n\t\t\tnow = metav1.NowMicro()\n\t\t\tl.Spec.AcquireTime = &now\n\t\t\tl.Spec.RenewTime = &now\n\t\t\t// set resource version to the latest value known\n\t\t\tl.SetResourceVersion(ol.GetResourceVersion())\n\t\t\tk.logger.Printf(\"%q updating with %+v\", l.Name, l)\n\t\t\tol, err = k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Update(ctx, l, metav1.UpdateOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tk.m.Lock()\n\t\t\tif lc, ok := k.acquiredlocks[nkey]; ok {\n\t\t\t\tlc.lease = ol\n\t\t\t} else {\n\t\t\t\tk.acquiredlocks[nkey] = &lock{lease: ol, doneChan: doneChan}\n\t\t\t}\n\t\t\tk.m.Unlock()\n\t\t\treturn true, nil\n\t\t}\n\t}\n}\n\nfunc (k *k8sLocker) KeepLock(ctx context.Context, key string) (chan struct{}, chan error) {\n\tdoneChan := make(chan struct{})\n\terrChan := make(chan error)\n\tnkey := strings.ReplaceAll(key, \"/\", \"-\")\n\n\tgo func() {\n\t\tdefer close(doneChan)\n\t\tticker := time.NewTicker(k.Cfg.RenewPeriod)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\terrChan <- ctx.Err()\n\t\t\t\treturn\n\t\t\tcase <-doneChan:\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tk.m.RLock()\n\t\t\t\tlock, ok := k.acquiredlocks[nkey]\n\t\t\t\tk.m.RUnlock()\n\t\t\t\tif !ok {\n\t\t\t\t\terrChan <- fmt.Errorf(\"unable to maintain lock %q: not found in acquiredlocks\", nkey)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tol, err := k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Get(ctx, nkey, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- fmt.Errorf(\"unable to maintain lock %q: %v\", nkey, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlock.lease.SetResourceVersion(ol.GetResourceVersion())\n\t\t\t\tswitch k.compareLeases(lock.lease, ol) {\n\t\t\t\tcase 0, 1:\n\t\t\t\t\tnow := 
metav1.NowMicro()\n\t\t\t\t\tlock.lease.Spec.AcquireTime = &now\n\t\t\t\t\tlock.lease.Spec.RenewTime = &now\n\t\t\t\t\tol, err = k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Update(ctx, lock.lease, metav1.UpdateOptions{})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\terrChan <- fmt.Errorf(\"unable to update lock %q: %v\", nkey, err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tk.m.Lock()\n\t\t\t\t\tif lock, ok := k.acquiredlocks[nkey]; ok {\n\t\t\t\t\t\tlock.lease = ol\n\t\t\t\t\t}\n\t\t\t\t\tk.m.Unlock()\n\t\t\t\tcase -1:\n\t\t\t\t\terrChan <- fmt.Errorf(\"%q failed to keep lease\", nkey)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn doneChan, errChan\n}\n\nfunc (k *k8sLocker) Unlock(ctx context.Context, key string) error {\n\tnkey := strings.ReplaceAll(key, \"/\", \"-\")\n\tk.m.Lock()\n\tdefer k.m.Unlock()\n\tk.unlock(ctx, nkey)\n\treturn nil\n}\n\n// assumes the mutex is locked\nfunc (k *k8sLocker) unlock(ctx context.Context, key string) error {\n\tif lock, ok := k.acquiredlocks[key]; ok {\n\t\tdelete(k.acquiredlocks, key)\n\t\treturn k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Delete(ctx, lock.lease.Name, metav1.DeleteOptions{})\n\t}\n\tif lock, ok := k.attemptinglocks[key]; ok {\n\t\tdelete(k.attemptinglocks, key)\n\t\tclose(lock.doneChan)\n\t\treturn k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Delete(ctx, lock.lease.Name, metav1.DeleteOptions{})\n\t}\n\treturn nil\n}\n\nfunc (k *k8sLocker) Stop() error {\n\tk.m.Lock()\n\tdefer k.m.Unlock()\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tfor key := range k.acquiredlocks {\n\t\tk.unlock(ctx, key)\n\t}\n\treturn nil\n}\n\nfunc (k *k8sLocker) SetLogger(logger *log.Logger) {\n\tif logger != nil && k.logger != nil {\n\t\tk.logger.SetOutput(logger.Writer())\n\t\tk.logger.SetFlags(logger.Flags())\n\t}\n}\n\n// helpers\n\nfunc (k *k8sLocker) setDefaults() error {\n\tif k.Cfg.Namespace == \"\" {\n\t\tk.Cfg.Namespace = 
defaultNamespace\n\t}\n\tif k.Cfg.LeaseDuration <= 0 {\n\t\tk.Cfg.LeaseDuration = defaultLeaseDuration\n\t}\n\tif k.Cfg.RenewPeriod <= 0 || k.Cfg.RenewPeriod >= k.Cfg.LeaseDuration {\n\t\tk.Cfg.RenewPeriod = k.Cfg.LeaseDuration / 2\n\t}\n\tif k.Cfg.RetryTimer <= 0 {\n\t\tk.Cfg.RetryTimer = defaultRetryTimer\n\t}\n\treturn nil\n}\n\nfunc (k *k8sLocker) String() string {\n\tb, err := json.Marshal(k.Cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\n// compares 2 Leases, assume l1 is not nil and has a valid holderIdentity value.\n// returns 0 if l1 and l2 have the same holder identity\n// return 1 if l2 is nil, has no holder or has an expired renewTime\n// returns -1 if l2 has another holder identity and has a valid renewTime\nfunc (l *k8sLocker) compareLeases(l1, l2 *coordinationv1.Lease) int {\n\tif l2 == nil {\n\t\treturn 1\n\t}\n\tif l2.Spec.HolderIdentity == nil {\n\t\treturn 1\n\t}\n\tnow := time.Now()\n\tif *l2.Spec.HolderIdentity == \"\" {\n\t\treturn 1\n\t}\n\tif *l1.Spec.HolderIdentity != *l2.Spec.HolderIdentity {\n\t\tif l2.Spec.RenewTime == nil {\n\t\t\treturn 1\n\t\t}\n\t\texpectedRenewTime := l2.Spec.RenewTime.Add(time.Duration(*l2.Spec.LeaseDurationSeconds) * time.Second)\n\t\tif expectedRenewTime.Before(now) {\n\t\t\treturn 1\n\t\t} else {\n\t\t\treturn -1\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc (l *k8sLocker) getIdentity() string {\n\tname, err := os.Hostname()\n\tif err != nil {\n\t\treturn uuid.NewString()\n\t}\n\treturn name\n}\n"
  },
  {
    "path": "pkg/lockers/k8s_locker/k8s_registration.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage k8s_locker\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\tcorev1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/fields\"\n\t\"k8s.io/apimachinery/pkg/watch\"\n\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n)\n\nconst defaultWatchTimeout = 10 * time.Second\n\nfunc (k *k8sLocker) Register(ctx context.Context, s *lockers.ServiceRegistration) error {\n\treturn nil\n}\n\nfunc (k *k8sLocker) Deregister(s string) error {\n\treturn nil\n}\n\nfunc (k *k8sLocker) WatchServices(ctx context.Context, serviceName string, tags []string, sChan chan<- []*lockers.Service, watchTimeout time.Duration) error {\n\tif watchTimeout <= 0 {\n\t\twatchTimeout = defaultWatchTimeout\n\t}\n\tresourceVersion := \"\"\n\tvar err error\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t\tresourceVersion, err = k.watch(ctx, serviceName, tags, sChan, watchTimeout, resourceVersion)\n\t\t\tif err != nil {\n\t\t\t\tk.logger.Printf(\"watch ended with error: %s\", err)\n\t\t\t\ttime.Sleep(k.Cfg.RetryTimer)\n\t\t\t} else if k.Cfg.Debug {\n\t\t\t\tk.logger.Print(\"watch timed out\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (k *k8sLocker) watch(ctx context.Context, serviceName string, _ []string, sChan chan<- []*lockers.Service, watchTimeout time.Duration, resourceVersion string) (string, error) {\n\ttimeoutSeconds := int64(watchTimeout.Seconds())\n\tlistopts := 
metav1.ListOptions{\n\t\tFieldSelector:   fields.OneTermEqualSelector(metav1.ObjectNameField, serviceName).String(),\n\t\tResourceVersion: resourceVersion,\n\t\tTimeoutSeconds:  &timeoutSeconds,\n\t}\n\tif k.Cfg.Debug {\n\t\tif resourceVersion == \"\" {\n\t\t\tk.logger.Print(\"starting watch beginning with unspecified resource version\")\n\t\t} else {\n\t\t\tk.logger.Printf(\"starting watch beginning with resource version %s\", resourceVersion)\n\t\t}\n\t}\n\twatched, err := k.clientset.CoreV1().Endpoints(k.Cfg.Namespace).Watch(ctx, listopts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer watched.Stop()\n\n\twatchChan := watched.ResultChan()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn \"\", ctx.Err()\n\t\tcase event := <-watchChan:\n\t\t\tswitch event.Type {\n\t\t\tcase watch.Modified, watch.Added:\n\t\t\t\tendpoints, ok := event.Object.(*corev1.Endpoints)\n\t\t\t\tif !ok {\n\t\t\t\t\t// this ought not to happen, but we should probably\n\t\t\t\t\t// start from scratch next time in case it does\n\t\t\t\t\treturn \"\", fmt.Errorf(\"error converting watch result to an endpoint\")\n\t\t\t\t}\n\t\t\t\tresourceVersion = endpoints.ResourceVersion\n\t\t\t\tif k.Cfg.Debug {\n\t\t\t\t\tk.logger.Printf(\"received watch event %s for resource version %s\", event.Type, resourceVersion)\n\t\t\t\t}\n\t\t\t\tsvcs, err := parseEndpoint(endpoints)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t\tsChan <- svcs\n\t\t\tcase \"\":\n\t\t\t\t// reached the timeout. return the version we last saw so\n\t\t\t\t// we can resume watching\n\t\t\t\treturn resourceVersion, nil\n\t\t\tdefault:\n\t\t\t\t// something else happened, including maybe the object we\n\t\t\t\t// were watching being deleted. 
we'll need to start the\n\t\t\t\t// next watch from scratch, so don't return the resource\n\t\t\t\t// version\n\t\t\t\treturn \"\", fmt.Errorf(\"unexpected watch event: %s\", event.Type)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc parseEndpoint(endpoint *corev1.Endpoints) ([]*lockers.Service, error) {\n\t// the service should only have a single port number assigned, so\n\t// all subsets should have the port number we're looking for\n\tif len(endpoint.Subsets) <= 0 {\n\t\treturn nil, fmt.Errorf(\"no subsets found in endpoint for service %s\", endpoint.Name)\n\t}\n\tif len(endpoint.Subsets[0].Ports) <= 0 {\n\t\treturn nil, fmt.Errorf(\"no ports found for service %s\", endpoint.Name)\n\t}\n\tport := endpoint.Subsets[0].Ports[0].Port\n\n\tservices := make([]*lockers.Service, 0, len(endpoint.Subsets[0].Addresses))\n\tfor _, subset := range endpoint.Subsets {\n\t\tfor _, addr := range subset.Addresses {\n\t\t\ttargetName := addr.IP\n\t\t\tif addr.TargetRef != nil {\n\t\t\t\ttargetName = addr.TargetRef.Name\n\t\t\t}\n\t\t\tls := &lockers.Service{\n\t\t\t\tID:      fmt.Sprintf(\"%s-api\", targetName),\n\t\t\t\tAddress: fmt.Sprintf(\"%s:%d\", addr.IP, port),\n\t\t\t\tTags: []string{\n\t\t\t\t\tfmt.Sprintf(\"instance-name=%s\", targetName),\n\t\t\t\t},\n\t\t\t}\n\t\t\tservices = append(services, ls)\n\t\t}\n\t}\n\n\treturn services, nil\n}\n\nfunc (k *k8sLocker) GetServices(ctx context.Context, serviceName string, tags []string) ([]*lockers.Service, error) {\n\tep, err := k.clientset.CoreV1().Endpoints(k.Cfg.Namespace).Get(ctx, serviceName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn parseEndpoint(ep)\n}\n\nfunc (k *k8sLocker) IsLocked(ctx context.Context, key string) (bool, error) {\n\tkey = strings.ReplaceAll(key, \"/\", \"-\")\n\tol, err := k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Get(ctx, key, metav1.GetOptions{})\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\tif ol == 
nil {\n\t\treturn false, nil\n\t}\n\tif ol.Spec.RenewTime == nil {\n\t\treturn false, nil\n\t}\n\tnow := metav1.NowMicro()\n\texpectedRenewTime := ol.Spec.RenewTime.Add(time.Duration(*ol.Spec.LeaseDurationSeconds) * time.Second)\n\treturn expectedRenewTime.After(now.Time), nil\n}\n\nfunc (k *k8sLocker) List(ctx context.Context, prefix string) (map[string]string, error) {\n\tll, err := k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).List(ctx,\n\t\tmetav1.ListOptions{\n\t\t\tLabelSelector: \"app=gnmic\",\n\t\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprefix = strings.ReplaceAll(prefix, \"/\", \"-\")\n\trs := make(map[string]string, len(ll.Items))\n\tfor _, l := range ll.Items {\n\t\tfor key, v := range l.Labels {\n\t\t\tif key == \"app\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.HasPrefix(key, prefix) {\n\t\t\t\tokey, ok := l.Annotations[origKeyName]\n\t\t\t\tif ok {\n\t\t\t\t\trs[okey] = v\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn rs, nil\n}\n"
  },
  {
    "path": "pkg/lockers/locker.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage lockers\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/mitchellh/mapstructure\"\n)\n\nvar ErrCanceled = errors.New(\"canceled\")\n\ntype Locker interface {\n\t// Init initialises the locker data, with the given configuration read from flags/files.\n\tInit(context.Context, map[string]any, ...Option) error\n\t// Stop is called when the locker instance is stopped. It should unlock all acquired locks.\n\tStop() error\n\tSetLogger(*log.Logger)\n\n\t// This is the Target locking logic.\n\n\t// Lock acquires a lock on given key.\n\tLock(context.Context, string, []byte) (bool, error)\n\t// KeepLock maintains the lock on the target.\n\tKeepLock(context.Context, string) (chan struct{}, chan error)\n\t// IsLocked replies if the target given as string is currently locked or not.\n\tIsLocked(context.Context, string) (bool, error)\n\t// Unlock unlocks the target lock.\n\tUnlock(context.Context, string) error\n\n\t// This is the instance registration logic.\n\n\t// Register registers this instance in the registry. It must also maintain the registration (called in a goroutine from the main). ServiceRegistration.ID contains the ID of the service to register.\n\tRegister(context.Context, *ServiceRegistration) error\n\t// Deregister removes this instance from the registry. 
This looks like it's not called.\n\tDeregister(string) error\n\n\t// GetServices must return the gnmic instances.\n\tGetServices(ctx context.Context, serviceName string, tags []string) ([]*Service, error)\n\t// WatchServices must push all existing discovered gnmic instances\n\t// into the provided channel.\n\tWatchServices(ctx context.Context, serviceName string, tags []string, ch chan<- []*Service, dur time.Duration) error\n\n\t// Mixed registration/target lock functions\n\n\t// List returns all locks that start with prefix string,\n\t// indexed by the lock name. Could be target locks or leader lock. It must return a map of matching keys to instance name.\n\tList(ctx context.Context, prefix string) (map[string]string, error)\n}\n\ntype Initializer func() Locker\n\nvar Lockers = map[string]Initializer{}\n\ntype Option func(Locker)\n\nfunc WithLogger(logger *log.Logger) Option {\n\treturn func(i Locker) {\n\t\ti.SetLogger(logger)\n\t}\n}\n\nvar LockerTypes = []string{\n\t\"consul\",\n\t\"k8s\",\n\t\"redis\",\n}\n\nfunc Register(name string, initFn Initializer) {\n\tLockers[name] = initFn\n}\n\nfunc DecodeConfig(src, dst interface{}) error {\n\tdecoder, err := mapstructure.NewDecoder(\n\t\t&mapstructure.DecoderConfig{\n\t\t\tDecodeHook: mapstructure.StringToTimeDurationHookFunc(),\n\t\t\tResult:     dst,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn decoder.Decode(src)\n}\n\ntype ServiceRegistration struct {\n\tID      string\n\tName    string\n\tAddress string\n\tPort    int\n\tTags    []string\n\tTTL     time.Duration\n}\n\ntype Service struct {\n\tID      string\n\tAddress string\n\tTags    []string\n}\n"
  },
  {
    "path": "pkg/lockers/redis_locker/redis_locker.go",
    "content": "package redis_locker\n\nimport (\n\t\"context\"\n\t\"crypto/rand\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/go-redsync/redsync/v4\"\n\t\"github.com/go-redsync/redsync/v4/redis/goredis/v9\"\n\tgoredislib \"github.com/redis/go-redis/v9\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n)\n\nconst (\n\tdefaultLeaseDuration = 10 * time.Second\n\tdefaultRetryTimer    = 2 * time.Second\n\tdefaultPollTimer     = 10 * time.Second\n\tloggingPrefix        = \"[redis_locker] \"\n)\n\nfunc init() {\n\tlockers.Register(\"redis\", func() lockers.Locker {\n\t\treturn &redisLocker{\n\t\t\tCfg:             &config{},\n\t\t\tm:               new(sync.RWMutex),\n\t\t\tacquiredLocks:   make(map[string]*redsync.Mutex),\n\t\t\tattemptingLocks: make(map[string]*redsync.Mutex),\n\t\t\tregisterLock:    make(map[string]context.CancelFunc),\n\t\t\tlogger:          log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t}\n\t})\n}\n\ntype redisLocker struct {\n\tCfg             *config\n\tlogger          *log.Logger\n\tm               *sync.RWMutex\n\tacquiredLocks   map[string]*redsync.Mutex\n\tattemptingLocks map[string]*redsync.Mutex\n\tregisterLock    map[string]context.CancelFunc\n\n\tclient      goredislib.UniversalClient\n\tredisLocker *redsync.Redsync\n}\n\ntype config struct {\n\tServers       []string      `mapstructure:\"servers,omitempty\" json:\"servers,omitempty\"`\n\tMasterName    string        `mapstructure:\"master-name,omitempty\" json:\"master-name,omitempty\"`\n\tPassword      string        `mapstructure:\"password,omitempty\" json:\"password,omitempty\"`\n\tLeaseDuration time.Duration `mapstructure:\"lease-duration,omitempty\" json:\"lease-duration,omitempty\"`\n\tRenewPeriod   time.Duration `mapstructure:\"renew-period,omitempty\" json:\"renew-period,omitempty\"`\n\tRetryTimer    time.Duration 
`mapstructure:\"retry-timer,omitempty\" json:\"retry-timer,omitempty\"`\n\tPollTimer     time.Duration `mapstructure:\"poll-timer,omitempty\" json:\"poll-timer,omitempty\"`\n\tDebug         bool          `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n}\n\nfunc (k *redisLocker) Init(ctx context.Context, cfg map[string]interface{}, opts ...lockers.Option) error {\n\terr := lockers.DecodeConfig(cfg, k.Cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, opt := range opts {\n\t\topt(k)\n\t}\n\terr = k.setDefaults()\n\tif err != nil {\n\t\treturn err\n\t}\n\tk.client = goredislib.NewUniversalClient(&goredislib.UniversalOptions{\n\t\tAddrs:      k.Cfg.Servers,\n\t\tMasterName: k.Cfg.MasterName,\n\t\tPassword:   k.Cfg.Password,\n\t})\n\tif err := k.client.Ping(ctx).Err(); err != nil {\n\t\treturn fmt.Errorf(\"cannot contact redis server: %w\", err)\n\t}\n\tk.redisLocker = redsync.New(goredis.NewPool(k.client))\n\treturn nil\n}\n\nfunc (k *redisLocker) Lock(ctx context.Context, key string, val []byte) (bool, error) {\n\tif k.Cfg.Debug {\n\t\tk.logger.Printf(\"attempting to lock=%s\", key)\n\t}\n\tmu := k.redisLocker.NewMutex(\n\t\tkey,\n\t\tredsync.WithGenValueFunc(func() (string, error) {\n\t\t\trand, err := k.genRandValue()\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\treturn fmt.Sprintf(\"%s-%s\", val, rand), nil\n\t\t}),\n\t\tredsync.WithExpiry(k.Cfg.LeaseDuration),\n\t)\n\tk.m.Lock()\n\tk.attemptingLocks[key] = mu\n\tk.m.Unlock()\n\tdefer func() {\n\t\tk.m.Lock()\n\t\tdefer k.m.Unlock()\n\t\tdelete(k.attemptingLocks, key)\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn false, ctx.Err()\n\t\tdefault:\n\t\t\terr := mu.LockContext(ctx)\n\t\t\tif err != nil {\n\t\t\t\tswitch err.(type) {\n\t\t\t\tcase *redsync.ErrTaken:\n\t\t\t\t\tif k.Cfg.Debug {\n\t\t\t\t\t\tk.logger.Printf(\"lock already taken lock=%s: %v\", key, err)\n\t\t\t\t\t}\n\t\t\t\t\treturn false, nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn false, 
fmt.Errorf(\"failed to acquire lock=%s: %w\", key, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tk.m.Lock()\n\t\t\tk.acquiredLocks[key] = mu\n\t\t\tk.m.Unlock()\n\t\t\treturn true, nil\n\t\t}\n\t}\n}\n\nfunc (k *redisLocker) KeepLock(ctx context.Context, key string) (chan struct{}, chan error) {\n\tdoneChan := make(chan struct{})\n\terrChan := make(chan error)\n\n\tgo func() {\n\t\tdefer close(doneChan)\n\t\tticker := time.NewTicker(k.Cfg.RenewPeriod)\n\t\tk.m.RLock()\n\t\tlock, ok := k.acquiredLocks[key]\n\t\tk.m.RUnlock()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\terrChan <- ctx.Err()\n\t\t\t\treturn\n\t\t\tcase <-doneChan:\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tif !ok {\n\t\t\t\t\terrChan <- fmt.Errorf(\"unable to maintain lock %q: not found in acquiredlocks\", key)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tok, err := lock.ExtendContext(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif !ok {\n\t\t\t\t\terrChan <- fmt.Errorf(\"could not keep lock\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t}()\n\treturn doneChan, errChan\n}\n\nfunc (k *redisLocker) Unlock(ctx context.Context, key string) error {\n\tk.m.Lock()\n\tdefer k.m.Unlock()\n\tif lock, ok := k.acquiredLocks[key]; ok {\n\t\tdelete(k.acquiredLocks, key)\n\t\tok, err := lock.Unlock()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"failed to unlock lock %s\", key)\n\t\t}\n\t}\n\tif lock, ok := k.attemptingLocks[key]; ok {\n\t\tdelete(k.attemptingLocks, key)\n\t\t_, err := lock.Unlock()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (k *redisLocker) Stop() error {\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tkeys := []string{}\n\tk.m.RLock()\n\tfor key := range k.acquiredLocks {\n\t\tkeys = append(keys, key)\n\t}\n\tk.m.RUnlock()\n\tfor _, key := range keys {\n\t\tk.Unlock(ctx, key)\n\t}\n\treturn k.Deregister(\"\")\n}\n\nfunc 
(k *redisLocker) SetLogger(logger *log.Logger) {\n\tif logger != nil && k.logger != nil {\n\t\tk.logger.SetOutput(logger.Writer())\n\t\tk.logger.SetFlags(logger.Flags())\n\t}\n}\n\n// helpers\n\nfunc (k *redisLocker) setDefaults() error {\n\tif k.Cfg.LeaseDuration <= 0 {\n\t\tk.Cfg.LeaseDuration = defaultLeaseDuration\n\t}\n\tif k.Cfg.RenewPeriod <= 0 || k.Cfg.RenewPeriod >= k.Cfg.LeaseDuration {\n\t\tk.Cfg.RenewPeriod = k.Cfg.LeaseDuration / 2\n\t}\n\tif k.Cfg.RetryTimer <= 0 {\n\t\tk.Cfg.RetryTimer = defaultRetryTimer\n\t}\n\tif k.Cfg.PollTimer <= 0 {\n\t\tk.Cfg.PollTimer = defaultPollTimer\n\t}\n\treturn nil\n}\n\nfunc (k *redisLocker) String() string {\n\tb, err := json.Marshal(k.Cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\n// genRandValue is required to generate a random value\n// so that the redislock algorithm works properly\n// especially in multi-server setups.\nfunc (k *redisLocker) genRandValue() (string, error) {\n\tb := make([]byte, 16)\n\t_, err := rand.Read(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.StdEncoding.EncodeToString(b), nil\n}\n"
  },
  {
    "path": "pkg/lockers/redis_locker/redis_registration.go",
    "content": "package redis_locker\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/go-redsync/redsync/v4\"\n\tgoredis \"github.com/redis/go-redis/v9\"\n\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n)\n\n// defaultWatchTimeout\nconst defaultWatchTimeout = 10 * time.Second\n\n// redisRegistration represents a gnmic endpoint in redis.\n// It's serialised in the redis value to allow recovering\n// it during service discovery.\ntype redisRegistration struct {\n\tID      string\n\tAddress string\n\tPort    int\n\tTags    []string\n\tRand    string\n}\n\nfunc (k *redisLocker) Register(ctx context.Context, s *lockers.ServiceRegistration) error {\n\tctx, cancel := context.WithCancel(ctx)\n\tk.m.Lock()\n\tk.registerLock[s.ID] = cancel\n\tk.m.Unlock()\n\tif k.Cfg.Debug {\n\t\tk.logger.Printf(\"locking service=%s\", s.ID)\n\t}\n\tmutex := k.redisLocker.NewMutex(\n\t\tfmt.Sprintf(\"%s-%s\", s.Name, s.ID),\n\t\tredsync.WithGenValueFunc(func() (string, error) {\n\t\t\trand, err := k.genRandValue()\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\treg := &redisRegistration{\n\t\t\t\tID:      s.ID,\n\t\t\t\tAddress: s.Address,\n\t\t\t\tPort:    s.Port,\n\t\t\t\tTags:    s.Tags,\n\t\t\t\tRand:    rand,\n\t\t\t}\n\t\t\tval, err := json.Marshal(reg)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\treturn string(val), nil\n\t\t}),\n\t\tredsync.WithExpiry(s.TTL),\n\t)\n\n\terr := mutex.LockContext(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to lock service=%s, %w\", s.ID, err)\n\t}\n\n\tticker := time.NewTicker(s.TTL / 2)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tok, err := mutex.ExtendContext(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to extend lock for service=%s: %w\", s.ID, err)\n\t\t\t}\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"could not extend lock for service=%s\", s.ID)\n\t\t\t}\n\t\tcase 
<-ctx.Done():\n\t\t\tmutex.Unlock()\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (k *redisLocker) Deregister(s string) error {\n\tk.m.Lock()\n\tdefer k.m.Unlock()\n\tfor sid, lockCancel := range k.registerLock {\n\t\tif k.Cfg.Debug {\n\t\t\tk.logger.Printf(\"unlocking service=%s\", sid)\n\t\t}\n\t\tlockCancel()\n\t\tdelete(k.registerLock, sid)\n\t}\n\treturn nil\n}\n\nfunc (k *redisLocker) WatchServices(ctx context.Context, serviceName string, tags []string, sChan chan<- []*lockers.Service, watchTimeout time.Duration) error {\n\tif watchTimeout <= 0 {\n\t\twatchTimeout = defaultWatchTimeout\n\t}\n\tvar err error\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t\tif k.Cfg.Debug {\n\t\t\t\tk.logger.Printf(\"(re)starting watch service=%q\", serviceName)\n\t\t\t}\n\t\t\terr = k.watch(ctx, serviceName, tags, sChan, watchTimeout)\n\t\t\tif err != nil {\n\t\t\t\tk.logger.Printf(\"watch ended with error: %s\", err)\n\t\t\t\ttime.Sleep(k.Cfg.RetryTimer)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttime.Sleep(k.Cfg.PollTimer)\n\t\t}\n\t}\n}\n\nfunc (k *redisLocker) watch(ctx context.Context, serviceName string, tags []string, sChan chan<- []*lockers.Service, watchTimeout time.Duration) error {\n\t// timeoutSeconds := int64(watchTimeout.Seconds())\n\t// TODO: implement watch\n\tservices, err := k.GetServices(ctx, serviceName, tags)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsChan <- services\n\treturn nil\n}\n\nfunc (k *redisLocker) getBatchOfKeys(ctx context.Context, key string, batchSize int64, cursor uint64) (uint64, map[string]*goredis.StringCmd, error) {\n\tkeys, cursor, err := k.client.Scan(\n\t\tctx,\n\t\tcursor,\n\t\tkey,\n\t\tbatchSize,\n\t).Result()\n\tif err != nil {\n\t\treturn 0, nil, fmt.Errorf(\"failed to scan keys: %w\", err)\n\t}\n\n\tresults := map[string]*goredis.StringCmd{}\n\t_, err = k.client.Pipelined(ctx, func(p goredis.Pipeliner) error {\n\t\tfor _, k := range keys {\n\t\t\tresults[k] = p.Get(ctx, k)\n\t\t}\n\t\treturn 
nil\n\t})\n\n\tif err != nil {\n\t\treturn cursor, nil, fmt.Errorf(\"error getting contents of keys\")\n\t}\n\n\treturn cursor, results, nil\n}\n\nfunc (k *redisLocker) GetServices(ctx context.Context, serviceName string, tags []string) ([]*lockers.Service, error) {\n\tvar pageSize int64 = 50\n\tvar cursor uint64\n\tvar err error\n\tvar cmds map[string]*goredis.StringCmd\n\tdiscoveredServiceRegistrations := []*redisRegistration{}\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\tdefault:\n\t\t\t// to select all gnmic instances, matching the given prefix\n\t\t\tcursor, cmds, err = k.getBatchOfKeys(\n\t\t\t\tctx,\n\t\t\t\tfmt.Sprintf(\"%s-*\", serviceName),\n\t\t\t\tpageSize,\n\t\t\t\tcursor,\n\t\t\t)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tfor _, cmd := range cmds {\n\t\t\t\tbytesVal, err := cmd.Bytes()\n\t\t\t\tif err != nil {\n\t\t\t\t\t// key removed from redis\n\t\t\t\t\t// could be that it has expired\n\t\t\t\t\t// doesn't make a difference, we skip it\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tserviceRegistration := &redisRegistration{}\n\t\t\t\tif err := json.Unmarshal(bytesVal, serviceRegistration); err != nil {\n\t\t\t\t\t// we don't have the data we expect\n\t\t\t\t\t// skip it\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tdiscoveredServiceRegistrations = append(\n\t\t\t\t\tdiscoveredServiceRegistrations,\n\t\t\t\t\tserviceRegistration,\n\t\t\t\t)\n\t\t\t}\n\t\t\t// termination condition for redis scan\n\t\t\tif cursor == 0 {\n\t\t\t\tif k.Cfg.Debug {\n\t\t\t\t\tk.logger.Printf(\"got %d services from redis\", len(discoveredServiceRegistrations))\n\t\t\t\t}\n\t\t\t\t// convert discovered servicesRegistrations to services\n\t\t\t\tdiscoveredServices := make([]*lockers.Service, len(discoveredServiceRegistrations))\n\t\t\t\tfor i, registration := range discoveredServiceRegistrations {\n\t\t\t\t\t// match the required tags\n\t\t\t\t\tif !matchTags(registration.Tags, tags) 
{\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tdiscoveredServices[i] = &lockers.Service{\n\t\t\t\t\t\tID:   registration.ID,\n\t\t\t\t\t\tTags: registration.Tags,\n\t\t\t\t\t\tAddress: fmt.Sprintf(\n\t\t\t\t\t\t\t\"%s:%d\",\n\t\t\t\t\t\t\tregistration.Address,\n\t\t\t\t\t\t\tregistration.Port,\n\t\t\t\t\t\t),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn discoveredServices, nil\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (k *redisLocker) IsLocked(ctx context.Context, key string) (bool, error) {\n\tcount, err := k.client.Exists(ctx, key).Result()\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"error during redis query: %w\", err)\n\t}\n\n\tif count > 0 {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\nfunc (k *redisLocker) List(ctx context.Context, prefix string) (map[string]string, error) {\n\tvar cursor uint64\n\tvar err error\n\tvar cmds map[string]*goredis.StringCmd\n\tdata := map[string]string{}\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\tdefault:\n\t\t}\n\t\tcursor, cmds, err = k.getBatchOfKeys(\n\t\t\tctx,\n\t\t\tfmt.Sprintf(\"%s*\", prefix),\n\t\t\t100,\n\t\t\tcursor,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to fetch from redis: %w\", err)\n\t\t}\n\t\tif k.Cfg.Debug {\n\t\t\tk.logger.Printf(\n\t\t\t\t\"got %d keys from redis for prefix=%s\",\n\t\t\t\tlen(cmds),\n\t\t\t\tprefix,\n\t\t\t)\n\t\t}\n\t\tfor key, cmd := range cmds {\n\t\t\tbytesVal, err := cmd.Bytes()\n\t\t\tif err != nil {\n\t\t\t\t// key removed from redis\n\t\t\t\t// could be that it has expired\n\t\t\t\t// doesn't make a difference, we skip it\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// we add a random string at the end of the value for redis\n\t\t\t// redlock algorithm, so we need to remove it here\n\t\t\tlastIndex := bytes.LastIndex(bytesVal, []byte(\"-\"))\n\t\t\t// if it's not there, we skip the key\n\t\t\tif lastIndex < 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdata[key] = string(bytesVal[:lastIndex])\n\t\t}\n\n\t\tif cursor == 0 {\n\t\t\treturn 
data, nil\n\t\t}\n\t}\n}\n\nfunc matchTags(tags, wantedTags []string) bool {\n\tif wantedTags == nil {\n\t\treturn true\n\t}\n\ttagsMap := map[string]struct{}{}\n\n\tfor _, t := range tags {\n\t\ttagsMap[t] = struct{}{}\n\t}\n\n\tfor _, wt := range wantedTags {\n\t\tif _, ok := tagsMap[wt]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "pkg/logging/logging.go",
    "content": "// © 2025 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage logging\n\nimport (\n\t\"fmt\"\n\t\"log/slog\"\n\t\"os\"\n\t\"reflect\"\n\n\t\"github.com/openconfig/gnmic/pkg/config\"\n\t\"github.com/zestor-dev/zestor/store\"\n\t\"gopkg.in/natefinch/lumberjack.v2\"\n)\n\nfunc GetLogger(level slog.Level, args ...any) *slog.Logger {\n\thandlerOptions := &slog.HandlerOptions{Level: level}\n\treturn slog.New(slog.NewTextHandler(os.Stderr, handlerOptions)).With(args...)\n}\n\nfunc NewLogger(store store.Store[any], args ...any) *slog.Logger {\n\tcfg, ok, err := store.Get(\"global-flags\", \"global-flags\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error getting global flags: %v. Building a default logger.\\n\", err)\n\t\treturn GetLogger(slog.LevelInfo, args...)\n\t}\n\tif !ok {\n\t\tfmt.Fprintf(os.Stderr, \"globalFlags is of an unexpected type: %T. Building a default logger.\\n\", reflect.TypeOf(cfg))\n\t\treturn GetLogger(slog.LevelInfo, args...)\n\t}\n\tflags := config.GlobalFlags{}\n\tswitch i := cfg.(type) {\n\tcase config.GlobalFlags:\n\t\tflags = i\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"globalFlags is of an unexpected type: %T. 
Building a default logger.\\n\", reflect.TypeOf(cfg))\n\t\treturn GetLogger(slog.LevelInfo, args...)\n\t}\n\tif !flags.Log {\n\t\treturn slog.New(slog.DiscardHandler)\n\t}\n\tvar level slog.Level\n\tif flags.Debug {\n\t\tlevel = slog.LevelDebug\n\t} else {\n\t\tlevel = slog.LevelInfo\n\t}\n\n\thandlerOptions := &slog.HandlerOptions{Level: level}\n\tif flags.LogFile != \"\" {\n\t\tif flags.LogMaxSize > 0 {\n\t\t\tlj := &lumberjack.Logger{\n\t\t\t\tFilename:   flags.LogFile,\n\t\t\t\tMaxSize:    flags.LogMaxSize,\n\t\t\t\tMaxBackups: flags.LogMaxBackups,\n\t\t\t\tCompress:   flags.LogCompress,\n\t\t\t}\n\t\t\treturn slog.New(slog.NewTextHandler(lj, handlerOptions)).With(args...)\n\t\t}\n\t\tf, err := os.OpenFile(flags.LogFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"error opening log file: %v\\n\", err)\n\t\t\treturn GetLogger(slog.LevelInfo, args...)\n\t\t}\n\t\treturn slog.New(slog.NewTextHandler(f, handlerOptions)).With(args...)\n\t}\n\treturn slog.New(slog.NewTextHandler(os.Stderr, handlerOptions)).With(args...)\n}\n"
  },
  {
    "path": "pkg/outputs/all/all.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// © 2025 NVIDIA Corporation\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of NVIDIA's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage all\n\nimport (\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/asciigraph_output\"\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/file\"\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/gnmi_output\"\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/influxdb_output\"\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/kafka_output\"\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/nats_outputs/jetstream\"\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/nats_outputs/nats\"\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/otlp_output\"\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/prometheus_output/prometheus_output\"\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/prometheus_output/prometheus_write_output\"\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/snmp_output\"\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/tcp_output\"\n\t_ \"github.com/openconfig/gnmic/pkg/outputs/udp_output\"\n)\n"
  },
  {
    "path": "pkg/outputs/asciigraph_output/asciigraph.go",
    "content": "// © 2023 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage asciigraph_output\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"github.com/guptarohit/asciigraph\"\n\t\"github.com/nsf/termbox-go\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\tgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tloggingPrefix       = \"[asciigraph_output:%s] \"\n\tdefaultRefreshTimer = time.Second\n\tdefaultPrecision    = 2\n\tdefaultTimeout      = 10 * time.Second\n)\n\nvar (\n\tdefaultLabelColor   = asciigraph.Blue\n\tdefaultCaptionColor = asciigraph.Default\n\tdefaultAxisColor    = asciigraph.Default\n)\n\nfunc init() {\n\toutputs.Register(\"asciigraph\", func() outputs.Output {\n\t\treturn &asciigraphOutput{\n\t\t\tcfg:     &cfg{},\n\t\t\tlogger:  log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t\teventCh: make(chan *formatters.EventMsg, 100),\n\t\t\tm:       new(sync.RWMutex),\n\t\t\tdata:    make(map[string]*series),\n\t\t\tcolors:  make(map[asciigraph.AnsiColor]struct{}),\n\t\t}\n\t})\n}\n\n// 
asciigraphOutput //\ntype asciigraphOutput struct {\n\toutputs.BaseOutput\n\tcfg     *cfg\n\tlogger  *log.Logger\n\teventCh chan *formatters.EventMsg\n\n\tm       *sync.RWMutex\n\tdata    map[string]*series\n\tcolors  map[asciigraph.AnsiColor]struct{}\n\tcaption string\n\n\tcaptionColor asciigraph.AnsiColor\n\taxisColor    asciigraph.AnsiColor\n\tlabelColor   asciigraph.AnsiColor\n\tevps         []formatters.EventProcessor\n\n\ttargetTpl *template.Template\n\tstore     store.Store[any]\n}\n\ntype series struct {\n\tname  string\n\tdata  []float64\n\tcolor asciigraph.AnsiColor\n}\n\n// cfg //\ntype cfg struct {\n\t// The caption to be displayed under the graph\n\tCaption string `mapstructure:\"caption,omitempty\" json:\"caption,omitempty\"`\n\t// The graph height\n\tHeight int `mapstructure:\"height,omitempty\" json:\"height,omitempty\"`\n\t// The graph width\n\tWidth int `mapstructure:\"width,omitempty\" json:\"width,omitempty\"`\n\t// The graph minimum value for the vertical axis\n\tLowerBound *float64 `mapstructure:\"lower-bound,omitempty\" json:\"lower-bound,omitempty\"`\n\t// the graph maximum value for the vertical axis\n\tUpperBound *float64 `mapstructure:\"upper-bound,omitempty\" json:\"upper-bound,omitempty\"`\n\t// The graph offset\n\tOffset int `mapstructure:\"offset,omitempty\" json:\"offset,omitempty\"`\n\t// The decimal point precision of the label values\n\tPrecision uint `mapstructure:\"precision,omitempty\" json:\"precision,omitempty\"`\n\t// The caption color\n\tCaptionColor string `mapstructure:\"caption-color,omitempty\" json:\"caption-color,omitempty\"`\n\t// The axis color\n\tAxisColor string `mapstructure:\"axis-color,omitempty\" json:\"axis-color,omitempty\"`\n\t// The label color\n\tLabelColor string `mapstructure:\"label-color,omitempty\" json:\"label-color,omitempty\"`\n\t// The graph refresh timer\n\tRefreshTimer time.Duration `mapstructure:\"refresh-timer,omitempty\" json:\"refresh-timer,omitempty\"`\n\t// Add target the received 
subscribe responses\n\tAddTarget string `mapstructure:\"add-target,omitempty\" json:\"add-target,omitempty\"`\n\t//\n\tTargetTemplate string `mapstructure:\"target-template,omitempty\" json:\"target-template,omitempty\"`\n\t// list of event processors\n\tEventProcessors []string `mapstructure:\"event-processors,omitempty\" json:\"event-processors,omitempty\"`\n\t// enable extra logging\n\tDebug bool `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n}\n\nfunc (a *asciigraphOutput) String() string {\n\tb, err := json.Marshal(a.cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (a *asciigraphOutput) setEventProcessors(logger *log.Logger) error {\n\ttcs, ps, acts, err := gutils.GetConfigMaps(a.store)\n\tif err != nil {\n\t\treturn err\n\t}\n\ta.evps, err = formatters.MakeEventProcessors(\n\t\tlogger,\n\t\ta.cfg.EventProcessors,\n\t\tps,\n\t\ttcs,\n\t\tacts,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (a *asciigraphOutput) setLogger(logger *log.Logger) {\n\tif logger != nil && a.logger != nil {\n\t\ta.logger.SetOutput(logger.Writer())\n\t\ta.logger.SetFlags(logger.Flags())\n\t}\n}\n\n// Init //\nfunc (a *asciigraphOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error {\n\terr := outputs.DecodeConfig(cfg, a.cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ta.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name))\n\n\toptions := &outputs.OutputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ta.store = options.Store\n\n\ta.setLogger(options.Logger)\n\n\terr = a.setEventProcessors(options.Logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif a.cfg.TargetTemplate == \"\" {\n\t\ta.targetTpl = outputs.DefaultTargetTemplate\n\t} else if a.cfg.AddTarget != \"\" {\n\t\ta.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", a.cfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\ta.targetTpl = a.targetTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\t// set defaults\n\terr = a.setDefaults()\n\tif err != nil {\n\t\treturn err\n\t}\n\t//\n\tgo a.graph(ctx)\n\ta.logger.Printf(\"initialized asciigraph output: %s\", a.String())\n\treturn nil\n}\n\nfunc (a *asciigraphOutput) Update(ctx context.Context, cfg map[string]any) error {\n\treturn errors.New(\"not implemented for this output type\")\n}\n\nfunc (a *asciigraphOutput) setDefaults() error {\n\ta.labelColor = defaultLabelColor\n\tif a.cfg.LabelColor != \"\" {\n\t\tif lc, ok := asciigraph.ColorNames[a.cfg.LabelColor]; ok {\n\t\t\ta.labelColor = lc\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"unknown label color %s\", a.cfg.LabelColor)\n\t\t}\n\t}\n\n\ta.captionColor = defaultCaptionColor\n\tif a.cfg.CaptionColor != \"\" {\n\t\tif lc, ok := asciigraph.ColorNames[a.cfg.CaptionColor]; ok {\n\t\t\ta.captionColor = lc\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"unknown caption color %s\", a.cfg.CaptionColor)\n\t\t}\n\t}\n\n\ta.axisColor = defaultAxisColor\n\tif a.cfg.AxisColor != \"\" {\n\t\tif lc, ok := asciigraph.ColorNames[a.cfg.AxisColor]; ok {\n\t\t\ta.axisColor = lc\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"unknown axis color %s\", a.cfg.AxisColor)\n\t\t}\n\n\t}\n\n\tif a.cfg.RefreshTimer <= 0 {\n\t\ta.cfg.RefreshTimer = defaultRefreshTimer\n\t}\n\tif a.cfg.Precision <= 0 {\n\t\ta.cfg.Precision = defaultPrecision\n\t}\n\n\treturn a.getTermSize()\n}\n\n// Write //\nfunc (a *asciigraphOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) {\n\tif rsp == nil {\n\t\treturn\n\t}\n\n\tsubRsp, err := outputs.AddSubscriptionTarget(rsp, meta, a.cfg.AddTarget, a.targetTpl)\n\tif err != nil {\n\t\ta.logger.Printf(\"failed to add target to the response: %v\", err)\n\t\treturn\n\t}\n\tevs, err := formatters.ResponseToEventMsgs(meta[\"subscription-name\"], subRsp, meta, a.evps...)\n\tif err != nil {\n\t\ta.logger.Printf(\"failed to convert messages to events: %v\", 
err)\n\t\treturn\n\t}\n\tfor _, ev := range evs {\n\t\ta.WriteEvent(ctx, ev)\n\t}\n}\n\nfunc (a *asciigraphOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {\n\tctx, cancel := context.WithTimeout(ctx, defaultTimeout)\n\tdefer cancel()\n\tselect {\n\tcase <-ctx.Done():\n\t\ta.logger.Printf(\"write timeout: %v\", ctx.Err())\n\tcase a.eventCh <- ev:\n\t}\n}\n\n// Close //\nfunc (a *asciigraphOutput) Close() error {\n\treturn nil\n}\n\n// Metrics //\nfunc (a *asciigraphOutput) RegisterMetrics(reg *prometheus.Registry) {\n}\n\nfunc (a *asciigraphOutput) SetName(name string) {}\n\nfunc (a *asciigraphOutput) SetClusterName(name string) {}\n\nfunc (a *asciigraphOutput) SetTargetsConfig(map[string]*types.TargetConfig) {}\n\nfunc (a *asciigraphOutput) graph(ctx context.Context) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase ev, ok := <-a.eventCh:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ta.plot(ev)\n\t\tcase <-time.After(a.cfg.RefreshTimer):\n\t\t\ta.plot(nil)\n\t\t}\n\t}\n}\n\nfunc (a *asciigraphOutput) plot(e *formatters.EventMsg) {\n\ta.m.Lock()\n\tdefer a.m.Unlock()\n\ta.getTermSize()\n\tif e != nil && len(e.Values) > 0 {\n\t\ta.updateData(e)\n\t}\n\n\tdata, colors := a.buildData()\n\tif len(data) == 0 {\n\t\treturn\n\t}\n\topts := []asciigraph.Option{\n\t\tasciigraph.Height(a.cfg.Height),\n\t\tasciigraph.Width(a.cfg.Width),\n\t\tasciigraph.Offset(a.cfg.Offset),\n\t\tasciigraph.Precision(a.cfg.Precision),\n\t\tasciigraph.Caption(a.caption),\n\t\tasciigraph.CaptionColor(a.captionColor),\n\t\tasciigraph.SeriesColors(colors...),\n\t\tasciigraph.AxisColor(a.axisColor),\n\t\tasciigraph.LabelColor(a.labelColor),\n\t}\n\tif a.cfg.LowerBound != nil {\n\t\topts = append(opts, asciigraph.LowerBound(*a.cfg.LowerBound))\n\t}\n\tif a.cfg.UpperBound != nil {\n\t\topts = append(opts, asciigraph.UpperBound(*a.cfg.UpperBound))\n\t}\n\tplot := asciigraph.PlotMany(data, opts...)\n\tasciigraph.Clear()\n\tfmt.Fprintln(os.Stdout, 
plot)\n}\n\nfunc (a *asciigraphOutput) updateData(e *formatters.EventMsg) {\n\tif e == nil {\n\t\treturn\n\t}\n\tevs := splitEvent(e)\n\tfor _, ev := range evs {\n\t\tsn := a.buildSeriesName(e)\n\t\tserie := a.getOrCreateSerie(sn)\n\t\tfor _, v := range ev.Values {\n\t\t\ti, err := toFloat(v)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tserie.data = append(serie.data, i)\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (a *asciigraphOutput) getOrCreateSerie(name string) *series {\n\tserie, ok := a.data[name]\n\tif ok {\n\t\treturn serie\n\t}\n\tcolor := a.pickColor()\n\tserie = &series{\n\t\tname:  name,\n\t\tdata:  make([]float64, 0, a.cfg.Width-a.cfg.Offset),\n\t\tcolor: color,\n\t}\n\ta.data[name] = serie\n\ta.colors[serie.color] = struct{}{}\n\n\ta.setCaption()\n\treturn serie\n}\n\nfunc (a *asciigraphOutput) setCaption() {\n\tseriesNames := make([]string, 0, len(a.data))\n\tfor seriesName := range a.data {\n\t\tseriesNames = append(seriesNames, seriesName)\n\t}\n\tsort.Strings(seriesNames)\n\ta.caption = \"\"\n\tif a.cfg.Debug {\n\t\ta.caption = fmt.Sprintf(\"(h=%d,w=%d)\\n\", a.cfg.Height, a.cfg.Width)\n\t}\n\ta.caption = fmt.Sprintf(\"%s\\n\", a.cfg.Caption)\n\n\tfor _, sn := range seriesNames {\n\t\tcolor := a.data[sn].color\n\t\ta.caption += color.String() + \"-+- \" + sn + asciigraph.Default.String() + \"\\n\"\n\t}\n}\n\nfunc (a *asciigraphOutput) buildData() ([][]float64, []asciigraph.AnsiColor) {\n\tnumgraphs := len(a.data)\n\tseries := make([]*series, 0, numgraphs)\n\t// sort series by name\n\tfor _, serie := range a.data {\n\t\tsize := len(serie.data)\n\t\tif size == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif size > a.cfg.Width {\n\t\t\tserie.data = serie.data[size-a.cfg.Width:]\n\t\t}\n\t\tseries = append(series, serie)\n\t}\n\tsort.Slice(series,\n\t\tfunc(i, j int) bool {\n\t\t\treturn series[i].name < series[j].name\n\t\t})\n\n\tdata := make([][]float64, 0, numgraphs)\n\tcolors := make([]asciigraph.AnsiColor, 0, numgraphs)\n\t// get float slices and 
colors\n\tfor _, serie := range series {\n\t\tdata = append(data, serie.data)\n\t\tcolors = append(colors, serie.color)\n\t}\n\treturn data, colors\n}\n\nfunc splitEvent(e *formatters.EventMsg) []*formatters.EventMsg {\n\tnumVals := len(e.Values)\n\tswitch numVals {\n\tcase 0:\n\t\treturn nil\n\tcase 1:\n\t\treturn []*formatters.EventMsg{e}\n\t}\n\n\tevs := make([]*formatters.EventMsg, 0, numVals)\n\tfor k, v := range e.Values {\n\t\tev := &formatters.EventMsg{\n\t\t\tName:      e.Name,\n\t\t\tTimestamp: e.Timestamp,\n\t\t\tTags:      e.Tags,\n\t\t\tValues:    map[string]interface{}{k: v},\n\t\t}\n\t\tevs = append(evs, ev)\n\t}\n\treturn evs\n}\n\nvar stringBuilderPool = sync.Pool{\n\tNew: func() any {\n\t\treturn new(strings.Builder)\n\t},\n}\n\nfunc (a *asciigraphOutput) buildSeriesName(e *formatters.EventMsg) string {\n\tsb := stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tsb.Reset()\n\t\tstringBuilderPool.Put(sb)\n\t}()\n\tsb.WriteString(e.Name)\n\tsb.WriteString(\":\")\n\tfor k := range e.Values {\n\t\tsb.WriteString(k)\n\t}\n\tnumTags := len(e.Tags)\n\tif numTags == 0 {\n\t\treturn sb.String()\n\t}\n\tsb.WriteString(\"{\")\n\ttagNames := make([]string, 0, numTags)\n\tfor k := range e.Tags {\n\t\ttagNames = append(tagNames, k)\n\t}\n\tsort.Strings(tagNames)\n\tfor i, tn := range tagNames {\n\t\tfmt.Fprintf(sb, \"%s=%s\", tn, e.Tags[tn])\n\t\tif numTags != i+1 {\n\t\t\tsb.WriteString(\", \")\n\t\t}\n\t}\n\tsb.WriteString(\"}\")\n\treturn sb.String()\n}\n\nfunc toFloat(v interface{}) (float64, error) {\n\tswitch i := v.(type) {\n\tcase float64:\n\t\treturn float64(i), nil\n\tcase float32:\n\t\treturn float64(i), nil\n\tcase int64:\n\t\treturn float64(i), nil\n\tcase int32:\n\t\treturn float64(i), nil\n\tcase int16:\n\t\treturn float64(i), nil\n\tcase int8:\n\t\treturn float64(i), nil\n\tcase uint64:\n\t\treturn float64(i), nil\n\tcase uint32:\n\t\treturn float64(i), nil\n\tcase uint16:\n\t\treturn float64(i), nil\n\tcase uint8:\n\t\treturn 
float64(i), nil\n\tcase int:\n\t\treturn float64(i), nil\n\tcase uint:\n\t\treturn float64(i), nil\n\tcase string:\n\t\tf, err := strconv.ParseFloat(i, 64)\n\t\tif err != nil {\n\t\t\treturn math.NaN(), err\n\t\t}\n\t\treturn f, err\n\t\t//lint:ignore SA1019 still need DecimalVal for backward compatibility\n\tcase *gnmi.Decimal64:\n\t\treturn float64(i.Digits) / math.Pow10(int(i.Precision)), nil\n\tdefault:\n\t\treturn math.NaN(), errors.New(\"getFloat: unknown value is of incompatible type\")\n\t}\n}\n\nfunc (a *asciigraphOutput) pickColor() asciigraph.AnsiColor {\n\tfor _, c := range asciigraph.ColorNames {\n\t\tif _, ok := a.colors[c]; !ok {\n\t\t\treturn c\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc (a *asciigraphOutput) getTermSize() error {\n\terr := termbox.Init()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not initialize a terminal box: %v\", err)\n\t}\n\tw, h := termbox.Size()\n\ttermbox.Close()\n\tif a.cfg.Width <= 0 || a.cfg.Width > w-10 {\n\t\ta.cfg.Width = w - 10\n\t}\n\tnumSeries := len(a.data)\n\tif a.cfg.Height <= 0 || a.cfg.Height > h-(numSeries+1)-5 {\n\t\ta.cfg.Height = h - (numSeries + 1) - 5\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/outputs/file/file_metrics.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage file\n\nimport (\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar registerMetricsOnce sync.Once\n\nvar numberOfWrittenBytes = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"file_output\",\n\tName:      \"number_bytes_written_total\",\n\tHelp:      \"Number of bytes written to file output\",\n}, []string{\"name\", \"file_name\"})\n\nvar numberOfReceivedMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"file_output\",\n\tName:      \"number_messages_received_total\",\n\tHelp:      \"Number of messages received by file output\",\n}, []string{\"name\", \"file_name\"})\n\nvar numberOfWrittenMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"file_output\",\n\tName:      \"number_messages_writes_total\",\n\tHelp:      \"Number of messages written to file output\",\n}, []string{\"name\", \"file_name\"})\n\nvar numberOfFailWriteMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"file_output\",\n\tName:      \"number_messages_writes_fail_total\",\n\tHelp:      \"Number of failed message writes to file output\",\n}, []string{\"name\", \"file_name\", \"reason\"})\n\nfunc (f *File) initMetrics(name string) {\n\tnumberOfWrittenBytes.WithLabelValues(name, \"\").Add(0)\n\tnumberOfReceivedMsgs.WithLabelValues(name, \"\").Add(0)\n\tnumberOfWrittenMsgs.WithLabelValues(name, 
\"\").Add(0)\n\tnumberOfFailWriteMsgs.WithLabelValues(name, \"\", \"\").Add(0)\n}\n\nfunc (f *File) registerMetrics() error {\n\tcfg := f.cfg.Load()\n\tif cfg == nil {\n\t\treturn nil\n\t}\n\tif !cfg.EnableMetrics {\n\t\treturn nil\n\t}\n\tif f.reg == nil {\n\t\tf.logger.Printf(\"ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`\")\n\t\treturn nil\n\t}\n\tvar err error\n\tregisterMetricsOnce.Do(func() {\n\t\tif err = f.reg.Register(numberOfWrittenBytes); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = f.reg.Register(numberOfReceivedMsgs); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = f.reg.Register(numberOfWrittenMsgs); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = f.reg.Register(numberOfFailWriteMsgs); err != nil {\n\t\t\treturn\n\t\t}\n\t})\n\tf.initMetrics(cfg.Name)\n\treturn err\n}\n"
  },
  {
    "path": "pkg/outputs/file/file_output.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage file\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"slices\"\n\t\"sync/atomic\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"golang.org/x/sync/semaphore\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\tgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tdefaultFormat           = \"json\"\n\tdefaultWriteConcurrency = 1000\n\tdefaultSeparator        = \"\\n\"\n\tloggingPrefix           = \"[file_output:%s] \"\n)\n\nconst (\n\toutputType      = \"file\"\n\tfileType_STDOUT = \"stdout\"\n\tfileType_STDERR = \"stderr\"\n)\n\nfunc init() {\n\toutputs.Register(outputType, func() outputs.Output {\n\t\treturn &File{}\n\t})\n}\n\nfunc (f *File) init() {\n\tf.cfg = new(atomic.Pointer[config])\n\tf.dynCfg = new(atomic.Pointer[dynConfig])\n\tf.file = new(atomic.Pointer[file])\n\tf.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags)\n}\n\n// File //\ntype File struct {\n\toutputs.BaseOutput\n\n\tcfg    *atomic.Pointer[config]\n\tdynCfg *atomic.Pointer[dynConfig]\n\tfile   *atomic.Pointer[file]\n\n\tlogger *log.Logger\n\tsem    *semaphore.Weighted\n\n\treg   *prometheus.Registry\n\tstore store.Store[any]\n}\n\ntype dynConfig struct 
{\n\ttargetTpl *template.Template\n\tmsgTpl    *template.Template\n\tevps      []formatters.EventProcessor\n\tmo        *formatters.MarshalOptions\n}\n\n// config //\ntype config struct {\n\tName               string          `mapstructure:\"name,omitempty\"`\n\tFileName           string          `mapstructure:\"filename,omitempty\"`\n\tFileType           string          `mapstructure:\"file-type,omitempty\"`\n\tFormat             string          `mapstructure:\"format,omitempty\"`\n\tMultiline          bool            `mapstructure:\"multiline,omitempty\"`\n\tIndent             string          `mapstructure:\"indent,omitempty\"`\n\tSeparator          string          `mapstructure:\"separator,omitempty\"`\n\tSplitEvents        bool            `mapstructure:\"split-events,omitempty\"`\n\tOverrideTimestamps bool            `mapstructure:\"override-timestamps,omitempty\"`\n\tAddTarget          string          `mapstructure:\"add-target,omitempty\"`\n\tTargetTemplate     string          `mapstructure:\"target-template,omitempty\"`\n\tEventProcessors    []string        `mapstructure:\"event-processors,omitempty\"`\n\tMsgTemplate        string          `mapstructure:\"msg-template,omitempty\"`\n\tConcurrencyLimit   int             `mapstructure:\"concurrency-limit,omitempty\"`\n\tEnableMetrics      bool            `mapstructure:\"enable-metrics,omitempty\"`\n\tDebug              bool            `mapstructure:\"debug,omitempty\"`\n\tCalculateLatency   bool            `mapstructure:\"calculate-latency,omitempty\"`\n\tRotation           *rotationConfig `mapstructure:\"rotation,omitempty\"`\n}\n\ntype file interface {\n\tClose() error\n\tName() string\n\tWrite([]byte) (int, error)\n}\n\nfunc (f *File) String() string {\n\tcfg := f.cfg.Load()\n\tif cfg == nil {\n\t\treturn \"\"\n\t}\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (f *File) setDefaults(cfg *config) error {\n\tif cfg.Format == \"proto\" {\n\t\treturn 
fmt.Errorf(\"proto format not supported in output type 'file'\")\n\t}\n\tif cfg.Separator == \"\" {\n\t\tcfg.Separator = defaultSeparator\n\t}\n\tif cfg.FileName == \"\" && cfg.FileType == \"\" {\n\t\tcfg.FileType = fileType_STDOUT\n\t}\n\tif cfg.Format == \"\" {\n\t\tcfg.Format = defaultFormat\n\t}\n\tif cfg.FileType == fileType_STDOUT || cfg.FileType == fileType_STDERR {\n\t\tcfg.Indent = \"  \"\n\t\tcfg.Multiline = true\n\t}\n\tif cfg.Multiline && cfg.Indent == \"\" {\n\t\tcfg.Indent = \"  \"\n\t}\n\tif cfg.ConcurrencyLimit < 1 {\n\t\tswitch cfg.FileType {\n\t\tcase fileType_STDOUT, fileType_STDERR:\n\t\t\tcfg.ConcurrencyLimit = 1\n\t\tdefault:\n\t\t\tcfg.ConcurrencyLimit = defaultWriteConcurrency\n\t\t}\n\t}\n\treturn nil\n}\n\n// Init //\nfunc (f *File) Init(ctx context.Context, name string, cfg map[string]any, opts ...outputs.Option) error {\n\tf.init()\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif newCfg.Name == \"\" {\n\t\tnewCfg.Name = name\n\t}\n\tf.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name))\n\n\toptions := &outputs.OutputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tf.store = options.Store\n\n\t// apply logger\n\tf.setLogger(options.Logger)\n\n\terr = f.setDefaults(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// store config\n\tf.cfg.Store(newCfg)\n\n\t// initialize registry\n\tf.reg = options.Registry\n\terr = f.registerMetrics()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// initialize semaphore\n\tf.sem = semaphore.NewWeighted(int64(newCfg.ConcurrencyLimit))\n\n\t// build dynamic config\n\tdc := new(dynConfig)\n\n\t// initialize event processors\n\tdc.evps, err = f.buildEventProcessors(options.Logger, newCfg.EventProcessors)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc.mo = &formatters.MarshalOptions{\n\t\tMultiline:        newCfg.Multiline,\n\t\tIndent:           newCfg.Indent,\n\t\tFormat:    
       newCfg.Format,\n\t\tOverrideTS:       newCfg.OverrideTimestamps,\n\t\tCalculateLatency: newCfg.CalculateLatency,\n\t}\n\n\t// create templates\n\tif newCfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tdc.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\n\tif newCfg.MsgTemplate != \"\" {\n\t\tdc.msgTpl, err = gtemplate.CreateTemplate(fmt.Sprintf(\"%s-msg-template\", name), newCfg.MsgTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.msgTpl = dc.msgTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\n\tf.dynCfg.Store(dc)\n\n\t// initialize file\n\tnewFile, err := f.openFile(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.file.Store(&newFile)\n\n\tf.logger.Printf(\"initialized file output: %s\", f.String())\n\treturn nil\n}\n\nfunc (f *File) Validate(cfg map[string]any) error {\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif newCfg.Format == \"proto\" {\n\t\treturn fmt.Errorf(\"proto format not supported in output type 'file'\")\n\t}\n\treturn nil\n}\n\nfunc (f *File) openFile(cfg *config) (file, error) {\n\tvar fileHandle file\n\tvar err error\n\n\tswitch cfg.FileType {\n\tcase fileType_STDOUT:\n\t\treturn os.Stdout, nil\n\tcase fileType_STDERR:\n\t\treturn os.Stderr, nil\n\tdefault:\n\tCRFILE:\n\t\tif cfg.Rotation != nil {\n\t\t\tfileHandle = newRotatingFile(cfg)\n\t\t} else {\n\t\t\tfileHandle, err = os.OpenFile(cfg.FileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)\n\t\t\tif err != nil {\n\t\t\t\tf.logger.Printf(\"failed to create file: %v\", err)\n\t\t\t\ttime.Sleep(10 * time.Second)\n\t\t\t\tgoto CRFILE\n\t\t\t}\n\t\t}\n\t}\n\treturn fileHandle, nil\n}\n\nfunc (f *File) Update(ctx context.Context, cfgMap map[string]any) error {\n\tnewCfg := new(config)\n\terr 
:= outputs.DecodeConfig(cfgMap, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcurrCfg := f.cfg.Load()\n\tif newCfg.Name == \"\" && currCfg != nil {\n\t\tnewCfg.Name = currCfg.Name\n\t}\n\n\terr = f.setDefaults(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// check if we need to rebuild processors\n\trebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0\n\n\t// build new dynamic config\n\tdc := new(dynConfig)\n\n\t// rebuild processors if needed\n\tprevDC := f.dynCfg.Load()\n\tif rebuildProcessors {\n\t\tdc.evps, err = f.buildEventProcessors(f.logger, newCfg.EventProcessors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\n\tdc.mo = &formatters.MarshalOptions{\n\t\tMultiline:        newCfg.Multiline,\n\t\tIndent:           newCfg.Indent,\n\t\tFormat:           newCfg.Format,\n\t\tOverrideTS:       newCfg.OverrideTimestamps,\n\t\tCalculateLatency: newCfg.CalculateLatency,\n\t}\n\n\t// rebuild templates\n\tvar targetTpl *template.Template\n\tif newCfg.TargetTemplate == \"\" {\n\t\ttargetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\ttargetTpl, err = gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttargetTpl = targetTpl.Funcs(outputs.TemplateFuncs)\n\t} else {\n\t\ttargetTpl = outputs.DefaultTargetTemplate\n\t}\n\tdc.targetTpl = targetTpl\n\n\tif newCfg.MsgTemplate != \"\" {\n\t\tdc.msgTpl, err = gtemplate.CreateTemplate(fmt.Sprintf(\"%s-msg-template\", newCfg.Name), newCfg.MsgTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.msgTpl = dc.msgTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\n\t// store new dynamic config\n\tf.dynCfg.Store(dc)\n\n\t// check if file needs to be reopened\n\tneedsFileReopen := fileNeedsReopen(currCfg, newCfg)\n\n\tif needsFileReopen {\n\t\t// open new file\n\t\tnewFile, err := f.openFile(newCfg)\n\t\tif err != nil 
{\n\t\t\treturn err\n\t\t}\n\n\t\t// swap file handle\n\t\toldFile := f.file.Swap(&newFile)\n\n\t\t// close old file (but not stdout/stderr)\n\t\tif oldFile != nil && *oldFile != os.Stdout && *oldFile != os.Stderr {\n\t\t\t(*oldFile).Close()\n\t\t}\n\t}\n\n\t// update semaphore if concurrency limit changed\n\tif currCfg == nil || currCfg.ConcurrencyLimit != newCfg.ConcurrencyLimit {\n\t\tf.sem = semaphore.NewWeighted(int64(newCfg.ConcurrencyLimit))\n\t}\n\n\t// store new config\n\tf.cfg.Store(newCfg)\n\n\tf.logger.Printf(\"updated file output: %s\", f.String())\n\treturn nil\n}\n\nfunc (f *File) UpdateProcessor(name string, pcfg map[string]any) error {\n\tcfg := f.cfg.Load()\n\tdc := f.dynCfg.Load()\n\n\tnewEvps, changed, err := outputs.UpdateProcessorInSlice(\n\t\tf.logger,\n\t\tf.store,\n\t\tcfg.EventProcessors,\n\t\tdc.evps,\n\t\tname,\n\t\tpcfg,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif changed {\n\t\tnewDC := *dc\n\t\tnewDC.evps = newEvps\n\t\tf.dynCfg.Store(&newDC)\n\t\tf.logger.Printf(\"updated event processor %s\", name)\n\t}\n\treturn nil\n}\n\n// Write //\nfunc (f *File) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) {\n\tif rsp == nil {\n\t\treturn\n\t}\n\n\t// load current config and file\n\tcfg := f.cfg.Load()\n\tdc := f.dynCfg.Load()\n\tfileHandle := f.file.Load()\n\n\tif cfg == nil || dc == nil || fileHandle == nil {\n\t\treturn\n\t}\n\n\terr := f.sem.Acquire(ctx, 1)\n\tif errors.Is(err, context.Canceled) {\n\t\treturn\n\t}\n\tif err != nil {\n\t\tf.logger.Printf(\"failed acquiring semaphore: %v\", err)\n\t\treturn\n\t}\n\tdefer f.sem.Release(1)\n\n\tnumberOfReceivedMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name()).Inc()\n\n\trsp, err = outputs.AddSubscriptionTarget(rsp, meta, cfg.AddTarget, dc.targetTpl)\n\tif err != nil {\n\t\tf.logger.Printf(\"failed to add target to the response: %v\", err)\n\t}\n\n\tbb, err := outputs.Marshal(rsp, meta, dc.mo, cfg.SplitEvents, dc.evps...)\n\tif err != nil {\n\t\tif cfg.Debug 
{\n\t\t\tf.logger.Printf(\"failed marshaling proto msg: %v\", err)\n\t\t}\n\t\tnumberOfFailWriteMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name(), \"marshal_error\").Inc()\n\t\treturn\n\t}\n\tif len(bb) == 0 {\n\t\treturn\n\t}\n\n\tfor _, b := range bb {\n\t\tif dc.msgTpl != nil {\n\t\t\tb, err = outputs.ExecTemplate(b, dc.msgTpl)\n\t\t\tif err != nil {\n\t\t\t\tif cfg.Debug {\n\t\t\t\t\tlog.Printf(\"failed to execute template: %v\", err)\n\t\t\t\t}\n\t\t\t\tnumberOfFailWriteMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name(), \"template_error\").Inc()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tn, err := (*fileHandle).Write(append(b, []byte(cfg.Separator)...))\n\t\tif err != nil {\n\t\t\tif cfg.Debug {\n\t\t\t\tf.logger.Printf(\"failed to write to file '%s': %v\", (*fileHandle).Name(), err)\n\t\t\t}\n\t\t\tnumberOfFailWriteMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name(), \"write_error\").Inc()\n\t\t\treturn\n\t\t}\n\t\tnumberOfWrittenBytes.WithLabelValues(cfg.Name, (*fileHandle).Name()).Add(float64(n))\n\t\tnumberOfWrittenMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name()).Inc()\n\t}\n}\n\nfunc (f *File) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tdefault:\n\t}\n\n\t// load current config and file\n\tcfg := f.cfg.Load()\n\tdc := f.dynCfg.Load()\n\tfileHandle := f.file.Load()\n\n\tif cfg == nil || dc == nil || fileHandle == nil {\n\t\treturn\n\t}\n\n\tvar evs = []*formatters.EventMsg{ev}\n\tfor _, proc := range dc.evps {\n\t\tevs = proc.Apply(evs...)\n\t}\n\n\ttoWrite := []byte{}\n\tif cfg.SplitEvents {\n\t\tfor _, pev := range evs {\n\t\t\tvar err error\n\t\t\tvar b []byte\n\t\t\tif cfg.Multiline {\n\t\t\t\tb, err = json.MarshalIndent(pev, \"\", cfg.Indent)\n\t\t\t} else {\n\t\t\t\tb, err = json.Marshal(pev)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"failed to WriteEvent: %v\", err)\n\t\t\t\tnumberOfFailWriteMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name(), 
\"marshal_error\").Inc()\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttoWrite = append(toWrite, b...)\n\t\t\ttoWrite = append(toWrite, []byte(cfg.Separator)...)\n\t\t}\n\t} else {\n\t\tvar err error\n\t\tvar b []byte\n\t\tif cfg.Multiline {\n\t\t\tb, err = json.MarshalIndent(evs, \"\", cfg.Indent)\n\t\t} else {\n\t\t\tb, err = json.Marshal(evs)\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"failed to WriteEvent: %v\", err)\n\t\t\tnumberOfFailWriteMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name(), \"marshal_error\").Inc()\n\t\t\treturn\n\t\t}\n\t\ttoWrite = append(toWrite, b...)\n\t\ttoWrite = append(toWrite, []byte(cfg.Separator)...)\n\t}\n\n\tn, err := (*fileHandle).Write(toWrite)\n\tif err != nil {\n\t\tfmt.Printf(\"failed to WriteEvent: %v\", err)\n\t\tnumberOfFailWriteMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name(), \"write_error\").Inc()\n\t\treturn\n\t}\n\tnumberOfWrittenBytes.WithLabelValues(cfg.Name, (*fileHandle).Name()).Add(float64(n))\n\tnumberOfWrittenMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name()).Inc()\n}\n\n// Close //\nfunc (f *File) Close() error {\n\tfileHandle := f.file.Load()\n\tif fileHandle == nil {\n\t\treturn nil\n\t}\n\tf.logger.Printf(\"closing file '%s' output\", (*fileHandle).Name())\n\treturn (*fileHandle).Close()\n}\n\nfunc (f *File) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := gutils.GetConfigMaps(f.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tevps, err := formatters.MakeEventProcessors(\n\t\tlogger,\n\t\teventProcessors,\n\t\tps,\n\t\ttcs,\n\t\tacts,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn evps, nil\n}\n\nfunc (f *File) setLogger(logger *log.Logger) {\n\tif logger != nil && f.logger != nil {\n\t\tf.logger.SetOutput(logger.Writer())\n\t\tf.logger.SetFlags(logger.Flags())\n\t}\n}\n\nfunc fileNeedsReopen(old, new *config) bool {\n\tif old == nil || new == nil {\n\t\treturn true\n\t}\n\t// file needs to be reopened if 
file-related settings changed\n\treturn old.FileName != new.FileName ||\n\t\told.FileType != new.FileType ||\n\t\trotationChanged(old.Rotation, new.Rotation)\n}\n\nfunc rotationChanged(old, new *rotationConfig) bool {\n\tif old == nil && new == nil {\n\t\treturn false\n\t}\n\tif old == nil || new == nil {\n\t\treturn true\n\t}\n\t// compare rotation config fields\n\treturn old.MaxSize != new.MaxSize ||\n\t\told.MaxAge != new.MaxAge ||\n\t\told.MaxBackups != new.MaxBackups\n}\n"
  },
  {
    "path": "pkg/outputs/file/rotating_file.go",
    "content": "package file\n\nimport (\n\t\"gopkg.in/natefinch/lumberjack.v2\"\n)\n\n// RotationConfig manages configuration around file rotation\ntype rotationConfig struct {\n\tMaxSize    int  `mapstructure:\"max-size,omitempty\"`\n\tMaxBackups int  `mapstructure:\"max-backups,omitempty\"`\n\tMaxAge     int  `mapstructure:\"max-age,omitempty\"`\n\tCompress   bool `mapstructure:\"compress,omitempty\"`\n}\n\nfunc (r *rotationConfig) SetDefaults() {\n\tif r.MaxSize == 0 {\n\t\tr.MaxSize = 100\n\t}\n\tif r.MaxBackups == 0 {\n\t\tr.MaxBackups = 3\n\t}\n\n\tif r.MaxAge == 0 {\n\t\tr.MaxAge = 30\n\t}\n}\n\ntype rotatingFile struct {\n\tl *lumberjack.Logger\n}\n\n// newRotatingFile initialize the lumberjack instance\nfunc newRotatingFile(cfg *config) *rotatingFile {\n\tcfg.Rotation.SetDefaults()\n\n\tlj := lumberjack.Logger{\n\t\tFilename:   cfg.FileName,\n\t\tMaxSize:    cfg.Rotation.MaxSize,\n\t\tMaxBackups: cfg.Rotation.MaxBackups,\n\t\tMaxAge:     cfg.Rotation.MaxAge,\n\t\tCompress:   cfg.Rotation.Compress,\n\t}\n\n\treturn &rotatingFile{l: &lj}\n}\n\n// Close closes the file\nfunc (r *rotatingFile) Close() error {\n\treturn r.l.Close()\n}\n\n// Name returns the name of the file\nfunc (r *rotatingFile) Name() string {\n\treturn r.l.Filename\n}\n\n// Write implements io.Writer\nfunc (r *rotatingFile) Write(b []byte) (int, error) {\n\treturn r.l.Write(b)\n}\n"
  },
  {
    "path": "pkg/outputs/gnmi_output/gnmi_output.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage gnmi_output\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"strings\"\n\t\"text/template\"\n\n\tgrpc_prometheus \"github.com/grpc-ecosystem/go-grpc-prometheus\"\n\t\"github.com/openconfig/gnmi/cache\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"golang.org/x/sync/semaphore\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n)\n\nconst (\n\tloggingPrefix           = \"[gnmi_output:%s] \"\n\tdefaultMaxSubscriptions = 64\n\tdefaultMaxGetRPC        = 64\n\tdefaultAddress          = \":57400\"\n)\n\nfunc init() {\n\toutputs.Register(\"gnmi\", func() outputs.Output {\n\t\treturn &gNMIOutput{\n\t\t\tcfg:    new(config),\n\t\t\tlogger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags),\n\t\t}\n\t})\n}\n\n// gNMIOutput //\ntype gNMIOutput struct {\n\toutputs.BaseOutput\n\tcfg       *config\n\tlogger    *log.Logger\n\ttargetTpl *template.Template\n\t//\n\tsrv     *server\n\tgrpcSrv *grpc.Server\n\tc       *cache.Cache\n\treg     *prometheus.Registry\n}\n\ntype config struct {\n\t//Name             string `mapstructure:\"name,omitempty\"`\n\tAddress          string          
 `mapstructure:\"address,omitempty\"`\n\tTargetTemplate   string           `mapstructure:\"target-template,omitempty\"`\n\tMaxSubscriptions int64            `mapstructure:\"max-subscriptions,omitempty\"`\n\tMaxUnaryRPC      int64            `mapstructure:\"max-unary-rpc,omitempty\"`\n\tTLS              *types.TLSConfig `mapstructure:\"tls,omitempty\"`\n\tEnableMetrics    bool             `mapstructure:\"enable-metrics,omitempty\"`\n\tDebug            bool             `mapstructure:\"debug,omitempty\"`\n}\n\nfunc (g *gNMIOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error {\n\terr := outputs.DecodeConfig(cfg, g.cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.c = cache.New(nil)\n\tg.srv = g.newServer()\n\n\toptions := &outputs.OutputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif options.Logger != nil && g.logger != nil {\n\t\tg.logger.SetOutput(options.Logger.Writer())\n\t\tg.logger.SetFlags(options.Logger.Flags())\n\t}\n\tg.reg = options.Registry\n\tg.registerMetrics()\n\terr = g.setDefaults()\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name))\n\tif g.targetTpl == nil {\n\t\tg.targetTpl, err = gtemplate.CreateTemplate(fmt.Sprintf(\"%s-target-template\", name), g.cfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr = g.startGRPCServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.logger.Printf(\"started gnmi output: %v\", g)\n\treturn nil\n}\n\nfunc (g *gNMIOutput) Update(ctx context.Context, cfg map[string]any) error {\n\treturn errors.New(\"not implemented for this output type\")\n}\n\nfunc (g *gNMIOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) {\n\tvar err error\n\trsp, err = outputs.AddSubscriptionTarget(rsp, meta, \"if-not-present\", g.targetTpl)\n\tif err != nil {\n\t\tg.logger.Printf(\"failed to add target to the response: %v\", 
err)\n\t}\n\tswitch rsp := rsp.(type) {\n\tcase *gnmi.SubscribeResponse:\n\t\tswitch rsp := rsp.Response.(type) {\n\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\ttarget := rsp.Update.GetPrefix().GetTarget()\n\t\t\tif target == \"\" {\n\t\t\t\tg.logger.Printf(\"response missing target\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !g.c.HasTarget(target) {\n\t\t\t\tg.c.Add(target)\n\t\t\t\tg.logger.Printf(\"target %q added to the local cache\", target)\n\t\t\t}\n\t\t\tif g.cfg.Debug {\n\t\t\t\tg.logger.Printf(\"updating target %q local cache\", target)\n\t\t\t}\n\t\t\terr = g.c.GnmiUpdate(rsp.Update)\n\t\t\tif err != nil {\n\t\t\t\tg.logger.Printf(\"failed to update gNMI cache: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase *gnmi.SubscribeResponse_SyncResponse:\n\t\t}\n\t}\n}\n\nfunc (g *gNMIOutput) WriteEvent(context.Context, *formatters.EventMsg) {}\n\nfunc (g *gNMIOutput) Close() error {\n\t//g.teardown()\n\tg.grpcSrv.Stop()\n\treturn nil\n}\n\nfunc (g *gNMIOutput) registerMetrics() {\n\tif !g.cfg.EnableMetrics {\n\t\treturn\n\t}\n\tif g.reg == nil {\n\t\tg.logger.Printf(\"ERR: output metrics enabled but main registry is not initialized, enable main metrics under `api-server`\")\n\t\treturn\n\t}\n\tsrvMetrics := grpc_prometheus.NewServerMetrics()\n\tsrvMetrics.InitializeMetrics(g.grpcSrv)\n\tif err := g.reg.Register(srvMetrics); err != nil {\n\t\tg.logger.Printf(\"failed to register prometheus metrics: %v\", err)\n\t}\n}\n\nfunc (g *gNMIOutput) String() string {\n\tb, err := json.Marshal(g.cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (g *gNMIOutput) setDefaults() error {\n\tif g.cfg.Address == \"\" {\n\t\tg.cfg.Address = defaultAddress\n\t}\n\tif g.cfg.TargetTemplate == \"\" {\n\t\tg.targetTpl = outputs.DefaultTargetTemplate\n\t}\n\tif g.cfg.MaxSubscriptions <= 0 {\n\t\tg.cfg.MaxSubscriptions = defaultMaxSubscriptions\n\t}\n\tif g.cfg.MaxUnaryRPC <= 0 {\n\t\tg.cfg.MaxUnaryRPC = defaultMaxGetRPC\n\t}\n\treturn nil\n}\n\nfunc (g *gNMIOutput) 
startGRPCServer() error {\n\tg.srv.subscribeRPCsem = semaphore.NewWeighted(g.cfg.MaxSubscriptions)\n\tg.srv.unaryRPCsem = semaphore.NewWeighted(g.cfg.MaxUnaryRPC)\n\tg.c.SetClient(g.srv.Update)\n\n\tvar l net.Listener\n\tvar err error\n\tnetwork := \"tcp\"\n\taddr := g.cfg.Address\n\tif strings.HasPrefix(g.cfg.Address, \"unix://\") {\n\t\tnetwork = \"unix\"\n\t\taddr = strings.TrimPrefix(addr, \"unix://\")\n\t}\n\tl, err = net.Listen(network, addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\topts, err := g.serverOpts()\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.grpcSrv = grpc.NewServer(opts...)\n\tgnmi.RegisterGNMIServer(g.grpcSrv, g.srv)\n\tgo g.grpcSrv.Serve(l)\n\treturn nil\n}\n\nfunc (g *gNMIOutput) serverOpts() ([]grpc.ServerOption, error) {\n\topts := make([]grpc.ServerOption, 0)\n\tif g.cfg.EnableMetrics {\n\t\topts = append(opts, grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor))\n\t}\n\tif g.cfg.TLS == nil {\n\t\treturn opts, nil\n\t}\n\n\ttlscfg, err := utils.NewTLSConfig(\n\t\tg.cfg.TLS.CaFile,\n\t\tg.cfg.TLS.CertFile,\n\t\tg.cfg.TLS.KeyFile,\n\t\tg.cfg.TLS.ClientAuth,\n\t\tfalse,\n\t\ttrue,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif tlscfg != nil {\n\t\topts = append(opts, grpc.Creds(credentials.NewTLS(tlscfg)))\n\t}\n\n\treturn opts, nil\n}\n"
  },
  {
    "path": "pkg/outputs/gnmi_output/gnmi_server.go",
    "content": "/*\nCopyright 2017 Google Inc.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n    http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// This gNMI server implementation is based on the one found here:\n// https://github.com/openconfig/gnmi/blob/c69a5df04b5329d70e3e76afa773669527cfad9b/subscribe/subscribe.go\n\npackage gnmi_output\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"sync\"\n\n\t\"golang.org/x/sync/semaphore\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/peer\"\n\t\"google.golang.org/grpc/status\"\n\n\t\"github.com/openconfig/gnmi/cache\"\n\t\"github.com/openconfig/gnmi/coalesce\"\n\t\"github.com/openconfig/gnmi/ctree\"\n\t\"github.com/openconfig/gnmi/match\"\n\t\"github.com/openconfig/gnmi/path\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmi/subscribe\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\ntype streamClient struct {\n\ttarget  string\n\treq     *gnmi.SubscribeRequest\n\tqueue   *coalesce.Queue\n\tstream  gnmi.GNMI_SubscribeServer\n\terrChan chan<- error\n}\n\ntype server struct {\n\tgnmi.UnimplementedGNMIServer\n\t//\n\tl               *log.Logger\n\tc               *cache.Cache\n\tm               *match.Match\n\tsubscribeRPCsem *semaphore.Weighted\n\tunaryRPCsem     *semaphore.Weighted\n\t//\n\tmu      *sync.RWMutex\n\ttargets map[string]*types.TargetConfig\n}\n\ntype matchClient struct {\n\tqueue *coalesce.Queue\n\terr   error\n}\n\ntype syncMarker struct{}\n\ntype resp struct {\n\tstream gnmi.GNMI_SubscribeServer\n\tn      
*ctree.Leaf\n\tdup    uint32\n}\n\nfunc (m *matchClient) Update(n interface{}) {\n\tif m.err != nil {\n\t\treturn\n\t}\n\t_, m.err = m.queue.Insert(n)\n}\n\nfunc (g *gNMIOutput) newServer() *server {\n\treturn &server{\n\t\tl:       g.logger,\n\t\tc:       g.c,\n\t\tm:       match.New(),\n\t\tmu:      new(sync.RWMutex),\n\t\ttargets: make(map[string]*types.TargetConfig),\n\t}\n}\n\nfunc (s *server) Update(n *ctree.Leaf) {\n\tswitch v := n.Value().(type) {\n\tcase *gnmi.Notification:\n\t\tsubscribe.UpdateNotification(s.m, n, v, path.ToStrings(v.Prefix, true))\n\tdefault:\n\t\ts.l.Printf(\"unexpected update type: %T\", v)\n\t}\n}\n\nfunc addSubscription(m *match.Match, s *gnmi.SubscriptionList, c *matchClient) func() {\n\tremoves := make([]func(), 0, len(s.GetSubscription()))\n\tprefix := path.ToStrings(s.GetPrefix(), true)\n\tfor _, p := range s.GetSubscription() {\n\t\tif p.GetPath() == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tpath := append(prefix, path.ToStrings(p.GetPath(), false)...)\n\t\tremoves = append(removes, m.AddQuery(path, c))\n\t}\n\treturn func() {\n\t\tfor _, remove := range removes {\n\t\t\tremove()\n\t\t}\n\t}\n}\n\nfunc (s *server) handleSubscriptionRequest(sc *streamClient) {\n\tvar err error\n\ts.l.Printf(\"processing subscription to target %q\", sc.target)\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ts.l.Printf(\"error processing subscription to target %q: %v\", sc.target, err)\n\t\t\tsc.queue.Close()\n\t\t\tsc.errChan <- err\n\t\t\treturn\n\t\t}\n\t\ts.l.Printf(\"subscription request to target %q processed\", sc.target)\n\t}()\n\n\tif !sc.req.GetSubscribe().GetUpdatesOnly() {\n\t\tfor _, sub := range sc.req.GetSubscribe().GetSubscription() {\n\t\t\tvar fp []string\n\t\t\tfp, err = path.CompletePath(sc.req.GetSubscribe().GetPrefix(), sub.GetPath())\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = s.c.Query(sc.target, fp,\n\t\t\t\tfunc(_ []string, l *ctree.Leaf, _ interface{}) error {\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn 
err\n\t\t\t\t\t}\n\t\t\t\t\t_, err = sc.queue.Insert(l)\n\t\t\t\t\treturn nil\n\t\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\ts.l.Printf(\"target %q failed internal cache query: %v\", sc.target, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t_, err = sc.queue.Insert(syncMarker{})\n}\n\nfunc (s *server) sendStreamingResults(sc *streamClient) {\n\tctx := sc.stream.Context()\n\tpeer, _ := peer.FromContext(ctx)\n\ts.l.Printf(\"sending streaming results from target %q to peer %q\", sc.target, peer.Addr)\n\tdefer s.subscribeRPCsem.Release(1)\n\tfor {\n\t\titem, dup, err := sc.queue.Next(ctx)\n\t\tif coalesce.IsClosedQueue(err) {\n\t\t\tsc.errChan <- nil\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tsc.errChan <- err\n\t\t\treturn\n\t\t}\n\t\tif _, ok := item.(syncMarker); ok {\n\t\t\terr = sc.stream.Send(&gnmi.SubscribeResponse{\n\t\t\t\tResponse: &gnmi.SubscribeResponse_SyncResponse{\n\t\t\t\t\tSyncResponse: true,\n\t\t\t\t}})\n\t\t\tif err != nil {\n\t\t\t\tsc.errChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tnode, ok := item.(*ctree.Leaf)\n\t\tif !ok || node == nil {\n\t\t\tsc.errChan <- status.Errorf(codes.Internal, \"invalid cache node: %+v\", item)\n\t\t\treturn\n\t\t}\n\t\terr = s.sendSubscribeResponse(&resp{\n\t\t\tstream: sc.stream,\n\t\t\tn:      node,\n\t\t\tdup:    dup,\n\t\t}, sc)\n\t\tif err != nil {\n\t\t\ts.l.Printf(\"target %q: failed sending subscribeResponse: %v\", sc.target, err)\n\t\t\tsc.errChan <- err\n\t\t\treturn\n\t\t}\n\t\t// TODO: check if target was deleted ? 
necessary ?\n\t}\n}\n\nfunc (s *server) handlePolledSubscription(sc *streamClient) {\n\ts.handleSubscriptionRequest(sc)\n\tvar err error\n\tfor {\n\t\tif sc.queue.IsClosed() {\n\t\t\treturn\n\t\t}\n\t\t_, err = sc.stream.Recv()\n\t\tif errors.Is(err, io.EOF) {\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\ts.l.Printf(\"target %q: failed poll subscription rcv: %v\", sc.target, err)\n\t\t\tsc.errChan <- err\n\t\t\treturn\n\t\t}\n\t\ts.l.Printf(\"target %q: repoll\", sc.target)\n\t\ts.handleSubscriptionRequest(sc)\n\t\ts.l.Printf(\"target %q: repoll done\", sc.target)\n\t}\n}\n\nfunc (s *server) sendSubscribeResponse(r *resp, _ *streamClient) error {\n\tnotif, err := makeSubscribeResponse(r.n.Value(), r.dup)\n\tif err != nil {\n\t\treturn status.Errorf(codes.Unknown, \"unknown error: %v\", err)\n\t}\n\t// No acls\n\treturn r.stream.Send(notif)\n}\n\nfunc makeSubscribeResponse(n interface{}, _ uint32) (*gnmi.SubscribeResponse, error) {\n\tvar notification *gnmi.Notification\n\tvar ok bool\n\tnotification, ok = n.(*gnmi.Notification)\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"invalid notification type: %#v\", n)\n\t}\n\n\treturn &gnmi.SubscribeResponse{\n\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\tUpdate: notification,\n\t\t},\n\t}, nil\n}\n"
  },
  {
    "path": "pkg/outputs/gnmi_output/gnmi_server_get.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage gnmi_output\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/peer\"\n\t\"google.golang.org/grpc/status\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/target\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n)\n\nfunc (s *server) Get(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) {\n\tok := s.unaryRPCsem.TryAcquire(1)\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.ResourceExhausted, \"max number of Unary RPC reached\")\n\t}\n\tdefer s.unaryRPCsem.Release(1)\n\n\tnumPaths := len(req.GetPath())\n\tif numPaths == 0 && req.GetPrefix() == nil {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"missing path\")\n\t}\n\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\torigins := make(map[string]struct{})\n\tfor _, p := range req.GetPath() {\n\t\torigins[p.GetOrigin()] = struct{}{}\n\t\tif p.GetOrigin() != \"gnmic\" {\n\t\t\tif _, ok := origins[\"gnmic\"]; ok {\n\t\t\t\treturn nil, status.Errorf(codes.InvalidArgument, \"combining `gnmic` origin with other origin values is not supported\")\n\t\t\t}\n\t\t}\n\t}\n\n\tif _, ok := origins[\"gnmic\"]; ok {\n\t\treturn s.handlegNMIcInternalGet(ctx, req)\n\t}\n\n\ttargetName := req.GetPrefix().GetTarget()\n\tpeer, _ := peer.FromContext(ctx)\n\ts.l.Printf(\"received Get request from %q to target %q\", peer.Addr, 
targetName)\n\n\ttargets, err := s.selectTargets(targetName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnumTargets := len(targets)\n\tif numTargets == 0 {\n\t\treturn nil, status.Errorf(codes.NotFound, \"unknown target %q\", targetName)\n\t}\n\tresults := make(chan *gnmi.Notification)\n\terrChan := make(chan error, numTargets)\n\n\tresponse := &gnmi.GetResponse{\n\t\t// assume one notification per path per target\n\t\tNotification: make([]*gnmi.Notification, 0, numTargets*numPaths),\n\t}\n\tdone := make(chan struct{})\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase notif, ok := <-results:\n\t\t\t\tif !ok {\n\t\t\t\t\tclose(done)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tresponse.Notification = append(response.Notification, notif)\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\twg := new(sync.WaitGroup)\n\twg.Add(numTargets)\n\tfor name, tc := range targets {\n\t\tgo func(name string, tc *types.TargetConfig) {\n\t\t\t// name = outputs.GetHost(name)\n\t\t\tdefer wg.Done()\n\t\t\tt := target.NewTarget(tc)\n\t\t\tctx, cancel := context.WithTimeout(ctx, tc.Timeout)\n\t\t\tdefer cancel()\n\t\t\terr := t.CreateGNMIClient(ctx)\n\t\t\tif err != nil {\n\t\t\t\ts.l.Printf(\"target %q err: %v\", name, err)\n\t\t\t\terrChan <- fmt.Errorf(\"target %q err: %v\", name, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcreq := proto.Clone(req).(*gnmi.GetRequest)\n\t\t\tif creq.GetPrefix() == nil {\n\t\t\t\tcreq.Prefix = new(gnmi.Path)\n\t\t\t}\n\t\t\tif creq.GetPrefix().GetTarget() == \"\" || creq.GetPrefix().GetTarget() == \"*\" {\n\t\t\t\tcreq.Prefix.Target = name\n\t\t\t}\n\t\t\tres, err := t.Get(ctx, creq)\n\t\t\tif err != nil {\n\t\t\t\ts.l.Printf(\"target %q err: %v\", name, err)\n\t\t\t\terrChan <- fmt.Errorf(\"target %q err: %v\", name, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, n := range res.GetNotification() {\n\t\t\t\tif n.GetPrefix() == nil {\n\t\t\t\t\tn.Prefix = 
new(gnmi.Path)\n\t\t\t\t}\n\t\t\t\tif n.GetPrefix().GetTarget() == \"\" {\n\t\t\t\t\tn.Prefix.Target = name\n\t\t\t\t}\n\t\t\t\tresults <- n\n\t\t\t}\n\t\t}(name, tc)\n\t}\n\twg.Wait()\n\tclose(results)\n\tclose(errChan)\n\tfor err := range errChan {\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"%v\", err)\n\t\t}\n\t}\n\t<-done\n\ts.l.Printf(\"sending GetResponse to %q: %+v\", peer.Addr, response)\n\treturn response, nil\n}\n\nfunc targetConfigToNotification(tc *types.TargetConfig) *gnmi.Notification {\n\tn := &gnmi.Notification{\n\t\tTimestamp: time.Now().UnixNano(),\n\t\tPrefix: &gnmi.Path{\n\t\t\tOrigin: \"gnmic\",\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{\n\t\t\t\t\tName: \"target\",\n\t\t\t\t\tKey:  map[string]string{\"name\": tc.Name},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tUpdate: []*gnmi.Update{\n\t\t\t{\n\t\t\t\tPath: &gnmi.Path{\n\t\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t\t{Name: \"address\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.Address},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tif tc.Username != nil {\n\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\tPath: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{Name: \"username\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: *tc.Username},\n\t\t\t},\n\t\t})\n\t}\n\tif tc.Insecure != nil {\n\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\tPath: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{Name: \"insecure\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: fmt.Sprint(*tc.Insecure)},\n\t\t\t},\n\t\t})\n\t}\n\tif tc.SkipVerify != nil {\n\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\tPath: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{Name: \"skip-verify\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: 
&gnmi.TypedValue_AsciiVal{AsciiVal: fmt.Sprint(*tc.SkipVerify)},\n\t\t\t},\n\t\t})\n\t}\n\tn.Update = append(n.Update, &gnmi.Update{\n\t\tPath: &gnmi.Path{\n\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t{Name: \"timeout\"},\n\t\t\t},\n\t\t},\n\t\tVal: &gnmi.TypedValue{\n\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.Timeout.String()},\n\t\t},\n\t})\n\tif tc.TLSCA != nil {\n\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\tPath: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{Name: \"tls-ca\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.TLSCAString()},\n\t\t\t},\n\t\t})\n\t}\n\tif tc.TLSCert != nil {\n\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\tPath: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{Name: \"tls-cert\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.TLSCertString()},\n\t\t\t},\n\t\t})\n\t}\n\tif tc.TLSKey != nil && tc.TLSKeyString() != \"NA\" {\n\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\tPath: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{Name: \"tls-key\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.TLSKeyString()},\n\t\t\t},\n\t\t})\n\t}\n\tif len(tc.Outputs) > 0 {\n\t\ttypedVals := make([]*gnmi.TypedValue, 0, len(tc.Subscriptions))\n\t\tfor _, out := range tc.Outputs {\n\t\t\ttypedVals = append(typedVals, &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: out},\n\t\t\t})\n\t\t}\n\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\tPath: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{Name: \"outputs\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_LeaflistVal{\n\t\t\t\t\tLeaflistVal: &gnmi.ScalarArray{\n\t\t\t\t\t\tElement: typedVals,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\tif len(tc.Subscriptions) > 0 {\n\t\ttypedVals := 
make([]*gnmi.TypedValue, 0, len(tc.Subscriptions))\n\t\tfor _, sub := range tc.Subscriptions {\n\t\t\ttypedVals = append(typedVals, &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_AsciiVal{AsciiVal: sub},\n\t\t\t})\n\t\t}\n\t\tn.Update = append(n.Update, &gnmi.Update{\n\t\t\tPath: &gnmi.Path{\n\t\t\t\tElem: []*gnmi.PathElem{\n\t\t\t\t\t{Name: \"subscriptions\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVal: &gnmi.TypedValue{\n\t\t\t\tValue: &gnmi.TypedValue_LeaflistVal{\n\t\t\t\t\tLeaflistVal: &gnmi.ScalarArray{\n\t\t\t\t\t\tElement: typedVals,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\treturn n\n}\n\nfunc (s *server) selectTargets(target string) (map[string]*types.TargetConfig, error) {\n\tif target == \"\" || target == \"*\" {\n\t\treturn s.targets, nil\n\t}\n\ttargetsNames := strings.Split(target, \",\")\n\ttargets := make(map[string]*types.TargetConfig)\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\nOUTER:\n\tfor i := range targetsNames {\n\t\tfor n, tc := range s.targets {\n\t\t\tif utils.GetHost(n) == targetsNames[i] {\n\t\t\t\ttargets[n] = tc\n\t\t\t\tcontinue OUTER\n\t\t\t}\n\t\t}\n\t\treturn nil, status.Errorf(codes.NotFound, \"target %q is not known\", targetsNames[i])\n\t}\n\treturn targets, nil\n}\n\nfunc (s *server) handlegNMIcInternalGet(_ context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) {\n\tif len(req.GetPath()) > 1 {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"only one path at a time is supported\")\n\t}\n\tif req.GetPath()[0].Elem[0].Name == \"targets\" {\n\t\tnotifs := make([]*gnmi.Notification, 0, len(s.targets))\n\t\tfor _, tc := range s.targets {\n\t\t\tnotifs = append(notifs, targetConfigToNotification(tc))\n\t\t}\n\t\treturn &gnmi.GetResponse{Notification: notifs}, nil\n\t}\n\treturn nil, status.Errorf(codes.InvalidArgument, \"unknown path\")\n}\n"
  },
  {
    "path": "pkg/outputs/gnmi_output/gnmi_server_set.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage gnmi_output\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/peer\"\n\t\"google.golang.org/grpc/status\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/api/target\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n)\n\nfunc (s *server) Set(ctx context.Context, req *gnmi.SetRequest) (*gnmi.SetResponse, error) {\n\tok := s.unaryRPCsem.TryAcquire(1)\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.ResourceExhausted, \"max number of Unary RPC reached\")\n\t}\n\tdefer s.unaryRPCsem.Release(1)\n\n\tnumUpdates := len(req.GetUpdate())\n\tnumReplaces := len(req.GetReplace())\n\tnumDeletes := len(req.GetDelete())\n\tif numUpdates+numReplaces+numDeletes == 0 {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"missing update/replace/delete path(s)\")\n\t}\n\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\ttargetName := req.GetPrefix().GetTarget()\n\tpeer, _ := peer.FromContext(ctx)\n\ts.l.Printf(\"received Set request from %q to target %q\", peer.Addr, targetName)\n\n\ttargets, err := s.selectTargets(targetName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnumTargets := len(targets)\n\tif numTargets == 0 {\n\t\treturn nil, status.Errorf(codes.NotFound, \"unknown target(s) %q\", targetName)\n\t}\n\tresults := make(chan *gnmi.UpdateResult)\n\terrChan := make(chan error, numTargets)\n\n\tresponse := 
&gnmi.SetResponse{\n\t\t// assume one update per target, per update/replace/delete\n\t\tResponse: make([]*gnmi.UpdateResult, 0, numTargets*(numUpdates+numReplaces+numDeletes)),\n\t}\n\tdone := make(chan struct{})\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase upd, ok := <-results:\n\t\t\t\tif !ok {\n\t\t\t\t\tresponse.Timestamp = time.Now().UnixNano()\n\t\t\t\t\tclose(done)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tresponse.Response = append(response.Response, upd)\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\twg := new(sync.WaitGroup)\n\twg.Add(numTargets)\n\tfor name, tc := range targets {\n\t\tgo func(name string, tc *types.TargetConfig) {\n\t\t\tname = utils.GetHost(name)\n\t\t\tdefer wg.Done()\n\t\t\tt := target.NewTarget(tc)\n\t\t\terr := t.CreateGNMIClient(ctx)\n\t\t\tif err != nil {\n\t\t\t\ts.l.Printf(\"target %q err: %v\", name, err)\n\t\t\t\terrChan <- fmt.Errorf(\"target %q err: %v\", name, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcreq := proto.Clone(req).(*gnmi.SetRequest)\n\t\t\tif creq.GetPrefix() == nil {\n\t\t\t\tcreq.Prefix = new(gnmi.Path)\n\t\t\t}\n\t\t\tif creq.GetPrefix().GetTarget() == \"\" || creq.GetPrefix().GetTarget() == \"*\" {\n\t\t\t\tcreq.Prefix.Target = name\n\t\t\t}\n\t\t\tres, err := t.Set(ctx, creq)\n\t\t\tif err != nil {\n\t\t\t\ts.l.Printf(\"target %q err: %v\", name, err)\n\t\t\t\terrChan <- fmt.Errorf(\"target %q err: %v\", name, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, upd := range res.GetResponse() {\n\t\t\t\tupd.Path.Target = name\n\t\t\t\tresults <- upd\n\t\t\t}\n\t\t}(name, tc)\n\t}\n\twg.Wait()\n\tclose(results)\n\tclose(errChan)\n\tfor err := range errChan {\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"%v\", err)\n\t\t}\n\t}\n\t<-done\n\ts.l.Printf(\"sending SetResponse to %q: %+v\", peer.Addr, response)\n\treturn response, nil\n}\n"
  },
  {
    "path": "pkg/outputs/gnmi_output/gnmi_server_subscribe.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage gnmi_output\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/peer\"\n\t\"google.golang.org/grpc/status\"\n\n\t\"github.com/openconfig/gnmi/coalesce\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n)\n\nfunc (s *server) Subscribe(stream gnmi.GNMI_SubscribeServer) error {\n\tsc := &streamClient{\n\t\tstream: stream,\n\t}\n\tvar err error\n\tsc.req, err = stream.Recv()\n\tswitch {\n\tcase err == io.EOF:\n\t\treturn nil\n\tcase err != nil:\n\t\treturn err\n\tcase sc.req.GetSubscribe() == nil:\n\t\treturn status.Errorf(codes.InvalidArgument, \"the subscribe request must contain a subscription definition\")\n\t}\n\tsc.target = sc.req.GetSubscribe().GetPrefix().GetTarget()\n\tif sc.target == \"\" {\n\t\tsc.target = \"*\"\n\t\tsub := sc.req.GetSubscribe()\n\t\tif sub.GetPrefix() == nil {\n\t\t\tsub.Prefix = &gnmi.Path{Target: \"*\"}\n\t\t} else {\n\t\t\tsub.Prefix.Target = \"*\"\n\t\t}\n\t}\n\tif !s.c.HasTarget(sc.target) {\n\t\treturn status.Errorf(codes.NotFound, \"target %q not found\", sc.target)\n\t}\n\tpeer, _ := peer.FromContext(stream.Context())\n\ts.l.Printf(\"received a subscribe request mode=%v from %q for target %q\", sc.req.GetSubscribe().GetMode(), peer.Addr, sc.target)\n\tdefer s.l.Printf(\"subscription from peer %q terminated\", peer.Addr)\n\n\tsc.queue = coalesce.NewQueue()\n\terrChan := make(chan error, 3)\n\tsc.errChan = errChan\n\n\ts.l.Printf(\"acquiring subscription spot for target %q\", sc.target)\n\tok := 
s.subscribeRPCsem.TryAcquire(1)\n\tif !ok {\n\t\treturn status.Errorf(codes.ResourceExhausted, \"could not acquire a subscription spot\")\n\t}\n\ts.l.Printf(\"acquired subscription spot for target %q\", sc.target)\n\n\tswitch sc.req.GetSubscribe().GetMode() {\n\tcase gnmi.SubscriptionList_ONCE:\n\t\tgo func() {\n\t\t\ts.handleSubscriptionRequest(sc)\n\t\t\tsc.queue.Close()\n\t\t}()\n\tcase gnmi.SubscriptionList_POLL:\n\t\tgo s.handlePolledSubscription(sc)\n\tcase gnmi.SubscriptionList_STREAM:\n\t\tif sc.req.GetSubscribe().GetUpdatesOnly() {\n\t\t\tsc.queue.Insert(syncMarker{})\n\t\t}\n\t\tremove := addSubscription(s.m, sc.req.GetSubscribe(), &matchClient{queue: sc.queue})\n\t\tdefer remove()\n\t\tif !sc.req.GetSubscribe().GetUpdatesOnly() {\n\t\t\tgo s.handleSubscriptionRequest(sc)\n\t\t}\n\tdefault:\n\t\treturn status.Errorf(codes.InvalidArgument, \"unrecognized subscription mode: %v\", sc.req.GetSubscribe().GetMode())\n\t}\n\t// send all nodes added to queue\n\tgo s.sendStreamingResults(sc)\n\n\tvar errs = make([]error, 0)\n\tfor err := range errChan {\n\t\terrs = append(errs, err)\n\t}\n\tif len(errs) > 0 {\n\t\tsb := strings.Builder{}\n\t\tsb.WriteString(\"multiple errors occurred:\\n\")\n\t\tfor _, err := range errs {\n\t\t\tsb.WriteString(fmt.Sprintf(\"- %v\\n\", err))\n\t\t}\n\t\treturn fmt.Errorf(\"%v\", sb)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/outputs/influxdb_output/influxdb_cache.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage influxdb_output\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\n\t\"github.com/openconfig/gnmic/pkg/cache\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n)\n\nfunc (i *influxDBOutput) initCache(ctx context.Context, name string) error {\n\tvar err error\n\tcfg := i.cfg.Load()\n\tif cfg == nil {\n\t\treturn fmt.Errorf(\"config is nil\")\n\t}\n\ti.gnmiCache, err = cache.New(cfg.CacheConfig, cache.WithLogger(i.logger))\n\tif err != nil {\n\t\treturn err\n\t}\n\ti.cacheTicker = time.NewTicker(cfg.CacheFlushTimer)\n\ti.done = make(chan struct{})\n\tgo i.runCache(ctx, name)\n\treturn nil\n}\n\nfunc (i *influxDBOutput) stopCache() {\n\ti.cacheTicker.Stop()\n\tclose(i.done)\n\ti.gnmiCache.Stop()\n}\n\nfunc (i *influxDBOutput) runCache(ctx context.Context, name string) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-i.done:\n\t\t\treturn\n\t\tcase <-i.cacheTicker.C:\n\t\t\tcfg := i.cfg.Load()\n\t\t\tif cfg == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif cfg.Debug {\n\t\t\t\ti.logger.Printf(\"cache timer tick\")\n\t\t\t}\n\t\t\ti.readCache(ctx)\n\t\t}\n\t}\n}\n\nfunc (i *influxDBOutput) readCache(ctx context.Context) {\n\tnotifications, err := i.gnmiCache.ReadAll()\n\tif err != nil {\n\t\ti.logger.Printf(\"failed to read from cache: %v\", err)\n\t\treturn\n\t}\n\tcfg := i.cfg.Load()\n\tdc := i.dynCfg.Load()\n\n\tif cfg == nil || dc == nil {\n\t\treturn\n\t}\n\n\tif cfg.Debug 
{\n\t\ti.logger.Printf(\"read notifications: %+v\", notifications)\n\t}\n\n\tevents := make([]*formatters.EventMsg, 0, len(notifications))\n\tfor subName, notifs := range notifications {\n\t\t// build events without processors\n\t\tfor _, notif := range notifs {\n\t\t\tievents, err := formatters.ResponseToEventMsgs(subName,\n\t\t\t\t&gnmi.SubscribeResponse{\n\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{Update: notif},\n\t\t\t\t},\n\t\t\t\toutputs.Meta{\"subscription-name\": subName})\n\t\t\tif err != nil {\n\t\t\t\ti.logger.Printf(\"failed to convert gNMI notifications to events: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tevents = append(events, ievents...)\n\t\t}\n\t}\n\n\tfor _, proc := range dc.evps {\n\t\tevents = proc.Apply(events...)\n\t}\n\n\tresetChan := i.reset.Load()\n\tif resetChan == nil {\n\t\treturn\n\t}\n\tfor _, ev := range events {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-*resetChan:\n\t\t\treturn\n\t\tcase i.eventChan <- ev:\n\t\t}\n\t}\n}\n\nfunc cacheCfgEqual(a, b *cache.Config) bool {\n\tif a == nil && b == nil {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\t// Compare the fields you actually use; example:\n\treturn a.Type == b.Type &&\n\t\ta.Expiration == b.Expiration &&\n\t\ta.Debug == b.Debug &&\n\t\ta.Address == b.Address &&\n\t\ta.Timeout == b.Timeout &&\n\t\ta.Username == b.Username &&\n\t\ta.Password == b.Password &&\n\t\ta.MaxBytes == b.MaxBytes &&\n\t\ta.MaxMsgsPerSubscription == b.MaxMsgsPerSubscription &&\n\t\ta.FetchBatchSize == b.FetchBatchSize &&\n\t\ta.FetchWaitTime == b.FetchWaitTime\n}\n"
  },
  {
    "path": "pkg/outputs/influxdb_output/influxdb_output.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage influxdb_output\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"maps\"\n\t\"math\"\n\t\"net/url\"\n\t\"os\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"google.golang.org/protobuf/proto\"\n\n\tinfluxdb2 \"github.com/influxdata/influxdb-client-go/v2\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/cache\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\tgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tdefaultURL             = \"http://localhost:8086\"\n\tdefaultBatchSize       = 1000\n\tdefaultFlushTimer      = 10 * time.Second\n\tminHealthCheckPeriod   = 30 * time.Second\n\tdefaultCacheFlushTimer = 5 * time.Second\n\n\tnumWorkers     = 1\n\tloggingPrefix  = \"[influxdb_output:%s] \"\n\tdeleteTagValue = \"true\"\n)\n\nfunc init() {\n\toutputs.Register(\"influxdb\", func() outputs.Output {\n\t\treturn &influxDBOutput{}\n\t})\n}\n\ntype influxDBOutput struct {\n\toutputs.BaseOutput\n\n\tcfg    *atomic.Pointer[Config]\n\tdynCfg *atomic.Pointer[dynConfig]\n\tclient *atomic.Pointer[influxdb2.Client]\n\n\tlogger    *log.Logger\n\tcancelFn  context.CancelFunc\n\teventChan chan *formatters.EventMsg\n\n\treset    
*atomic.Pointer[chan struct{}]\n\tstartSig *atomic.Pointer[chan struct{}]\n\twasUP    atomic.Bool\n\n\tdbVersion atomic.Value // stores string\n\n\tgnmiCache   cache.Cache\n\tcacheTicker *time.Ticker\n\tdone        chan struct{}\n\n\tstore        store.Store[any]\n\thealthCancel context.CancelFunc\n}\n\nfunc (i *influxDBOutput) init() {\n\ti.cfg = new(atomic.Pointer[Config])\n\ti.dynCfg = new(atomic.Pointer[dynConfig])\n\ti.client = new(atomic.Pointer[influxdb2.Client])\n\ti.eventChan = make(chan *formatters.EventMsg)\n\ti.reset = new(atomic.Pointer[chan struct{}])\n\ti.startSig = new(atomic.Pointer[chan struct{}])\n\ti.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)\n}\n\ntype dynConfig struct {\n\ttargetTpl *template.Template\n\tevps      []formatters.EventProcessor\n}\n\ntype Config struct {\n\tName               string           `mapstructure:\"name,omitempty\"`\n\tURL                string           `mapstructure:\"url,omitempty\"`\n\tOrg                string           `mapstructure:\"org,omitempty\"`\n\tBucket             string           `mapstructure:\"bucket,omitempty\"`\n\tToken              string           `mapstructure:\"token,omitempty\"`\n\tBatchSize          uint             `mapstructure:\"batch-size,omitempty\"`\n\tFlushTimer         time.Duration    `mapstructure:\"flush-timer,omitempty\"`\n\tUseGzip            bool             `mapstructure:\"use-gzip,omitempty\"`\n\tEnableTLS          bool             `mapstructure:\"enable-tls,omitempty\"`\n\tTLS                *types.TLSConfig `mapstructure:\"tls,omitempty\" json:\"tls,omitempty\"`\n\tHealthCheckPeriod  time.Duration    `mapstructure:\"health-check-period,omitempty\"`\n\tDebug              bool             `mapstructure:\"debug,omitempty\"`\n\tAddTarget          string           `mapstructure:\"add-target,omitempty\"`\n\tTargetTemplate     string           `mapstructure:\"target-template,omitempty\"`\n\tEventProcessors    []string         
`mapstructure:\"event-processors,omitempty\"`\n\tEnableMetrics      bool             `mapstructure:\"enable-metrics,omitempty\"`\n\tOverrideTimestamps bool             `mapstructure:\"override-timestamps,omitempty\"`\n\tTimestampPrecision string           `mapstructure:\"timestamp-precision,omitempty\"`\n\tCacheConfig        *cache.Config    `mapstructure:\"cache,omitempty\"`\n\tCacheFlushTimer    time.Duration    `mapstructure:\"cache-flush-timer,omitempty\"`\n\tDeleteTag          string           `mapstructure:\"delete-tag,omitempty\"`\n}\n\nfunc (k *influxDBOutput) String() string {\n\tcfg := k.cfg.Load()\n\tif cfg == nil {\n\t\treturn \"\"\n\t}\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (i *influxDBOutput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := gutils.GetConfigMaps(i.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tevps, err := formatters.MakeEventProcessors(\n\t\tlogger,\n\t\teventProcessors,\n\t\tps,\n\t\ttcs,\n\t\tacts,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn evps, nil\n}\n\nfunc (i *influxDBOutput) setLogger(logger *log.Logger) {\n\tif logger != nil && i.logger != nil {\n\t\ti.logger.SetOutput(logger.Writer())\n\t\ti.logger.SetFlags(logger.Flags())\n\t}\n}\n\nfunc (i *influxDBOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error {\n\ti.init() // init struct fields\n\tnewCfg := new(Config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\ti.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name))\n\n\toptions := &outputs.OutputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ti.store = options.Store\n\n\tif newCfg.Name == \"\" {\n\t\tnewCfg.Name = name\n\t}\n\n\t// apply logger\n\ti.setLogger(options.Logger)\n\n\t// set 
defaults\n\ti.setDefaultsFor(newCfg)\n\n\tif _, err := url.Parse(newCfg.URL); err != nil {\n\t\treturn fmt.Errorf(\"invalid url: %w\", err)\n\t}\n\n\t// store config\n\ti.cfg.Store(newCfg)\n\n\t// build dynamic config\n\tdc := new(dynConfig)\n\n\t// initialize event processors\n\tdc.evps, err = i.buildEventProcessors(options.Logger, newCfg.EventProcessors)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// initialize template\n\tif newCfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tdc.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\n\ti.dynCfg.Store(dc)\n\n\t// initialize cache\n\tif newCfg.CacheConfig != nil {\n\t\terr = i.initCache(ctx, name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// initialize reset and startSig channels\n\tresetChan := make(chan struct{})\n\ti.reset.Store(&resetChan)\n\tstartSigChan := make(chan struct{})\n\ti.startSig.Store(&startSigChan)\n\n\tctx, i.cancelFn = context.WithCancel(ctx)\n\n\tinfluxOpts, err := clientOptsFor(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// initialize influxdb client\nCRCLIENT:\n\tif ctx.Err() != nil {\n\t\treturn ctx.Err()\n\t}\n\tnewClient := influxdb2.NewClientWithOptions(newCfg.URL, newCfg.Token, influxOpts)\n\ti.client.Store(&newClient)\n\n\t// start influx health check\n\tif newCfg.HealthCheckPeriod > 0 {\n\t\terr = i.health(ctx)\n\t\tif err != nil {\n\t\t\ti.logger.Printf(\"failed to check influxdb health: %v\", err)\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t\tgoto CRCLIENT\n\t\t}\n\t\thcCtx, hcCancel := context.WithCancel(ctx)\n\t\ti.healthCancel = hcCancel\n\t\tgo i.healthCheck(hcCtx)\n\t}\n\n\ti.wasUP.Store(true)\n\ti.logger.Printf(\"initialized influxdb client: %s\", i.String())\n\n\tfor k := 0; k < numWorkers; k++ {\n\t\tgo i.worker(ctx, k)\n\t}\n\tgo func() 
{\n\t\t<-ctx.Done()\n\t\ti.Close()\n\t}()\n\treturn nil\n}\n\nfunc (i *influxDBOutput) setDefaultsFor(c *Config) {\n\tif c.URL == \"\" {\n\t\tc.URL = defaultURL\n\t}\n\tif c.BatchSize == 0 {\n\t\tc.BatchSize = defaultBatchSize\n\t}\n\tif c.FlushTimer == 0 {\n\t\tc.FlushTimer = defaultFlushTimer\n\t}\n\tif c.HealthCheckPeriod != 0 && c.HealthCheckPeriod < minHealthCheckPeriod {\n\t\tc.HealthCheckPeriod = minHealthCheckPeriod\n\t}\n\tif c.CacheConfig != nil {\n\t\tif c.CacheFlushTimer == 0 {\n\t\t\tc.CacheFlushTimer = defaultCacheFlushTimer\n\t\t}\n\t}\n}\n\n// Build influx options from an arbitrary config (no side effects on i.cfg)\nfunc clientOptsFor(c *Config) (*influxdb2.Options, error) {\n\tiopts := influxdb2.DefaultOptions().\n\t\tSetUseGZip(c.UseGzip).\n\t\tSetBatchSize(c.BatchSize).\n\t\tSetFlushInterval(uint(c.FlushTimer.Milliseconds()))\n\n\t// TLS from explicit TLS config\n\tif c.TLS != nil {\n\t\ttlsConfig, err := utils.NewTLSConfig(\n\t\t\tc.TLS.CaFile,\n\t\t\tc.TLS.CertFile,\n\t\t\tc.TLS.KeyFile,\n\t\t\t\"\",\n\t\t\tc.TLS.SkipVerify,\n\t\t\tfalse,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tiopts.SetTLSConfig(tlsConfig)\n\t}\n\n\t// Legacy \"EnableTLS\" flag (insecure)\n\tif c.EnableTLS {\n\t\tiopts.SetTLSConfig(&tls.Config{InsecureSkipVerify: true})\n\t}\n\n\tswitch c.TimestampPrecision {\n\tcase \"s\":\n\t\tiopts.SetPrecision(time.Second)\n\tcase \"ms\":\n\t\tiopts.SetPrecision(time.Millisecond)\n\tcase \"us\":\n\t\tiopts.SetPrecision(time.Microsecond)\n\t}\n\n\tif c.Debug {\n\t\tiopts.SetLogLevel(3)\n\t}\n\treturn iopts, nil\n}\n\nfunc (i *influxDBOutput) Validate(cfg map[string]any) error {\n\tncfg := new(Config)\n\terr := outputs.DecodeConfig(cfg, ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := url.Parse(ncfg.URL); err != nil {\n\t\treturn fmt.Errorf(\"invalid url: %w\", err)\n\t}\n\t_, err = gtemplate.CreateTemplate(\"target-template\", ncfg.TargetTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn 
nil\n}\n\nfunc (i *influxDBOutput) Update(ctx context.Context, cfg map[string]any) error {\n\tnewCfg := new(Config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcurrCfg := i.cfg.Load()\n\tif newCfg.Name == \"\" && currCfg != nil {\n\t\tnewCfg.Name = currCfg.Name\n\t}\n\n\ti.setDefaultsFor(newCfg)\n\n\t// check if event processors changed\n\trebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0\n\n\t// rebuild dynamic config\n\tdc := new(dynConfig)\n\n\t// rebuild templates\n\tif newCfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tt, err := gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = t.Funcs(outputs.TemplateFuncs)\n\t} else {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t}\n\n\t// rebuild event processors if needed\n\tprevDC := i.dynCfg.Load()\n\tif rebuildProcessors {\n\t\tdc.evps, err = i.buildEventProcessors(i.logger, newCfg.EventProcessors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\n\t// store new dynamic config\n\ti.dynCfg.Store(dc)\n\t// store new config\n\ti.cfg.Store(newCfg)\n\t// check if client needs rebuild\n\tneedsClientRebuild := clientNeedsRebuild(currCfg, newCfg)\n\n\tif needsClientRebuild {\n\t\t// rebuild influxdb client options\n\t\tiopts, err := clientOptsFor(newCfg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// rebuild influxdb client\n\t\tnewClient := influxdb2.NewClientWithOptions(newCfg.URL, newCfg.Token, iopts)\n\n\t\t// health check if enabled\n\t\tif newCfg.HealthCheckPeriod > 0 {\n\t\t\tif _, err := newClient.Health(ctx); err != nil {\n\t\t\t\t// do not return error, continue\n\t\t\t\ti.logger.Printf(\"update: influx health probe failed (continuing): %v\", err)\n\t\t\t}\n\t\t}\n\n\t\t// swap client\n\t\toldClientPtr 
:= i.client.Swap(&newClient)\n\t\toldClient := *oldClientPtr\n\n\t\t// close old client\n\t\tif oldClient != nil {\n\t\t\toldClient.Close()\n\t\t}\n\n\t\t// signal workers to rebuild their write APIs\n\t\toldReset := i.reset.Load()\n\t\tnewResetChan := make(chan struct{})\n\t\ti.reset.Store(&newResetChan)\n\t\tclose(*oldReset)\n\t}\n\n\t// cache toggle\n\toldHadCache := currCfg != nil && currCfg.CacheConfig != nil\n\tnewHasCache := newCfg.CacheConfig != nil\n\tswitch {\n\tcase oldHadCache && !newHasCache:\n\t\t// stop old cache if present\n\t\ti.stopCache()\n\tcase !oldHadCache && newHasCache:\n\t\t// init new cache if requested\n\t\tif err := i.initCache(ctx, newCfg.Name); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase oldHadCache && newHasCache:\n\t\t// check if cache config changed\n\t\tsameCacheConfig := cacheCfgEqual(currCfg.CacheConfig, newCfg.CacheConfig)\n\t\tif sameCacheConfig {\n\t\t\tif currCfg.CacheFlushTimer != newCfg.CacheFlushTimer {\n\t\t\t\t// change flush timer\n\t\t\t\tif i.cacheTicker != nil {\n\t\t\t\t\ti.cacheTicker.Stop()\n\t\t\t\t}\n\t\t\t\ti.cacheTicker = time.NewTicker(newCfg.CacheFlushTimer)\n\t\t\t}\n\t\t} else {\n\t\t\t// cache config changed, stop old cache and init new cache\n\t\t\ti.stopCache()\n\t\t\tif err := i.initCache(ctx, newCfg.Name); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// handle health check changes\n\toldPeriod := time.Duration(0)\n\tif currCfg != nil {\n\t\toldPeriod = currCfg.HealthCheckPeriod\n\t}\n\tnewPeriod := newCfg.HealthCheckPeriod\n\tperiodChanged := oldPeriod != newPeriod\n\tenabledChanged := (oldPeriod == 0) != (newPeriod == 0)\n\n\tif enabledChanged || periodChanged {\n\t\tif i.healthCancel != nil {\n\t\t\ti.healthCancel()\n\t\t\ti.healthCancel = nil\n\t\t}\n\t\tif newPeriod > 0 {\n\t\t\t_ = i.health(ctx)\n\t\t\thcCtx, hcCancel := context.WithCancel(ctx)\n\t\t\ti.healthCancel = hcCancel\n\t\t\tgo i.healthCheck(hcCtx)\n\t\t}\n\t}\n\n\ti.logger.Printf(\"updated influxdb output: %s\", 
i.String())\n\treturn nil\n}\n\nfunc (i *influxDBOutput) UpdateProcessor(name string, pcfg map[string]any) error {\n\tcfg := i.cfg.Load()\n\tdc := i.dynCfg.Load()\n\n\tnewEvps, changed, err := outputs.UpdateProcessorInSlice(\n\t\ti.logger,\n\t\ti.store,\n\t\tcfg.EventProcessors,\n\t\tdc.evps,\n\t\tname,\n\t\tpcfg,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif changed {\n\t\tnewDC := *dc\n\t\tnewDC.evps = newEvps\n\t\ti.dynCfg.Store(&newDC)\n\t\ti.logger.Printf(\"updated event processor %s\", name)\n\t}\n\treturn nil\n}\n\nfunc (i *influxDBOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) {\n\tif rsp == nil {\n\t\treturn\n\t}\n\n\tcfg := i.cfg.Load()\n\tdc := i.dynCfg.Load()\n\tresetChan := i.reset.Load()\n\n\tif cfg == nil || dc == nil || resetChan == nil {\n\t\treturn\n\t}\n\n\tvar err error\n\trsp, err = outputs.AddSubscriptionTarget(rsp, meta, cfg.AddTarget, dc.targetTpl)\n\tif err != nil {\n\t\ti.logger.Printf(\"failed to add target to the response: %v\", err)\n\t}\n\n\tswitch rsp := rsp.(type) {\n\tcase *gnmi.SubscribeResponse:\n\t\tmeasName := \"default\"\n\t\tif subName, ok := meta[\"subscription-name\"]; ok {\n\t\t\tmeasName = subName\n\t\t}\n\t\tif i.gnmiCache != nil {\n\t\t\ti.gnmiCache.Write(ctx, measName, rsp)\n\t\t\treturn\n\t\t}\n\t\tevents, err := formatters.ResponseToEventMsgs(measName, rsp, meta, dc.evps...)\n\t\tif err != nil {\n\t\t\ti.logger.Printf(\"failed to convert message to event: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tfor _, ev := range events {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-*resetChan:\n\t\t\t\treturn\n\t\t\tcase i.eventChan <- ev:\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (i *influxDBOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {\n\tdc := i.dynCfg.Load()\n\tresetChan := i.reset.Load()\n\n\tif dc == nil || resetChan == nil {\n\t\treturn\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tcase <-*resetChan:\n\t\treturn\n\tdefault:\n\t\tvar evs = 
[]*formatters.EventMsg{ev}\n\t\tfor _, proc := range dc.evps {\n\t\t\tevs = proc.Apply(evs...)\n\t\t}\n\t\tfor _, pev := range evs {\n\t\t\ti.eventChan <- pev\n\t\t}\n\t}\n}\n\nfunc (i *influxDBOutput) Close() error {\n\ti.logger.Printf(\"closing client...\")\n\n\tcfg := i.cfg.Load()\n\tif cfg != nil && cfg.CacheConfig != nil {\n\t\ti.stopCache()\n\t}\n\tif i.healthCancel != nil {\n\t\ti.healthCancel()\n\t\ti.healthCancel = nil\n\t}\n\ti.cancelFn()\n\n\tclientPtr := i.client.Load()\n\tif *clientPtr != nil {\n\t\t(*clientPtr).Close()\n\t}\n\treset := i.reset.Load()\n\tif reset != nil {\n\t\tselect {\n\t\tcase <-*reset:\n\t\tdefault:\n\t\t\tclose(*reset) // unblock Write() and WriteEvent()\n\t\t}\n\t}\n\ti.logger.Printf(\"closed.\")\n\treturn nil\n}\n\nfunc (i *influxDBOutput) healthCheck(ctx context.Context) {\n\tcfg := i.cfg.Load()\n\tif cfg == nil {\n\t\treturn\n\t}\n\n\tticker := time.NewTicker(cfg.HealthCheckPeriod)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\ti.health(ctx)\n\t\t}\n\t}\n}\n\nfunc (i *influxDBOutput) health(ctx context.Context) error {\n\tclientPtr := i.client.Load()\n\tif clientPtr == nil || *clientPtr == nil {\n\t\treturn fmt.Errorf(\"client not initialized\")\n\t}\n\n\tres, err := (*clientPtr).Health(ctx)\n\tif err != nil {\n\t\ti.logger.Printf(\"failed health check: %v\", err)\n\t\tif i.wasUP.Load() {\n\t\t\toldReset := i.reset.Load()\n\t\t\tnewResetChan := make(chan struct{})\n\t\t\ti.reset.Store(&newResetChan)\n\t\t\tclose(*oldReset)\n\t\t}\n\t\treturn err\n\t}\n\n\tif res != nil {\n\t\tif res.Version != nil {\n\t\t\ti.dbVersion.Store(*res.Version)\n\t\t}\n\t\tb, err := json.Marshal(res)\n\t\tif err != nil {\n\t\t\ti.logger.Printf(\"failed to marshal health check result: %v\", err)\n\t\t\ti.logger.Printf(\"health check result: %+v\", res)\n\t\t\tif i.wasUP.Load() {\n\t\t\t\toldReset := i.reset.Load()\n\t\t\t\tnewResetChan := make(chan 
struct{})\n\t\t\t\ti.reset.Store(&newResetChan)\n\t\t\t\tclose(*oldReset)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\ti.wasUP.Store(true)\n\t\toldStartSig := i.startSig.Load()\n\t\tnewStartSigChan := make(chan struct{})\n\t\ti.startSig.Store(&newStartSigChan)\n\t\tclose(*oldStartSig)\n\t\ti.logger.Printf(\"health check result: %s\", string(b))\n\t\treturn nil\n\t}\n\n\ti.wasUP.Store(true)\n\toldStartSig := i.startSig.Load()\n\tnewStartSigChan := make(chan struct{})\n\ti.startSig.Store(&newStartSigChan)\n\tclose(*oldStartSig)\n\ti.logger.Print(\"health check result is nil\")\n\treturn nil\n}\n\nfunc (i *influxDBOutput) worker(ctx context.Context, idx int) {\n\tfirstStart := true\nSTART:\n\tif ctx.Err() != nil {\n\t\ti.logger.Printf(\"worker-%d err=%v\", idx, ctx.Err())\n\t\treturn\n\t}\n\n\tcfg := i.cfg.Load()\n\tif cfg == nil {\n\t\ti.logger.Printf(\"worker-%d: config not initialized\", idx)\n\t\treturn\n\t}\n\n\tif !firstStart && cfg.HealthCheckPeriod > 0 {\n\t\ti.logger.Printf(\"worker-%d waiting for client recovery\", idx)\n\t\tstartSigChan := i.startSig.Load()\n\t\tif startSigChan != nil {\n\t\t\t<-*startSigChan\n\t\t}\n\t}\n\n\ti.logger.Printf(\"starting worker-%d\", idx)\n\n\tclientPtr := i.client.Load()\n\tif clientPtr == nil || *clientPtr == nil {\n\t\ti.logger.Printf(\"worker-%d: client not initialized\", idx)\n\t\treturn\n\t}\n\tclient := *clientPtr\n\n\tresetChan := i.reset.Load()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tif ctx.Err() != nil {\n\t\t\t\ti.logger.Printf(\"worker-%d err=%v\", idx, ctx.Err())\n\t\t\t}\n\t\t\ti.logger.Printf(\"worker-%d terminating...\", idx)\n\t\t\treturn\n\t\tcase ev := <-i.eventChan:\n\t\t\t// Reload config for each event to get fresh values\n\t\t\tcfg := i.cfg.Load()\n\t\t\tif cfg == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif len(ev.Values) == 0 && len(ev.Deletes) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(ev.Values) == 0 && cfg.DeleteTag == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor n, v := range 
ev.Values {\n\t\t\t\tswitch v := v.(type) {\n\t\t\t\t//lint:ignore SA1019 still need DecimalVal for backward compatibility\n\t\t\t\tcase *gnmi.Decimal64:\n\t\t\t\t\tev.Values[n] = float64(v.Digits) / math.Pow10(int(v.Precision))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ev.Timestamp == 0 || cfg.OverrideTimestamps {\n\t\t\t\tev.Timestamp = time.Now().UnixNano()\n\t\t\t}\n\n\t\t\tif subscriptionName, ok := ev.Tags[\"subscription-name\"]; ok {\n\t\t\t\tev.Name = subscriptionName\n\t\t\t\tdelete(ev.Tags, \"subscription-name\")\n\t\t\t}\n\n\t\t\tif len(ev.Values) > 0 {\n\t\t\t\ti.convertUints(ev)\n\t\t\t\tclient.WriteAPI(cfg.Org, cfg.Bucket).\n\t\t\t\t\tWritePoint(influxdb2.NewPoint(ev.Name, ev.Tags, ev.Values, time.Unix(0, ev.Timestamp)))\n\t\t\t}\n\n\t\t\tif len(ev.Deletes) > 0 && cfg.DeleteTag != \"\" {\n\t\t\t\ttags := make(map[string]string, len(ev.Tags))\n\t\t\t\tmaps.Copy(tags, ev.Tags)\n\t\t\t\ttags[cfg.DeleteTag] = deleteTagValue\n\t\t\t\tvalues := make(map[string]any, len(ev.Deletes))\n\t\t\t\tfor _, del := range ev.Deletes {\n\t\t\t\t\tvalues[del] = \"\"\n\t\t\t\t}\n\t\t\t\tclient.WriteAPI(cfg.Org, cfg.Bucket).\n\t\t\t\t\tWritePoint(influxdb2.NewPoint(ev.Name, tags, values, time.Unix(0, ev.Timestamp)))\n\t\t\t}\n\t\tcase <-*resetChan:\n\t\t\tfirstStart = false\n\t\t\ti.logger.Printf(\"resetting worker-%d...\", idx)\n\t\t\tgoto START\n\t\tcase err := <-client.WriteAPI(cfg.Org, cfg.Bucket).Errors():\n\t\t\ti.logger.Printf(\"worker-%d write error: %v\", idx, err)\n\t\t}\n\t}\n}\n\nfunc (i *influxDBOutput) convertUints(ev *formatters.EventMsg) {\n\tdbVer := i.dbVersion.Load()\n\tif dbVer == nil {\n\t\treturn\n\t}\n\tdbVersion, ok := dbVer.(string)\n\tif !ok || !strings.HasPrefix(dbVersion, \"1.8\") {\n\t\treturn\n\t}\n\n\tfor k, v := range ev.Values {\n\t\tswitch v := v.(type) {\n\t\tcase uint:\n\t\t\tev.Values[k] = int(v)\n\t\tcase uint8:\n\t\t\tev.Values[k] = int(v)\n\t\tcase uint16:\n\t\t\tev.Values[k] = int(v)\n\t\tcase uint32:\n\t\t\tev.Values[k] = int(v)\n\t\tcase 
uint64:\n\t\t\tev.Values[k] = int(v)\n\t\t}\n\t}\n}\n\nfunc clientNeedsRebuild(old, new *Config) bool {\n\tif old == nil || new == nil {\n\t\treturn true\n\t}\n\treturn old.URL != new.URL ||\n\t\told.Token != new.Token ||\n\t\told.BatchSize != new.BatchSize ||\n\t\told.FlushTimer != new.FlushTimer ||\n\t\told.UseGzip != new.UseGzip ||\n\t\told.EnableTLS != new.EnableTLS ||\n\t\t!old.TLS.Equal(new.TLS) ||\n\t\told.TimestampPrecision != new.TimestampPrecision ||\n\t\told.Debug != new.Debug\n}\n"
  },
  {
    "path": "pkg/outputs/kafka_output/kafka_metrics.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage kafka_output\n\nimport (\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar registerMetricsOnce sync.Once\n\nvar kafkaNumberOfSentMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"kafka_output\",\n\tName:      \"number_of_kafka_msgs_sent_success_total\",\n\tHelp:      \"Number of msgs successfully sent by gnmic kafka output\",\n}, []string{\"name\", \"producer_id\"})\n\nvar kafkaNumberOfSentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"kafka_output\",\n\tName:      \"number_of_written_kafka_bytes_total\",\n\tHelp:      \"Number of bytes written by gnmic kafka output\",\n}, []string{\"name\", \"producer_id\"})\n\nvar kafkaNumberOfFailSendMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"kafka_output\",\n\tName:      \"number_of_kafka_msgs_sent_fail_total\",\n\tHelp:      \"Number of failed msgs sent by gnmic kafka output\",\n}, []string{\"name\", \"producer_id\", \"reason\"})\n\nvar kafkaSendDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"kafka_output\",\n\tName:      \"msg_send_duration_ns\",\n\tHelp:      \"gnmic kafka output send duration in ns\",\n}, []string{\"name\", \"producer_id\"})\n\nfunc (k *kafkaOutput) initMetrics(name string) {\n\tkafkaNumberOfSentMsgs.WithLabelValues(name, \"\").Add(0)\n\tkafkaNumberOfSentBytes.WithLabelValues(name, 
\"\").Add(0)\n\tkafkaNumberOfFailSendMsgs.WithLabelValues(name, \"\", \"\").Add(0)\n\tkafkaSendDuration.WithLabelValues(name, \"\").Set(0)\n}\n\nfunc (k *kafkaOutput) registerMetrics() error {\n\tcfg := k.cfg.Load()\n\tif cfg == nil {\n\t\treturn nil\n\t}\n\tif !cfg.EnableMetrics {\n\t\treturn nil\n\t}\n\tif k.reg == nil {\n\t\tk.logger.Printf(\"ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`\")\n\t\treturn nil\n\t}\n\tvar err error\n\tregisterMetricsOnce.Do(func() {\n\t\tif err = k.reg.Register(kafkaNumberOfSentMsgs); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = k.reg.Register(kafkaNumberOfSentBytes); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = k.reg.Register(kafkaNumberOfFailSendMsgs); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = k.reg.Register(kafkaSendDuration); err != nil {\n\t\t\treturn\n\t\t}\n\t})\n\tk.initMetrics(cfg.Name)\n\treturn err\n}\n"
  },
  {
    "path": "pkg/outputs/kafka_output/kafka_output.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage kafka_output\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"github.com/IBM/sarama\"\n\t\"github.com/google/uuid\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\tpkgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tdefaultKafkaMaxRetry    = 2\n\tdefaultKafkaTimeout     = 5 * time.Second\n\tdefaultKafkaTopic       = \"telemetry\"\n\tdefaultNumWorkers       = 1\n\tdefaultFormat           = \"event\"\n\tdefaultRecoveryWaitTime = 10 * time.Second\n\tdefaultAddress          = \"localhost:9092\"\n\tloggingPrefixTpl        = \"[kafka_output:%s] \"\n\tdefaultCompressionCodec = sarama.CompressionNone\n\n\trequiredAcksNoResponse   = \"no-response\"\n\trequiredAcksWaitForLocal = \"wait-for-local\"\n\trequiredAcksWaitForAll   = \"wait-for-all\"\n)\n\nvar stringBuilderPool = sync.Pool{\n\tNew: func() any {\n\t\treturn new(strings.Builder)\n\t},\n}\n\nfunc init() {\n\toutputs.Register(\"kafka\", func() outputs.Output {\n\t\treturn &kafkaOutput{}\n\t})\n}\n\nfunc (k *kafkaOutput) init() {\n\tk.cfg = 
new(atomic.Pointer[config])\n\tk.dynCfg = new(atomic.Pointer[dynConfig])\n\tk.msgChan = new(atomic.Pointer[chan *outputs.ProtoMsg])\n\tk.wg = new(sync.WaitGroup)\n\tk.logger = log.New(io.Discard, loggingPrefixTpl, utils.DefaultLoggingFlags)\n\tk.closeOnce = sync.Once{}\n\tk.closeSig = make(chan struct{})\n}\n\n// kafkaOutput //\ntype kafkaOutput struct {\n\toutputs.BaseOutput\n\n\tcfg       *atomic.Pointer[config]\n\tdynCfg    *atomic.Pointer[dynConfig]\n\tlogger    sarama.StdLogger\n\tsrcLogger *log.Logger\n\tmsgChan   *atomic.Pointer[chan *outputs.ProtoMsg]\n\twg        *sync.WaitGroup\n\n\trootCtx   context.Context\n\tcancelFn  context.CancelFunc\n\treg       *prometheus.Registry\n\tstore     store.Store[any]\n\tcloseOnce sync.Once\n\tcloseSig  chan struct{}\n}\n\ntype dynConfig struct {\n\ttargetTpl *template.Template\n\tmsgTpl    *template.Template\n\tevps      []formatters.EventProcessor\n\tmo        *formatters.MarshalOptions\n}\n\n// config //\ntype config struct {\n\tAddress            string           `mapstructure:\"address,omitempty\"`\n\tTopic              string           `mapstructure:\"topic,omitempty\"`\n\tTopicPrefix        string           `mapstructure:\"topic-prefix,omitempty\"`\n\tName               string           `mapstructure:\"name,omitempty\"`\n\tSASL               *types.SASL      `mapstructure:\"sasl,omitempty\"`\n\tTLS                *types.TLSConfig `mapstructure:\"tls,omitempty\"`\n\tMaxRetry           int              `mapstructure:\"max-retry,omitempty\"`\n\tTimeout            time.Duration    `mapstructure:\"timeout,omitempty\"`\n\tRecoveryWaitTime   time.Duration    `mapstructure:\"recovery-wait-time,omitempty\"`\n\tFlushFrequency     time.Duration    `mapstructure:\"flush-frequency,omitempty\"`\n\tSyncProducer       bool             `mapstructure:\"sync-producer,omitempty\"`\n\tRequiredAcks       string           `mapstructure:\"required-acks,omitempty\"`\n\tFormat             string           
`mapstructure:\"format,omitempty\"`\n\tInsertKey          bool             `mapstructure:\"insert-key,omitempty\"`\n\tAddTarget          string           `mapstructure:\"add-target,omitempty\"`\n\tTargetTemplate     string           `mapstructure:\"target-template,omitempty\"`\n\tMsgTemplate        string           `mapstructure:\"msg-template,omitempty\"`\n\tSplitEvents        bool             `mapstructure:\"split-events,omitempty\"`\n\tNumWorkers         int              `mapstructure:\"num-workers,omitempty\"`\n\tCompressionCodec   string           `mapstructure:\"compression-codec,omitempty\"`\n\tKafkaVersion       string           `mapstructure:\"kafka-version,omitempty\"`\n\tDebug              bool             `mapstructure:\"debug,omitempty\"`\n\tBufferSize         int              `mapstructure:\"buffer-size,omitempty\"`\n\tOverrideTimestamps bool             `mapstructure:\"override-timestamps,omitempty\"`\n\tEnableMetrics      bool             `mapstructure:\"enable-metrics,omitempty\"`\n\tEventProcessors    []string         `mapstructure:\"event-processors,omitempty\"`\n}\n\nfunc (k *kafkaOutput) String() string {\n\tcfg := k.cfg.Load()\n\tif cfg == nil {\n\t\treturn \"\"\n\t}\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (k *kafkaOutput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := pkgutils.GetConfigMaps(k.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tevps, err := formatters.MakeEventProcessors(\n\t\tlogger,\n\t\teventProcessors,\n\t\tps,\n\t\ttcs,\n\t\tacts,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn evps, nil\n}\n\n// Init /\nfunc (k *kafkaOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error {\n\tk.init() // init struct fields\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tif newCfg.Name == \"\" {\n\t\tnewCfg.Name = name\n\t}\n\tloggingPrefix := fmt.Sprintf(loggingPrefixTpl, newCfg.Name)\n\toptions := &outputs.OutputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tk.store = options.Store\n\tif options.Logger != nil {\n\t\tk.srcLogger = options.Logger\n\t\tsarama.Logger = log.New(options.Logger.Writer(), loggingPrefix, options.Logger.Flags())\n\t\tk.logger = sarama.Logger\n\t}\n\n\terr = k.setDefaultsFor(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// store config\n\tk.cfg.Store(newCfg)\n\n\t// initialize registry\n\tk.reg = options.Registry\n\terr = k.registerMetrics()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdc := new(dynConfig)\n\t// initialize event processors\n\tevps, err := k.buildEventProcessors(options.Logger, newCfg.EventProcessors)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdc.evps = evps\n\tnewMsgChan := make(chan *outputs.ProtoMsg, uint(newCfg.BufferSize))\n\tk.msgChan.Store(&newMsgChan)\n\n\tif newCfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tdc.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\n\tif newCfg.MsgTemplate != \"\" {\n\t\tdc.msgTpl, err = gtemplate.CreateTemplate(\"msg-template\", newCfg.MsgTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.msgTpl = dc.msgTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\n\tdc.mo = &formatters.MarshalOptions{\n\t\tFormat:     newCfg.Format,\n\t\tOverrideTS: newCfg.OverrideTimestamps,\n\t}\n\n\tk.dynCfg.Store(dc)\n\tconfig, err := k.createConfigFor(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tk.rootCtx = ctx\n\tctx, k.cancelFn = context.WithCancel(k.rootCtx)\n\tk.wg.Add(newCfg.NumWorkers)\n\tfor i := 0; i < newCfg.NumWorkers; i++ {\n\t\tcfg := 
*config\n\t\tcfg.ClientID = fmt.Sprintf(\"%s-%d\", config.ClientID, i)\n\t\tgo k.worker(ctx, i, &cfg, *k.msgChan.Load())\n\t}\n\treturn nil\n}\n\nfunc (k *kafkaOutput) Validate(cfg map[string]any) error {\n\tncfg := new(config)\n\terr := outputs.DecodeConfig(cfg, ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = k.setDefaultsFor(ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = gtemplate.CreateTemplate(\"target-template\", ncfg.TargetTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = gtemplate.CreateTemplate(\"msg-template\", ncfg.MsgTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (k *kafkaOutput) Update(ctx context.Context, cfg map[string]any) error {\n\tnewCfg := new(config)\n\tif err := outputs.DecodeConfig(cfg, newCfg); err != nil {\n\t\treturn err\n\t}\n\tcurrCfg := k.cfg.Load()\n\tif newCfg.Name == \"\" && currCfg != nil {\n\t\tnewCfg.Name = currCfg.Name\n\t}\n\n\terr := k.setDefaultsFor(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswapChannel := channelNeedsSwap(currCfg, newCfg)\n\trestartWorkers := needsWorkerRestart(currCfg, newCfg)\n\trebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0\n\n\tvar targetTpl *template.Template\n\tif newCfg.TargetTemplate == \"\" {\n\t\ttargetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tt, err := gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttargetTpl = t.Funcs(outputs.TemplateFuncs)\n\t} else {\n\t\ttargetTpl = outputs.DefaultTargetTemplate\n\t}\n\n\tvar msgTpl *template.Template\n\tif newCfg.MsgTemplate != \"\" {\n\t\tt, err := gtemplate.CreateTemplate(\"msg-template\", newCfg.MsgTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmsgTpl = t.Funcs(outputs.TemplateFuncs)\n\t}\n\n\tdc := &dynConfig{\n\t\ttargetTpl: targetTpl,\n\t\tmsgTpl:    msgTpl,\n\t\tmo: &formatters.MarshalOptions{\n\t\t\tFormat:     
newCfg.Format,\n\t\t\tOverrideTS: newCfg.OverrideTimestamps,\n\t\t},\n\t}\n\n\tprevDC := k.dynCfg.Load()\n\tif rebuildProcessors {\n\t\tdc.evps, err = k.buildEventProcessors(log.New(os.Stderr, fmt.Sprintf(loggingPrefixTpl, newCfg.Name), utils.DefaultLoggingFlags), newCfg.EventProcessors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\n\tk.dynCfg.Store(dc)\n\tk.cfg.Store(newCfg)\n\n\tif swapChannel || restartWorkers {\n\t\tvar newChan chan *outputs.ProtoMsg\n\t\tif swapChannel {\n\t\t\tnewChan = make(chan *outputs.ProtoMsg, newCfg.BufferSize)\n\t\t} else {\n\t\t\tnewChan = *k.msgChan.Load()\n\t\t}\n\n\t\tbaseCfg, err := k.createConfigFor(newCfg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trunCtx, cancel := context.WithCancel(k.rootCtx)\n\t\tnewWG := new(sync.WaitGroup)\n\t\t// save old pointers\n\t\toldCancel := k.cancelFn\n\t\toldWG := k.wg\n\t\toldMsgChan := *k.msgChan.Load()\n\n\t\t// swap\n\t\tk.cancelFn = cancel\n\t\tk.wg = newWG\n\t\tk.msgChan.Store(&newChan)\n\n\t\tk.wg.Add(currCfg.NumWorkers)\n\t\tfor i := 0; i < currCfg.NumWorkers; i++ {\n\t\t\tcfgCopy := *baseCfg\n\t\t\tcfgCopy.ClientID = fmt.Sprintf(\"%s-%d\", baseCfg.ClientID, i)\n\t\t\tgo k.worker(runCtx, i, &cfgCopy, newChan)\n\t\t}\n\n\t\tdrainDone := make(chan struct{})\n\t\tgo func() {\n\t\t\tdefer close(drainDone)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase msg, ok := <-oldMsgChan:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase newChan <- msg:\n\t\t\t\t\tdefault:\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\t// wait for drain to complete\n\t\t<-drainDone\n\t\t// cancel old workers and loops\n\t\tif oldCancel != nil {\n\t\t\toldCancel()\n\t\t}\n\t\tif oldWG != nil {\n\t\t\toldWG.Wait()\n\t\t}\n\t}\n\tk.logger.Printf(\"updated kafka output: %s\", k.String())\n\treturn nil\n}\n\nfunc (k *kafkaOutput) 
setDefaultsFor(cfg *config) error {\n\tif cfg.Format == \"\" {\n\t\tcfg.Format = defaultFormat\n\t}\n\tif !(cfg.Format == \"event\" || cfg.Format == \"protojson\" || cfg.Format == \"prototext\" || cfg.Format == \"proto\" || cfg.Format == \"json\") {\n\t\treturn fmt.Errorf(\"unsupported output format '%s' for output type kafka\", cfg.Format)\n\t}\n\tif cfg.Address == \"\" {\n\t\tcfg.Address = defaultAddress\n\t}\n\tif cfg.Topic == \"\" {\n\t\tcfg.Topic = defaultKafkaTopic\n\t}\n\tif cfg.MaxRetry == 0 {\n\t\tcfg.MaxRetry = defaultKafkaMaxRetry\n\t}\n\tif cfg.Timeout <= 0 {\n\t\tcfg.Timeout = defaultKafkaTimeout\n\t}\n\tif cfg.RecoveryWaitTime <= 0 {\n\t\tcfg.RecoveryWaitTime = defaultRecoveryWaitTime\n\t}\n\tif cfg.NumWorkers <= 0 {\n\t\tcfg.NumWorkers = defaultNumWorkers\n\t}\n\tif cfg.Name == \"\" {\n\t\tcfg.Name = \"gnmic-\" + uuid.New().String()\n\t}\n\tif cfg.SASL == nil {\n\t\treturn nil\n\t}\n\tcfg.SASL.Mechanism = strings.ToUpper(cfg.SASL.Mechanism)\n\tswitch cfg.SASL.Mechanism {\n\tcase \"\":\n\t\tcfg.SASL.Mechanism = \"PLAIN\"\n\tcase \"OAUTHBEARER\":\n\t\tif cfg.SASL.TokenURL == \"\" {\n\t\t\treturn errors.New(\"missing token-url for kafka SASL mechanism OAUTHBEARER\")\n\t\t}\n\t}\n\n\tswitch cfg.RequiredAcks {\n\tcase requiredAcksNoResponse:\n\tcase requiredAcksWaitForLocal:\n\tcase requiredAcksWaitForAll:\n\tcase \"\":\n\t\tcfg.RequiredAcks = requiredAcksWaitForLocal\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown `required-acks` value %s: must be one of %q, %q or %q\", cfg.RequiredAcks, requiredAcksNoResponse, requiredAcksWaitForLocal, requiredAcksWaitForAll)\n\t}\n\treturn nil\n}\n\nfunc (k *kafkaOutput) UpdateProcessor(name string, pcfg map[string]any) error {\n\tcfg := k.cfg.Load()\n\tdc := k.dynCfg.Load()\n\n\tnewEvps, changed, err := outputs.UpdateProcessorInSlice(\n\t\tk.srcLogger,\n\t\tk.store,\n\t\tcfg.EventProcessors,\n\t\tdc.evps,\n\t\tname,\n\t\tpcfg,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif changed {\n\t\tnewDC := *dc\n\t\tnewDC.evps 
= newEvps\n\t\tk.dynCfg.Store(&newDC)\n\t\tk.logger.Printf(\"updated event processor %s\", name)\n\t}\n\treturn nil\n}\n\n// Write //\nfunc (k *kafkaOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) {\n\tcurrentCfg := k.cfg.Load()\n\tif rsp == nil {\n\t\treturn\n\t}\n\n\tmsgChan := *k.msgChan.Load()\n\twctx, cancel := context.WithTimeout(ctx, currentCfg.Timeout)\n\tdefer cancel()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tcase msgChan <- outputs.NewProtoMsg(rsp, meta):\n\tcase <-k.closeSig:\n\t\treturn\n\tcase <-wctx.Done():\n\t\tif currentCfg.Debug {\n\t\t\tk.logger.Printf(\"writing expired after %s, Kafka output might not be initialized\", currentCfg.Timeout)\n\t\t}\n\t\tif currentCfg.EnableMetrics {\n\t\t\tkafkaNumberOfFailSendMsgs.WithLabelValues(currentCfg.Name, currentCfg.Name, \"timeout\").Inc()\n\t\t}\n\t\treturn\n\t}\n}\n\nfunc (k *kafkaOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {}\n\n// Close //\nfunc (k *kafkaOutput) Close() error {\n\tk.cancelFn()\n\tk.wg.Wait()\n\tk.closeOnce.Do(func() {\n\t\tclose(k.closeSig)\n\t})\n\tk.logger.Printf(\"closed kafka output: %s\", k.String())\n\treturn nil\n}\n\nfunc (k *kafkaOutput) worker(ctx context.Context, idx int, kafkaCfg *sarama.Config, msgChan <-chan *outputs.ProtoMsg) {\n\tcurrentCfg := k.cfg.Load()\n\tif currentCfg.SyncProducer {\n\t\tk.syncProducerWorker(ctx, idx, kafkaCfg, msgChan)\n\t\treturn\n\t}\n\tk.asyncProducerWorker(ctx, idx, kafkaCfg, msgChan)\n}\n\nfunc (k *kafkaOutput) asyncProducerWorker(ctx context.Context, idx int, kafkaCfg *sarama.Config, msgChan <-chan *outputs.ProtoMsg) {\n\tvar producer sarama.AsyncProducer\n\tvar err error\n\tdefer k.wg.Done()\n\tworkerLogPrefix := fmt.Sprintf(\"worker-%d\", idx)\n\tk.logger.Printf(\"%s starting\", workerLogPrefix)\nCRPROD:\n\tif ctx.Err() != nil {\n\t\treturn\n\t}\n\tcfg := k.cfg.Load()\n\tproducer, err = sarama.NewAsyncProducer(strings.Split(cfg.Address, \",\"), kafkaCfg)\n\tif err != nil 
{\n\t\tk.logger.Printf(\"%s failed to create kafka producer: %v\", workerLogPrefix, err)\n\t\ttime.Sleep(cfg.RecoveryWaitTime)\n\t\tgoto CRPROD\n\t}\n\tdefer producer.Close()\n\tk.logger.Printf(\"%s initialized kafka producer: %s\", workerLogPrefix, k.String())\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase msg, ok := <-producer.Successes():\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcfg := k.cfg.Load()\n\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\tstart, ok := msg.Metadata.(time.Time)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tkafkaSendDuration.WithLabelValues(cfg.Name, kafkaCfg.ClientID).Set(float64(time.Since(start).Nanoseconds()))\n\t\t\t\t\t}\n\t\t\t\t\tkafkaNumberOfSentMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID).Inc()\n\t\t\t\t\tkafkaNumberOfSentBytes.WithLabelValues(cfg.Name, kafkaCfg.ClientID).Add(float64(msg.Value.Length()))\n\t\t\t\t}\n\t\t\tcase err, ok := <-producer.Errors():\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcfg := k.cfg.Load()\n\t\t\t\tif cfg.Debug {\n\t\t\t\t\tk.logger.Printf(\"%s failed to send a kafka msg to topic '%s': %v\", workerLogPrefix, err.Msg.Topic, err.Err)\n\t\t\t\t}\n\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\tkafkaNumberOfFailSendMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID, \"send_error\").Inc()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tk.logger.Printf(\"%s shutting down\", workerLogPrefix)\n\t\t\treturn\n\t\tcase m, ok := <-msgChan:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpmsg := m.GetMsg()\n\t\t\tcfg := k.cfg.Load()\n\t\t\tdc := k.dynCfg.Load()\n\t\t\tpmsg, err = outputs.AddSubscriptionTarget(pmsg, m.GetMeta(), cfg.AddTarget, dc.targetTpl)\n\t\t\tif err != nil {\n\t\t\t\tk.logger.Printf(\"failed to add target to the response: %v\", err)\n\t\t\t}\n\t\t\tbb, err := outputs.Marshal(pmsg, m.GetMeta(), dc.mo, cfg.SplitEvents, dc.evps...)\n\t\t\tif err != nil {\n\t\t\t\tif cfg.Debug 
{\n\t\t\t\t\tk.logger.Printf(\"%s failed marshaling proto msg: %v\", workerLogPrefix, err)\n\t\t\t\t}\n\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\tkafkaNumberOfFailSendMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID, \"marshal_error\").Inc()\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(bb) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, b := range bb {\n\t\t\t\tif dc.msgTpl != nil {\n\t\t\t\t\tb, err = outputs.ExecTemplate(b, dc.msgTpl)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tif cfg.Debug {\n\t\t\t\t\t\t\tlog.Printf(\"failed to execute template: %v\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tkafkaNumberOfFailSendMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID, \"template_error\").Inc()\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\ttopic := k.selectTopic(m.GetMeta())\n\t\t\t\tmsg := &sarama.ProducerMessage{\n\t\t\t\t\tTopic: topic,\n\t\t\t\t\tValue: sarama.ByteEncoder(b),\n\t\t\t\t}\n\t\t\t\tif cfg.InsertKey {\n\t\t\t\t\tmsg.Key = sarama.ByteEncoder(k.partitionKey(m.GetMeta()))\n\t\t\t\t}\n\t\t\t\tvar start time.Time\n\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\tstart = time.Now()\n\t\t\t\t\tmsg.Metadata = start\n\t\t\t\t}\n\t\t\t\tproducer.Input() <- msg\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (k *kafkaOutput) syncProducerWorker(ctx context.Context, idx int, kafkaCfg *sarama.Config, msgChan <-chan *outputs.ProtoMsg) {\n\tvar producer sarama.SyncProducer\n\tvar err error\n\tdefer k.wg.Done()\n\tworkerLogPrefix := fmt.Sprintf(\"worker-%d\", idx)\n\tk.logger.Printf(\"%s starting\", workerLogPrefix)\nCRPROD:\n\tcfg := k.cfg.Load()\n\tproducer, err = sarama.NewSyncProducer(strings.Split(cfg.Address, \",\"), kafkaCfg)\n\tif err != nil {\n\t\tk.logger.Printf(\"%s failed to create kafka producer: %v\", workerLogPrefix, err)\n\t\ttime.Sleep(cfg.RecoveryWaitTime)\n\t\tgoto CRPROD\n\t}\n\tdefer producer.Close()\n\tk.logger.Printf(\"%s initialized kafka producer: %s\", workerLogPrefix, k.String())\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tk.logger.Printf(\"%s 
shutting down\", workerLogPrefix)\n\t\t\treturn\n\t\tcase m, ok := <-msgChan:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpmsg := m.GetMsg()\n\t\t\tcfg := k.cfg.Load()\n\t\t\tdc := k.dynCfg.Load()\n\t\t\tpmsg, err = outputs.AddSubscriptionTarget(pmsg, m.GetMeta(), cfg.AddTarget, dc.targetTpl)\n\t\t\tif err != nil {\n\t\t\t\tk.logger.Printf(\"failed to add target to the response: %v\", err)\n\t\t\t}\n\t\t\tbb, err := outputs.Marshal(pmsg, m.GetMeta(), dc.mo, cfg.SplitEvents, dc.evps...)\n\t\t\tif err != nil {\n\t\t\t\tif cfg.Debug {\n\t\t\t\t\tk.logger.Printf(\"%s failed marshaling proto msg: %v\", workerLogPrefix, err)\n\t\t\t\t}\n\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\tkafkaNumberOfFailSendMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID, \"marshal_error\").Inc()\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(bb) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, b := range bb {\n\t\t\t\tif dc.msgTpl != nil {\n\t\t\t\t\tb, err = outputs.ExecTemplate(b, dc.msgTpl)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tif cfg.Debug {\n\t\t\t\t\t\t\tlog.Printf(\"failed to execute template: %v\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tkafkaNumberOfFailSendMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID, \"template_error\").Inc()\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\ttopic := k.selectTopic(m.GetMeta())\n\t\t\t\tmsg := &sarama.ProducerMessage{\n\t\t\t\t\tTopic: topic,\n\t\t\t\t\tValue: sarama.ByteEncoder(b),\n\t\t\t\t}\n\t\t\t\tif cfg.InsertKey {\n\t\t\t\t\tmsg.Key = sarama.ByteEncoder(k.partitionKey(m.GetMeta()))\n\t\t\t\t}\n\t\t\t\tvar start time.Time\n\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\tstart = time.Now()\n\t\t\t\t}\n\t\t\t\t_, _, err = producer.SendMessage(msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif cfg.Debug {\n\t\t\t\t\t\tk.logger.Printf(\"%s failed to send a kafka msg to topic '%s': %v\", workerLogPrefix, topic, err)\n\t\t\t\t\t}\n\t\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\t\tkafkaNumberOfFailSendMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID, 
\"send_error\").Inc()\n\t\t\t\t\t}\n\t\t\t\t\tproducer.Close()\n\t\t\t\t\ttime.Sleep(cfg.RecoveryWaitTime)\n\t\t\t\t\tgoto CRPROD\n\t\t\t\t}\n\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\tkafkaSendDuration.WithLabelValues(cfg.Name, kafkaCfg.ClientID).Set(float64(time.Since(start).Nanoseconds()))\n\t\t\t\t\tkafkaNumberOfSentMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID).Inc()\n\t\t\t\t\tkafkaNumberOfSentBytes.WithLabelValues(cfg.Name, kafkaCfg.ClientID).Add(float64(len(b)))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (k *kafkaOutput) createConfigFor(c *config) (*sarama.Config, error) {\n\tcfg := sarama.NewConfig()\n\tcfg.ClientID = c.Name\n\tif c.KafkaVersion != \"\" {\n\t\tvar err error\n\t\tcfg.Version, err = sarama.ParseKafkaVersion(c.KafkaVersion)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\t// SASL_PLAINTEXT or SASL_SSL\n\tif c.SASL != nil {\n\t\tcfg.Net.SASL.Enable = true\n\t\tcfg.Net.SASL.User = c.SASL.User\n\t\tcfg.Net.SASL.Password = c.SASL.Password\n\t\tcfg.Net.SASL.Mechanism = sarama.SASLMechanism(c.SASL.Mechanism)\n\t\tswitch cfg.Net.SASL.Mechanism {\n\t\tcase sarama.SASLTypeSCRAMSHA256:\n\t\t\tcfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient {\n\t\t\t\treturn &XDGSCRAMClient{HashGeneratorFcn: SHA256}\n\t\t\t}\n\t\tcase sarama.SASLTypeSCRAMSHA512:\n\t\t\tcfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient {\n\t\t\t\treturn &XDGSCRAMClient{HashGeneratorFcn: SHA512}\n\t\t\t}\n\t\tcase sarama.SASLTypeOAuth:\n\t\t\tcfg.Net.SASL.TokenProvider = pkgutils.NewTokenProvider(cfg.Net.SASL.User, cfg.Net.SASL.Password, c.SASL.TokenURL)\n\t\t}\n\t}\n\t// SSL or SASL_SSL\n\tif c.TLS != nil {\n\t\tvar err error\n\t\tcfg.Net.TLS.Enable = true\n\t\tcfg.Net.TLS.Config, err = utils.NewTLSConfig(\n\t\t\tc.TLS.CaFile,\n\t\t\tc.TLS.CertFile,\n\t\t\tc.TLS.KeyFile,\n\t\t\t\"\",\n\t\t\tc.TLS.SkipVerify,\n\t\t\tfalse)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tcfg.Producer.Retry.Max = 
c.MaxRetry\n\tcfg.Producer.Return.Successes = true\n\tcfg.Producer.Timeout = c.Timeout\n\tcfg.Producer.Flush.Frequency = c.FlushFrequency\n\tswitch c.RequiredAcks {\n\tcase requiredAcksNoResponse:\n\tcase requiredAcksWaitForLocal:\n\t\tcfg.Producer.RequiredAcks = sarama.WaitForLocal\n\tcase requiredAcksWaitForAll:\n\t\tcfg.Producer.RequiredAcks = sarama.WaitForAll\n\t}\n\n\tcfg.Metadata.Full = false\n\n\tswitch c.CompressionCodec {\n\tcase \"gzip\":\n\t\tcfg.Producer.Compression = sarama.CompressionGZIP\n\tcase \"snappy\":\n\t\tcfg.Producer.Compression = sarama.CompressionSnappy\n\tcase \"zstd\":\n\t\tcfg.Producer.Compression = sarama.CompressionZSTD\n\tcase \"lz4\":\n\t\tcfg.Producer.Compression = sarama.CompressionLZ4\n\tdefault:\n\t\tcfg.Producer.Compression = defaultCompressionCodec\n\t}\n\n\treturn cfg, nil\n}\n\nfunc (k *kafkaOutput) partitionKey(m outputs.Meta) []byte {\n\tb := stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tb.Reset()\n\t\tstringBuilderPool.Put(b)\n\t}()\n\tb.WriteString(m[\"source\"])\n\tb.WriteString(\":::\")\n\tb.WriteString(m[\"subscription-name\"])\n\treturn []byte(b.String())\n}\n\nfunc (k *kafkaOutput) selectTopic(m outputs.Meta) string {\n\tcfg := k.cfg.Load()\n\tif cfg.TopicPrefix == \"\" {\n\t\treturn cfg.Topic\n\t}\n\n\tsb := stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tsb.Reset()\n\t\tstringBuilderPool.Put(sb)\n\t}()\n\tsb.WriteString(cfg.TopicPrefix)\n\tif subname, ok := m[\"subscription-name\"]; ok {\n\t\tsb.WriteString(\"_\")\n\t\tsb.WriteString(subname)\n\t}\n\tif s, ok := m[\"source\"]; ok {\n\t\tsb.WriteString(\"_\")\n\t\tfor _, r := range s {\n\t\t\tif r == ':' {\n\t\t\t\tsb.WriteRune('_')\n\t\t\t} else {\n\t\t\t\tsb.WriteRune(r)\n\t\t\t}\n\t\t}\n\t}\n\treturn sb.String()\n}\n\n// config swap requirements:\n\n// decides if we need to rebuild sarama.Config and producers\nfunc needsProducerRestart(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\t// 
anything that maps into sarama.Config or producer type\n\tif old.Address != nw.Address ||\n\t\t!old.TLS.Equal(nw.TLS) ||\n\t\t!saslEq(old.SASL, nw.SASL) ||\n\t\told.KafkaVersion != nw.KafkaVersion ||\n\t\told.MaxRetry != nw.MaxRetry ||\n\t\told.Timeout != nw.Timeout ||\n\t\told.FlushFrequency != nw.FlushFrequency ||\n\t\told.RequiredAcks != nw.RequiredAcks ||\n\t\told.CompressionCodec != nw.CompressionCodec ||\n\t\told.SyncProducer != nw.SyncProducer ||\n\t\told.Name != nw.Name {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc needsWorkerRestart(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\t// producer dependencies OR worker count change\n\treturn needsProducerRestart(old, nw) || old.NumWorkers != nw.NumWorkers\n}\n\nfunc channelNeedsSwap(old, nw *config) bool {\n\tif old != nil && nw != nil {\n\t\treturn old.BufferSize != nw.BufferSize\n\t}\n\treturn true\n}\n\nfunc saslEq(a, b *types.SASL) bool {\n\tif a == nil && b == nil {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn a.User == b.User &&\n\t\ta.Password == b.Password &&\n\t\tstrings.EqualFold(a.Mechanism, b.Mechanism) &&\n\t\ta.TokenURL == b.TokenURL\n}\n"
  },
  {
    "path": "pkg/outputs/kafka_output/kafka_scram_client.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage kafka_output\n\nimport (\n\t\"crypto/sha256\"\n\t\"crypto/sha512\"\n\t\"hash\"\n\n\t\"github.com/xdg/scram\"\n)\n\nvar SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() }\nvar SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() }\n\ntype XDGSCRAMClient struct {\n\t*scram.Client\n\t*scram.ClientConversation\n\tscram.HashGeneratorFcn\n}\n\nfunc (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) {\n\tx.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tx.ClientConversation = x.Client.NewConversation()\n\treturn nil\n}\n\nfunc (x *XDGSCRAMClient) Step(challenge string) (response string, err error) {\n\tresponse, err = x.ClientConversation.Step(challenge)\n\treturn\n}\n\nfunc (x *XDGSCRAMClient) Done() bool {\n\treturn x.ClientConversation.Done()\n}\n"
  },
  {
    "path": "pkg/outputs/nats_outputs/jetstream/jetstream_output.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage jetstream_output\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"slices\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"github.com/google/uuid\"\n\t\"github.com/nats-io/nats.go\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\tgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tloggingPrefix       = \"[jetstream_output:%s] \"\n\tdefaultSubjectName  = \"telemetry\"\n\tdefaultFormat       = \"event\"\n\tdefaultAddress      = \"localhost:4222\"\n\tnatsConnectWait     = 2 * time.Second\n\tdefaultNumWorkers   = 1\n\tdefaultWriteTimeout = 5 * time.Second\n)\n\nfunc init() {\n\toutputs.Register(\"jetstream\", func() outputs.Output {\n\t\treturn &jetstreamOutput{}\n\t})\n}\n\ntype subjectFormat string\n\nconst (\n\tsubjectFormat_Static                = \"static\"\n\tsubjectFormat_TargetSub             = \"target.subscription\"\n\tsubjectFormat_SubTarget             = \"subscription.target\"\n\tsubjectFormat_SubTargetPath         = \"subscription.target.path\"\n\tsubjectFormat_SubTargetPathWithKeys = 
\"subscription.target.pathKeys\"\n)\n\ntype config struct {\n\tName               string              `mapstructure:\"name,omitempty\" json:\"name,omitempty\"`\n\tAddress            string              `mapstructure:\"address,omitempty\" json:\"address,omitempty\"`\n\tStream             string              `mapstructure:\"stream,omitempty\" json:\"stream,omitempty\"`\n\tSubject            string              `mapstructure:\"subject,omitempty\" json:\"subject,omitempty\"`\n\tSubjectFormat      subjectFormat       `mapstructure:\"subject-format,omitempty\" json:\"subject-format,omitempty\"`\n\tCreateStream       *createStreamConfig `mapstructure:\"create-stream,omitempty\" json:\"create-stream,omitempty\"`\n\tUsername           string              `mapstructure:\"username,omitempty\" json:\"username,omitempty\"`\n\tPassword           string              `mapstructure:\"password,omitempty\" json:\"password,omitempty\"`\n\tConnectTimeWait    time.Duration       `mapstructure:\"connect-time-wait,omitempty\" json:\"connect-time-wait,omitempty\"`\n\tTLS                *types.TLSConfig    `mapstructure:\"tls,omitempty\" json:\"tls,omitempty\"`\n\tFormat             string              `mapstructure:\"format,omitempty\" json:\"format,omitempty\"`\n\tSplitEvents        bool                `mapstructure:\"split-events,omitempty\" json:\"split-events,omitempty\"`\n\tAddTarget          string              `mapstructure:\"add-target,omitempty\" json:\"add-target,omitempty\"`\n\tTargetTemplate     string              `mapstructure:\"target-template,omitempty\" json:\"target-template,omitempty\"`\n\tMsgTemplate        string              `mapstructure:\"msg-template,omitempty\" json:\"msg-template,omitempty\"`\n\tOverrideTimestamps bool                `mapstructure:\"override-timestamps,omitempty\" json:\"override-timestamps,omitempty\"`\n\tNumWorkers         int                 `mapstructure:\"num-workers,omitempty\" json:\"num-workers,omitempty\"`\n\tWriteTimeout       
time.Duration       `mapstructure:\"write-timeout,omitempty\" json:\"write-timeout,omitempty\"`\n\tDebug              bool                `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\tBufferSize         uint                `mapstructure:\"buffer-size,omitempty\" json:\"buffer-size,omitempty\"`\n\tEnableMetrics      bool                `mapstructure:\"enable-metrics,omitempty\" json:\"enable-metrics,omitempty\"`\n\tEventProcessors    []string            `mapstructure:\"event-processors,omitempty\" json:\"event-processors,omitempty\"`\n}\n\ntype createStreamConfig struct {\n\tDescription string        `mapstructure:\"description,omitempty\" json:\"description,omitempty\"`\n\tSubjects    []string      `mapstructure:\"subjects,omitempty\" json:\"subjects,omitempty\"`\n\tStorage     string        `mapstructure:\"storage,omitempty\" json:\"storage,omitempty\"`\n\tRetention   string        `mapstructure:\"retention-policy,omitempty\" json:\"retention-policy,omitempty\"`\n\tMaxMsgs     int64         `mapstructure:\"max-msgs,omitempty\" json:\"max-msgs,omitempty\"`\n\tMaxBytes    int64         `mapstructure:\"max-bytes,omitempty\" json:\"max-bytes,omitempty\"`\n\tMaxAge      time.Duration `mapstructure:\"max-age,omitempty\" json:\"max-age,omitempty\"`\n\tMaxMsgSize  int32         `mapstructure:\"max-msg-size,omitempty\" json:\"max-msg-size,omitempty\"`\n}\n\n// jetstreamOutput //\ntype jetstreamOutput struct {\n\toutputs.BaseOutput\n\n\tcfg      *atomic.Pointer[config]\n\trootCtx  context.Context\n\tcancelFn context.CancelFunc\n\n\tmsgChan *atomic.Pointer[chan *outputs.ProtoMsg] // atomic channel swaps\n\t// workers wait group\n\twg *sync.WaitGroup\n\t// dynamic config items that don't need a worker restart\n\tdynCfg *atomic.Pointer[dynConfig]\n\t// metrics registry\n\treg *prometheus.Registry\n\t// config store\n\tstore  store.Store[any]\n\tlogger *log.Logger\n\n\tcloseOnce sync.Once\n\tcloseSig  chan struct{}\n}\n\ntype dynConfig struct {\n\ttargetTpl 
*template.Template\n\tmsgTpl    *template.Template\n\tevps      []formatters.EventProcessor\n\tmo        *formatters.MarshalOptions\n}\n\nfunc (n *jetstreamOutput) init() {\n\tn.cfg = new(atomic.Pointer[config])\n\tn.dynCfg = new(atomic.Pointer[dynConfig])\n\tn.msgChan = new(atomic.Pointer[chan *outputs.ProtoMsg])\n\tn.wg = new(sync.WaitGroup)\n\tn.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags)\n\tn.closeOnce = sync.Once{}\n\tn.closeSig = make(chan struct{})\n}\n\nfunc (n *jetstreamOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error {\n\tn.init() // init struct fields\n\tncfg := new(config)\n\terr := outputs.DecodeConfig(cfg, ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ncfg.Name == \"\" {\n\t\tncfg.Name = name\n\t}\n\tn.logger.SetPrefix(fmt.Sprintf(loggingPrefix, ncfg.Name))\n\n\toptions := &outputs.OutputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tn.store = options.Store\n\n\t// set defaults\n\terr = n.setDefaultsFor(ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn.cfg.Store(ncfg)\n\t// apply logger\n\tn.setLogger(options.Logger)\n\n\t// initialize registry\n\tn.reg = options.Registry\n\terr = n.registerMetrics()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmsgChan := make(chan *outputs.ProtoMsg, ncfg.BufferSize)\n\tn.msgChan.Store(&msgChan)\n\t// prep dynamic config\n\tdc := new(dynConfig)\n\t// initialize event processors\n\tevps, err := n.buildEventProcessors(options.Logger, ncfg.EventProcessors)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdc.evps = evps\n\n\tdc.mo = &formatters.MarshalOptions{\n\t\tFormat:     ncfg.Format,\n\t\tOverrideTS: ncfg.OverrideTimestamps,\n\t}\n\tif ncfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if ncfg.AddTarget != \"\" {\n\t\tdc.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", ncfg.TargetTemplate)\n\t\tif err != nil 
{\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\n\tif ncfg.MsgTemplate != \"\" {\n\t\tdc.msgTpl, err = gtemplate.CreateTemplate(\"msg-template\", ncfg.MsgTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.msgTpl = dc.msgTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\n\tn.dynCfg.Store(dc)\n\n\tn.rootCtx = ctx // store root context\n\tvar wctx context.Context\n\n\twctx, n.cancelFn = context.WithCancel(n.rootCtx) // create worker context\n\tn.wg.Add(ncfg.NumWorkers)\n\tfor i := 0; i < ncfg.NumWorkers; i++ {\n\t\tgo n.worker(wctx, i)\n\t}\n\n\treturn nil\n}\n\nfunc (n *jetstreamOutput) setDefaultsFor(cfg *config) error {\n\tif cfg.Stream == \"\" {\n\t\treturn errors.New(\"missing stream name\")\n\t}\n\tif cfg.Format == \"\" {\n\t\tcfg.Format = defaultFormat\n\t}\n\tif cfg.SubjectFormat == \"\" {\n\t\tcfg.SubjectFormat = subjectFormat_Static\n\t}\n\tswitch cfg.SubjectFormat {\n\tcase subjectFormat_Static,\n\t\tsubjectFormat_TargetSub,\n\t\tsubjectFormat_SubTarget,\n\t\tsubjectFormat_SubTargetPath,\n\t\tsubjectFormat_SubTargetPathWithKeys:\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown subject-format value: %v\", cfg.SubjectFormat)\n\t}\n\tif cfg.Subject == \"\" {\n\t\tcfg.Subject = defaultSubjectName\n\t}\n\tif cfg.Address == \"\" {\n\t\tcfg.Address = defaultAddress\n\t}\n\tif cfg.ConnectTimeWait <= 0 {\n\t\tcfg.ConnectTimeWait = natsConnectWait\n\t}\n\tif cfg.Name == \"\" {\n\t\tcfg.Name = \"gnmic-\" + uuid.New().String()\n\t}\n\tif cfg.NumWorkers <= 0 {\n\t\tcfg.NumWorkers = defaultNumWorkers\n\t}\n\tif cfg.WriteTimeout <= 0 {\n\t\tcfg.WriteTimeout = defaultWriteTimeout\n\t}\n\tif cfg.CreateStream != nil {\n\t\tif len(cfg.CreateStream.Subjects) == 0 {\n\t\t\tcfg.CreateStream.Subjects = []string{fmt.Sprintf(\"%s.>\", cfg.Stream)}\n\t\t}\n\t\tif cfg.CreateStream.Description == \"\" {\n\t\t\tcfg.CreateStream.Description = \"created by gNMIc\"\n\t\t}\n\t\tif cfg.CreateStream.Storage == \"\" 
{\n\t\t\tcfg.CreateStream.Storage = \"memory\"\n\t\t}\n\t\tif cfg.CreateStream.Retention == \"\" {\n\t\t\tcfg.CreateStream.Retention = \"limits\"\n\t\t}\n\t\t// Validate retention policy value\n\t\tif !isValidRetentionPolicy(cfg.CreateStream.Retention) {\n\t\t\treturn fmt.Errorf(\"invalid retention-policy: %s (must be 'limits' or 'workqueue')\",\n\t\t\t\tcfg.CreateStream.Retention)\n\t\t}\n\t\treturn nil\n\t}\n\treturn nil\n}\n\nfunc (n *jetstreamOutput) Validate(cfg map[string]any) error {\n\tncfg := new(config)\n\terr := outputs.DecodeConfig(cfg, ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = n.setDefaultsFor(ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = gtemplate.CreateTemplate(\"target-template\", ncfg.TargetTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = gtemplate.CreateTemplate(\"msg-template\", ncfg.MsgTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (n *jetstreamOutput) Update(ctx context.Context, cfg map[string]any) error {\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = n.setDefaultsFor(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcurrCfg := n.cfg.Load()\n\n\tswapChannel := channelNeedsSwap(currCfg, newCfg)\n\trestartWorkers := needsWorkerRestart(currCfg, newCfg)\n\tstreamChanged := streamChanged(currCfg, newCfg)\n\trebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0\n\n\t//rebuild\n\tvar targetTpl *template.Template\n\tif newCfg.TargetTemplate == \"\" {\n\t\ttargetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tt, err := gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttargetTpl = t.Funcs(outputs.TemplateFuncs)\n\t} else {\n\t\ttargetTpl = outputs.DefaultTargetTemplate\n\t}\n\n\tvar msgTpl *template.Template\n\tif newCfg.MsgTemplate != \"\" {\n\t\tt, err := 
gtemplate.CreateTemplate(\"msg-template\", newCfg.MsgTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmsgTpl = t.Funcs(outputs.TemplateFuncs)\n\t}\n\n\tdc := &dynConfig{\n\t\ttargetTpl: targetTpl,\n\t\tmsgTpl:    msgTpl,\n\t\tmo: &formatters.MarshalOptions{\n\t\t\tFormat:     newCfg.Format,\n\t\t\tOverrideTS: newCfg.OverrideTimestamps,\n\t\t},\n\t}\n\n\t// rebuild processors ?\n\tprevDC := n.dynCfg.Load()\n\tif rebuildProcessors {\n\t\tdc.evps, err = n.buildEventProcessors(n.logger, newCfg.EventProcessors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\t// store new dynamic config\n\tn.dynCfg.Store(dc)\n\t// store new config\n\tn.cfg.Store(newCfg)\n\n\tif swapChannel || restartWorkers || streamChanged {\n\t\tvar newChan chan *outputs.ProtoMsg\n\t\tif swapChannel {\n\t\t\tnewChan = make(chan *outputs.ProtoMsg, newCfg.BufferSize)\n\n\t\t} else {\n\t\t\tnewChan = *n.msgChan.Load()\n\t\t}\n\n\t\trunCtx, cancel := context.WithCancel(n.rootCtx)\n\t\tnewWG := new(sync.WaitGroup)\n\t\t// save old pointers\n\t\toldCancel := n.cancelFn\n\t\toldWG := n.wg\n\t\toldMsgChan := *n.msgChan.Load()\n\t\t// swap\n\t\tn.cancelFn = cancel\n\t\tn.wg = newWG\n\t\tn.msgChan.Store(&newChan)\n\n\t\t// restart workers\n\t\tn.wg.Add(currCfg.NumWorkers)\n\t\tfor i := 0; i < currCfg.NumWorkers; i++ {\n\t\t\tgo n.worker(runCtx, i)\n\t\t}\n\t\t// cancel old workers\n\t\tif oldCancel != nil {\n\t\t\toldCancel()\n\t\t}\n\t\tif oldWG != nil {\n\t\t\toldWG.Wait()\n\t\t}\n\t\tif swapChannel {\n\t\t\t// best effort drain old channel\n\t\tOUTER_LOOP: // break label\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase msg, ok := <-oldMsgChan:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase newChan <- msg:\n\t\t\t\t\tdefault:\n\t\t\t\t\t\t// new channel full, drop message\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tbreak OUTER_LOOP // break out of the outer 
loop\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tn.logger.Printf(\"updated jetstream output: %s\", n.String())\n\treturn nil\n\n}\n\nfunc (n *jetstreamOutput) UpdateProcessor(name string, pcfg map[string]any) error {\n\tcfg := n.cfg.Load()\n\tdc := n.dynCfg.Load()\n\n\tnewEvps, changed, err := outputs.UpdateProcessorInSlice(\n\t\tn.logger,\n\t\tn.store,\n\t\tcfg.EventProcessors,\n\t\tdc.evps,\n\t\tname,\n\t\tpcfg,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif changed {\n\t\tnewDC := *dc\n\t\tnewDC.evps = newEvps\n\t\tn.dynCfg.Store(&newDC)\n\t\tn.logger.Printf(\"updated event processor %s\", name)\n\t}\n\treturn nil\n}\n\nfunc (n *jetstreamOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) {\n\tdc := n.dynCfg.Load()\n\tcfg := n.cfg.Load()\n\tif rsp == nil || dc == nil || dc.mo == nil {\n\t\treturn\n\t}\n\twctx, cancel := context.WithTimeout(ctx, cfg.WriteTimeout)\n\tdefer cancel()\n\n\tch := n.msgChan.Load()\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tcase *ch <- outputs.NewProtoMsg(rsp, meta):\n\tcase <-n.closeSig:\n\t\treturn\n\tcase <-wctx.Done():\n\t\tif cfg.Debug {\n\t\t\tn.logger.Printf(\"writing expired after %s, JetStream output might not be initialized\", cfg.WriteTimeout)\n\t\t}\n\t\tif cfg.EnableMetrics {\n\t\t\tjetStreamNumberOfFailSendMsgs.WithLabelValues(cfg.Name, \"timeout\").Inc()\n\t\t}\n\t\treturn\n\t}\n}\n\nfunc (n *jetstreamOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {}\n\nfunc (n *jetstreamOutput) Close() error {\n\tn.cancelFn()\n\tn.wg.Wait()\n\tn.closeOnce.Do(func() {\n\t\tclose(n.closeSig)\n\t})\n\tn.logger.Printf(\"closed jetstream output: %s\", n.String())\n\treturn nil\n}\n\nfunc (c *config) String() string {\n\tb, err := json.Marshal(c)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (n *jetstreamOutput) String() string {\n\tcfg := n.cfg.Load()\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (n *jetstreamOutput) 
buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := gutils.GetConfigMaps(n.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tevps, err := formatters.MakeEventProcessors(\n\t\tlogger,\n\t\teventProcessors,\n\t\tps,\n\t\ttcs,\n\t\tacts,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn evps, nil\n}\n\nfunc (n *jetstreamOutput) setLogger(logger *log.Logger) {\n\tif logger != nil && n.logger != nil {\n\t\tn.logger.SetOutput(logger.Writer())\n\t\tn.logger.SetFlags(logger.Flags())\n\t}\n}\n\nfunc (n *jetstreamOutput) worker(ctx context.Context, i int) {\n\tdefer n.wg.Done()\n\tvar natsConn *nats.Conn\n\tvar err error\n\tvar subject string\n\tworkerLogPrefix := fmt.Sprintf(\"worker-%d\", i)\n\tn.logger.Printf(\"%s starting\", workerLogPrefix)\n\t// snapshot msgChan\n\tmsgChan := *n.msgChan.Load()\nCRCONN:\n\tif ctx.Err() != nil {\n\t\treturn\n\t}\n\tcfg := n.cfg.Load()\n\tname := fmt.Sprintf(\"%s-%d\", cfg.Name, i)\n\tnatsConn, err = n.createNATSConn(ctx, cfg, i)\n\tif err != nil {\n\t\tn.logger.Printf(\"%s failed to create connection: %v\", workerLogPrefix, err)\n\t\ttime.Sleep(cfg.ConnectTimeWait)\n\t\tgoto CRCONN\n\t}\n\tjs, err := natsConn.JetStream()\n\tif err != nil {\n\t\tif cfg.Debug {\n\t\t\tn.logger.Printf(\"%s failed to create jetstream context: %v\", workerLogPrefix, err)\n\t\t}\n\t\tif cfg.EnableMetrics {\n\t\t\tjetStreamNumberOfFailSendMsgs.WithLabelValues(name, \"jetstream_context_error\").Inc()\n\t\t}\n\t\tnatsConn.Close()\n\t\ttime.Sleep(cfg.ConnectTimeWait)\n\t\tgoto CRCONN\n\t}\n\tn.logger.Printf(\"%s initialized nats jetstream producer: %s\", workerLogPrefix, cfg)\n\t// worker-0 create stream if configured\n\tif i == 0 {\n\t\terr = n.createStream(js, cfg)\n\t\tif err != nil {\n\t\t\tif cfg.Debug {\n\t\t\t\tn.logger.Printf(\"%s failed to create stream: %v\", workerLogPrefix, err)\n\t\t\t}\n\t\t\tif cfg.EnableMetrics 
{\n\t\t\t\tjetStreamNumberOfFailSendMsgs.WithLabelValues(name, \"create_stream_error\").Inc()\n\t\t\t}\n\t\t\tnatsConn.Close()\n\t\t\ttime.Sleep(cfg.ConnectTimeWait)\n\t\t\tgoto CRCONN\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tnatsConn.Close()\n\t\t\tn.logger.Printf(\"%s shutting down\", workerLogPrefix)\n\t\t\treturn\n\t\tcase m := <-msgChan:\n\t\t\tpmsg := m.GetMsg()\n\t\t\t// get fresh config\n\t\t\tcfg := n.cfg.Load()\n\t\t\t// snapshot template and marshal options\n\t\t\tdc := n.dynCfg.Load()\n\t\t\tname := fmt.Sprintf(\"%s-%d\", cfg.Name, i)\n\t\t\tpmsg, err = outputs.AddSubscriptionTarget(pmsg, m.GetMeta(), cfg.AddTarget, dc.targetTpl)\n\t\t\tif err != nil {\n\t\t\t\tn.logger.Printf(\"failed to add target to the response: %v\", err)\n\t\t\t}\n\t\t\tvar rs []proto.Message\n\t\t\tswitch cfg.SubjectFormat {\n\t\t\tcase subjectFormat_Static, subjectFormat_TargetSub, subjectFormat_SubTarget:\n\t\t\t\trs = []proto.Message{pmsg}\n\t\t\tcase subjectFormat_SubTargetPath, subjectFormat_SubTargetPathWithKeys:\n\t\t\t\tswitch rsp := pmsg.(type) {\n\t\t\t\tcase *gnmi.SubscribeResponse:\n\t\t\t\t\tswitch rsp := rsp.Response.(type) {\n\t\t\t\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\t\t\t\trs = splitSubscribeResponse(rsp)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, r := range rs {\n\t\t\t\tbb, err := outputs.Marshal(r, m.GetMeta(), dc.mo, cfg.SplitEvents, dc.evps...)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif cfg.Debug {\n\t\t\t\t\t\tn.logger.Printf(\"%s failed marshaling proto msg: %v\", workerLogPrefix, err)\n\t\t\t\t\t}\n\t\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\t\tjetStreamNumberOfFailSendMsgs.WithLabelValues(name, \"marshal_error\").Inc()\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif len(bb) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, b := range bb {\n\t\t\t\t\tif dc.msgTpl != nil {\n\t\t\t\t\t\tb, err = outputs.ExecTemplate(b, dc.msgTpl)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tif cfg.Debug 
{\n\t\t\t\t\t\t\t\tlog.Printf(\"failed to execute template: %v\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tjetStreamNumberOfFailSendMsgs.WithLabelValues(name, \"template_error\").Inc()\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tsubject, err = n.subjectName(r, m.GetMeta(), dc, cfg)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tif cfg.Debug {\n\t\t\t\t\t\t\tn.logger.Printf(\"%s failed to get subject name: %v\", workerLogPrefix, err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\t\t\tjetStreamNumberOfFailSendMsgs.WithLabelValues(name, \"subject_name_error\").Inc()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tvar start time.Time\n\t\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\t\tstart = time.Now()\n\t\t\t\t\t}\n\t\t\t\t\t_, err = js.Publish(subject, b, nats.Context(ctx))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tif cfg.Debug {\n\t\t\t\t\t\t\tn.logger.Printf(\"%s failed to write to subject '%s': %v\", workerLogPrefix, subject, err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\t\t\tjetStreamNumberOfFailSendMsgs.WithLabelValues(name, \"publish_error\").Inc()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tnatsConn.Close()\n\t\t\t\t\t\ttime.Sleep(cfg.ConnectTimeWait)\n\t\t\t\t\t\tgoto CRCONN\n\t\t\t\t\t}\n\t\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\t\tjetStreamSendDuration.WithLabelValues(name).Set(float64(time.Since(start).Nanoseconds()))\n\t\t\t\t\t\tjetStreamNumberOfSentMsgs.WithLabelValues(name, subject).Inc()\n\t\t\t\t\t\tjetStreamNumberOfSentBytes.WithLabelValues(name, subject).Add(float64(len(b)))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype customDialer struct {\n\tctx    context.Context\n\tlogger *log.Logger\n}\n\nfunc (n *jetstreamOutput) newCustomDialer(ctx context.Context) *customDialer {\n\treturn &customDialer{ctx: ctx, logger: n.logger}\n}\n\nfunc (d *customDialer) Dial(network, address string) (net.Conn, error) {\n\tctx, cancel := context.WithCancel(d.ctx)\n\tdefer cancel()\n\n\td.logger.Printf(\"attempting to 
connect to %s\", address)\n\tselect {\n\tcase <-d.ctx.Done():\n\t\treturn nil, d.ctx.Err()\n\tdefault:\n\t\tnd := &net.Dialer{}\n\t\tconn, err := nd.DialContext(ctx, network, address)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\td.logger.Printf(\"successfully connected to NATS server %s\", address)\n\t\treturn conn, nil\n\t}\n}\n\nfunc (n *jetstreamOutput) createNATSConn(ctx context.Context, c *config, idx int) (*nats.Conn, error) {\n\topts := []nats.Option{\n\t\tnats.Name(fmt.Sprintf(\"%s-%d\", c.Name, idx)),\n\t\tnats.SetCustomDialer(n.newCustomDialer(ctx)),\n\t\tnats.ReconnectWait(c.ConnectTimeWait),\n\t\t// nats.ReconnectBufSize(natsReconnectBufferSize),\n\t\tnats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) {\n\t\t\tn.logger.Printf(\"NATS error: %v\", err)\n\t\t}),\n\t\tnats.DisconnectErrHandler(func(_ *nats.Conn, err error) {\n\t\t\tn.logger.Printf(\"Disconnected from NATS err=%v\", err)\n\t\t}),\n\t\tnats.ClosedHandler(func(*nats.Conn) {\n\t\t\tn.logger.Println(\"NATS connection is closed\")\n\t\t}),\n\t}\n\tif c.TLS != nil {\n\t\ttlsConfig, err := utils.NewTLSConfig(\n\t\t\tc.TLS.CaFile,\n\t\t\tc.TLS.CertFile,\n\t\t\tc.TLS.KeyFile,\n\t\t\t\"\",\n\t\t\tc.TLS.SkipVerify,\n\t\t\tfalse)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif tlsConfig != nil {\n\t\t\topts = append(opts, nats.Secure(tlsConfig))\n\t\t}\n\t}\n\tif c.Username != \"\" && c.Password != \"\" {\n\t\topts = append(opts, nats.UserInfo(c.Username, c.Password))\n\t}\n\tnc, err := nats.Connect(c.Address, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn nc, nil\n}\n\nfunc (n *jetstreamOutput) subjectName(m proto.Message, meta outputs.Meta, dc *dynConfig, cfg *config) (string, error) {\n\tsb := stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tsb.Reset()\n\t\tstringBuilderPool.Put(sb)\n\t}()\n\tsb.WriteString(cfg.Stream)\n\tsb.WriteString(\".\")\n\tswitch cfg.SubjectFormat {\n\tcase 
subjectFormat_Static:\n\t\tsb.WriteString(cfg.Subject)\n\tcase subjectFormat_TargetSub:\n\t\terr := dc.targetTpl.Execute(sb, meta)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif sub, ok := meta[\"subscription-name\"]; ok {\n\t\t\tsb.WriteString(\".\")\n\t\t\tsb.WriteString(sub)\n\t\t}\n\tcase subjectFormat_SubTarget:\n\t\tif sub, ok := meta[\"subscription-name\"]; ok {\n\t\t\tsb.WriteString(sub)\n\t\t\tsb.WriteString(\".\")\n\t\t}\n\t\terr := dc.targetTpl.Execute(sb, meta)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\tcase subjectFormat_SubTargetPath:\n\t\tif sub, ok := meta[\"subscription-name\"]; ok {\n\t\t\tsb.WriteString(sub)\n\t\t\tsb.WriteString(\".\")\n\t\t}\n\t\terr := dc.targetTpl.Execute(sb, meta)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tsb.WriteString(\".\")\n\t\tswitch rsp := m.(type) {\n\t\tcase *gnmi.SubscribeResponse:\n\t\t\tswitch rsp := rsp.Response.(type) {\n\t\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\t\tvar prefixSubject string\n\t\t\t\tif rsp.Update.GetPrefix() != nil {\n\t\t\t\t\tprefixSubject = gNMIPathToSubject(rsp.Update.GetPrefix(), false)\n\t\t\t\t}\n\t\t\t\tvar pathSubject string\n\t\t\t\tif len(rsp.Update.GetUpdate()) > 0 {\n\t\t\t\t\tpathSubject = gNMIPathToSubject(rsp.Update.GetUpdate()[0].GetPath(), false)\n\t\t\t\t}\n\t\t\t\tif prefixSubject != \"\" {\n\t\t\t\t\tsb.WriteString(prefixSubject)\n\t\t\t\t\tsb.WriteString(\".\")\n\t\t\t\t}\n\t\t\t\tif pathSubject != \"\" {\n\t\t\t\t\tsb.WriteString(pathSubject)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase subjectFormat_SubTargetPathWithKeys:\n\t\tif sub, ok := meta[\"subscription-name\"]; ok {\n\t\t\tsb.WriteString(sub)\n\t\t\tsb.WriteString(\".\")\n\t\t}\n\t\terr := dc.targetTpl.Execute(sb, meta)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tsb.WriteString(\".\")\n\t\tswitch rsp := m.(type) {\n\t\tcase *gnmi.SubscribeResponse:\n\t\t\tswitch rsp := rsp.Response.(type) {\n\t\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\t\tvar prefixSubject 
string\n\t\t\t\tif rsp.Update.GetPrefix() != nil {\n\t\t\t\t\tprefixSubject = gNMIPathToSubject(rsp.Update.GetPrefix(), true)\n\t\t\t\t}\n\t\t\t\tvar pathSubject string\n\t\t\t\tif len(rsp.Update.GetUpdate()) > 0 {\n\t\t\t\t\tpathSubject = gNMIPathToSubject(rsp.Update.GetUpdate()[0].GetPath(), true)\n\t\t\t\t}\n\t\t\t\tif prefixSubject != \"\" {\n\t\t\t\t\tsb.WriteString(prefixSubject)\n\t\t\t\t\tsb.WriteString(\".\")\n\t\t\t\t}\n\t\t\t\tif pathSubject != \"\" {\n\t\t\t\t\tsb.WriteString(pathSubject)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn sb.String(), nil\n}\n\nfunc splitSubscribeResponse(m *gnmi.SubscribeResponse_Update) []proto.Message {\n\tif m == nil || m.Update == nil {\n\t\treturn nil\n\t}\n\trs := make([]proto.Message, 0, len(m.Update.GetUpdate())+len(m.Update.Delete))\n\tfor _, upd := range m.Update.GetUpdate() {\n\t\trs = append(rs, &gnmi.SubscribeResponse{\n\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\tTimestamp: m.Update.GetTimestamp(),\n\t\t\t\t\tPrefix:    m.Update.GetPrefix(),\n\t\t\t\t\tUpdate:    []*gnmi.Update{upd},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\tfor _, del := range m.Update.GetDelete() {\n\t\trs = append(rs, &gnmi.SubscribeResponse{\n\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\tUpdate: &gnmi.Notification{\n\t\t\t\t\tTimestamp: m.Update.GetTimestamp(),\n\t\t\t\t\tPrefix:    m.Update.GetPrefix(),\n\t\t\t\t\tDelete:    []*gnmi.Path{del},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\treturn rs\n}\n\nfunc gNMIPathToSubject(p *gnmi.Path, keys bool) string {\n\tif p == nil {\n\t\treturn \"\"\n\t}\n\tsb := stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tsb.Reset()\n\t\tstringBuilderPool.Put(sb)\n\t}()\n\tif p.GetOrigin() != \"\" {\n\t\tfmt.Fprintf(sb, \"%s.\", p.GetOrigin())\n\t}\n\tfor i, e := range p.GetElem() {\n\t\tif i > 0 {\n\t\t\tsb.WriteString(\".\")\n\t\t}\n\t\tsb.WriteString(e.Name)\n\t\tif keys {\n\t\t\tif len(e.Key) > 0 {\n\t\t\t\t// sort keys by 
name\n\t\t\t\tkNames := make([]string, 0, len(e.Key))\n\t\t\t\tfor k := range e.Key {\n\t\t\t\t\tkNames = append(kNames, k)\n\t\t\t\t}\n\t\t\t\tsort.Strings(kNames)\n\t\t\t\tfor _, k := range kNames {\n\t\t\t\t\tsk := sanitizeKey(e.GetKey()[k])\n\t\t\t\t\tfmt.Fprintf(sb, \".{%s=%s}\", k, sk)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn sb.String()\n}\n\nvar stringBuilderPool = sync.Pool{\n\tNew: func() any {\n\t\treturn new(strings.Builder)\n\t},\n}\n\nfunc sanitizeKey(k string) string {\n\t// Fast path: no special chars\n\tif !strings.ContainsAny(k, \". \") {\n\t\treturn k\n\t}\n\n\tsb := stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tsb.Reset()\n\t\tstringBuilderPool.Put(sb)\n\t}()\n\n\tsb.Grow(len(k))\n\n\tfor _, r := range k {\n\t\tswitch r {\n\t\tcase '.':\n\t\t\tsb.WriteRune('^')\n\t\tcase ' ':\n\t\t\tsb.WriteRune('~')\n\t\tdefault:\n\t\t\tsb.WriteRune(r)\n\t\t}\n\t}\n\n\treturn sb.String()\n}\n\nfunc storageType(s string) nats.StorageType {\n\tswitch strings.ToLower(s) {\n\tcase \"file\":\n\t\treturn nats.FileStorage\n\tcase \"memory\":\n\t\treturn nats.MemoryStorage\n\t}\n\treturn nats.MemoryStorage\n}\n\nfunc isValidRetentionPolicy(policy string) bool {\n\tswitch strings.ToLower(policy) {\n\tcase \"limits\", \"workqueue\":\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc retentionPolicy(s string) nats.RetentionPolicy {\n\tswitch strings.ToLower(s) {\n\tcase \"workqueue\":\n\t\treturn nats.WorkQueuePolicy\n\tcase \"limits\":\n\t\treturn nats.LimitsPolicy\n\t}\n\treturn nats.LimitsPolicy\n}\n\nfunc (n *jetstreamOutput) createStream(js nats.JetStreamContext, cfg *config) error {\n\t// If CreateStream is not configured, we're using an existing stream\n\tif cfg.CreateStream == nil {\n\t\treturn nil\n\t}\n\n\tstream, err := js.StreamInfo(cfg.Stream)\n\tif err != nil {\n\t\tif !errors.Is(err, nats.ErrStreamNotFound) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Stream exists, nothing to do\n\tif stream != nil {\n\t\treturn nil\n\t}\n\n\t// Create stream 
with configured retention policy\n\tstreamConfig := &nats.StreamConfig{\n\t\tName:        cfg.Stream,\n\t\tDescription: cfg.CreateStream.Description,\n\t\tRetention:   retentionPolicy(cfg.CreateStream.Retention),\n\t\tSubjects:    cfg.CreateStream.Subjects,\n\t\tStorage:     storageType(cfg.CreateStream.Storage),\n\t\tMaxMsgs:     cfg.CreateStream.MaxMsgs,\n\t\tMaxBytes:    cfg.CreateStream.MaxBytes,\n\t\tMaxAge:      cfg.CreateStream.MaxAge,\n\t\tMaxMsgSize:  cfg.CreateStream.MaxMsgSize,\n\t}\n\n\t_, err = js.AddStream(streamConfig)\n\treturn err\n}\n\nfunc channelNeedsSwap(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.BufferSize != nw.BufferSize\n}\n\nfunc needsWorkerRestart(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.NumWorkers != nw.NumWorkers ||\n\t\t!old.TLS.Equal(nw.TLS) ||\n\t\told.Address != nw.Address ||\n\t\told.Username != nw.Username ||\n\t\told.Password != nw.Password\n}\n\nfunc streamChanged(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\t// stream name changed?\n\tif old.Stream != nw.Stream {\n\t\treturn true\n\t}\n\t// create stream presence changed?\n\tif (old.CreateStream == nil) != (nw.CreateStream == nil) {\n\t\treturn true\n\t}\n\t// both nil: nothing else to compare\n\tif old.CreateStream == nil && nw.CreateStream == nil {\n\t\treturn false\n\t}\n\t// compare contents\n\toc, nc := old.CreateStream, nw.CreateStream\n\tif oc.Description != nc.Description {\n\t\treturn true\n\t}\n\tif !slices.Equal(oc.Subjects, nc.Subjects) {\n\t\treturn true\n\t}\n\tif storageType(oc.Storage) != storageType(nc.Storage) {\n\t\treturn true\n\t}\n\tif oc.MaxMsgs != nc.MaxMsgs {\n\t\treturn true\n\t}\n\tif oc.MaxBytes != nc.MaxBytes {\n\t\treturn true\n\t}\n\tif oc.MaxAge != nc.MaxAge {\n\t\treturn true\n\t}\n\tif oc.MaxMsgSize != nc.MaxMsgSize {\n\t\treturn true\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "pkg/outputs/nats_outputs/jetstream/jetstream_output_metrics.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage jetstream_output\n\nimport (\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar registerMetricsOnce sync.Once\n\nvar jetStreamNumberOfSentMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"jetstream_output\",\n\tName:      \"number_of_jetstream_msgs_sent_success_total\",\n\tHelp:      \"Number of msgs successfully sent by gnmic jetstream output\",\n}, []string{\"publisher_id\", \"subject\"})\n\nvar jetStreamNumberOfSentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"jetstream_output\",\n\tName:      \"number_of_written_jetstream_bytes_total\",\n\tHelp:      \"Number of bytes written by gnmic jetstream output\",\n}, []string{\"publisher_id\", \"subject\"})\n\nvar jetStreamNumberOfFailSendMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"jetstream_output\",\n\tName:      \"number_of_jetstream_msgs_sent_fail_total\",\n\tHelp:      \"Number of failed msgs sent by gnmic jetstream output\",\n}, []string{\"publisher_id\", \"reason\"})\n\nvar jetStreamSendDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"jetstream_output\",\n\tName:      \"msg_send_duration_ns\",\n\tHelp:      \"gnmic jetstream output send duration in ns\",\n}, []string{\"publisher_id\"})\n\nfunc (n *jetstreamOutput) initMetrics() {\n\tcurrCfg := n.cfg.Load()\n\tif currCfg == nil 
{\n\t\treturn\n\t}\n\tjetStreamNumberOfSentMsgs.WithLabelValues(currCfg.Name, \"\").Add(0)\n\tjetStreamNumberOfSentBytes.WithLabelValues(currCfg.Name, \"\").Add(0)\n\tjetStreamNumberOfFailSendMsgs.WithLabelValues(currCfg.Name, \"\").Add(0)\n\tjetStreamSendDuration.WithLabelValues(currCfg.Name).Set(0)\n}\n\nfunc (n *jetstreamOutput) registerMetrics() error {\n\tcurrCfg := n.cfg.Load()\n\tif currCfg == nil {\n\t\treturn nil\n\t}\n\tif !currCfg.EnableMetrics {\n\t\treturn nil\n\t}\n\tif n.reg == nil {\n\t\tn.logger.Printf(\"ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`\")\n\t\treturn nil\n\t}\n\n\tvar err error\n\tregisterMetricsOnce.Do(func() {\n\t\tif err = n.reg.Register(jetStreamNumberOfSentMsgs); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = n.reg.Register(jetStreamNumberOfSentBytes); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = n.reg.Register(jetStreamNumberOfFailSendMsgs); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = n.reg.Register(jetStreamSendDuration); err != nil {\n\t\t\treturn\n\t\t}\n\t})\n\tn.initMetrics()\n\treturn err\n}\n"
  },
  {
    "path": "pkg/outputs/nats_outputs/jetstream/jetstream_output_test.go",
    "content": "package jetstream_output\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"sync/atomic\"\n\n\t\"github.com/nats-io/nats.go\"\n)\n\nfunc Test_isValidRetentionPolicy(t *testing.T) {\n\ttests := []struct {\n\t\tname   string\n\t\tpolicy string\n\t\twant   bool\n\t}{\n\t\t{\n\t\t\tname:   \"valid limits policy\",\n\t\t\tpolicy: \"limits\",\n\t\t\twant:   true,\n\t\t},\n\t\t{\n\t\t\tname:   \"valid workqueue policy\",\n\t\t\tpolicy: \"workqueue\",\n\t\t\twant:   true,\n\t\t},\n\t\t{\n\t\t\tname:   \"valid limits policy uppercase\",\n\t\t\tpolicy: \"LIMITS\",\n\t\t\twant:   true,\n\t\t},\n\t\t{\n\t\t\tname:   \"valid workqueue policy uppercase\",\n\t\t\tpolicy: \"WORKQUEUE\",\n\t\t\twant:   true,\n\t\t},\n\t\t{\n\t\t\tname:   \"valid limits policy mixed case\",\n\t\t\tpolicy: \"Limits\",\n\t\t\twant:   true,\n\t\t},\n\t\t{\n\t\t\tname:   \"invalid empty policy\",\n\t\t\tpolicy: \"\",\n\t\t\twant:   false,\n\t\t},\n\t\t{\n\t\t\tname:   \"invalid interest policy\",\n\t\t\tpolicy: \"interest\",\n\t\t\twant:   false,\n\t\t},\n\t\t{\n\t\t\tname:   \"invalid random string\",\n\t\t\tpolicy: \"invalid\",\n\t\t\twant:   false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif got := isValidRetentionPolicy(tt.policy); got != tt.want {\n\t\t\t\tt.Errorf(\"isValidRetentionPolicy() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_retentionPolicy(t *testing.T) {\n\ttests := []struct {\n\t\tname   string\n\t\tpolicy string\n\t\twant   nats.RetentionPolicy\n\t}{\n\t\t{\n\t\t\tname:   \"workqueue policy lowercase\",\n\t\t\tpolicy: \"workqueue\",\n\t\t\twant:   nats.WorkQueuePolicy,\n\t\t},\n\t\t{\n\t\t\tname:   \"workqueue policy uppercase\",\n\t\t\tpolicy: \"WORKQUEUE\",\n\t\t\twant:   nats.WorkQueuePolicy,\n\t\t},\n\t\t{\n\t\t\tname:   \"workqueue policy mixed case\",\n\t\t\tpolicy: \"WorkQueue\",\n\t\t\twant:   nats.WorkQueuePolicy,\n\t\t},\n\t\t{\n\t\t\tname:   \"limits policy lowercase\",\n\t\t\tpolicy: 
\"limits\",\n\t\t\twant:   nats.LimitsPolicy,\n\t\t},\n\t\t{\n\t\t\tname:   \"limits policy uppercase\",\n\t\t\tpolicy: \"LIMITS\",\n\t\t\twant:   nats.LimitsPolicy,\n\t\t},\n\t\t{\n\t\t\tname:   \"limits policy mixed case\",\n\t\t\tpolicy: \"Limits\",\n\t\t\twant:   nats.LimitsPolicy,\n\t\t},\n\t\t{\n\t\t\tname:   \"empty string defaults to limits\",\n\t\t\tpolicy: \"\",\n\t\t\twant:   nats.LimitsPolicy,\n\t\t},\n\t\t{\n\t\t\tname:   \"invalid policy defaults to limits\",\n\t\t\tpolicy: \"invalid\",\n\t\t\twant:   nats.LimitsPolicy,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif got := retentionPolicy(tt.policy); got != tt.want {\n\t\t\t\tt.Errorf(\"retentionPolicy() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_setDefaults(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tcfg     *config\n\t\twantErr bool\n\t\terrMsg  string\n\t}{\n\t\t{\n\t\t\tname: \"missing stream name\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"\",\n\t\t\t},\n\t\t\twantErr: true,\n\t\t\terrMsg:  \"missing stream name\",\n\t\t},\n\t\t{\n\t\t\tname: \"valid create-stream with limits retention\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t\tCreateStream: &createStreamConfig{\n\t\t\t\t\tSubjects:  []string{\"test.>\"},\n\t\t\t\t\tRetention: \"limits\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"valid create-stream with workqueue retention\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t\tCreateStream: &createStreamConfig{\n\t\t\t\t\tSubjects:  []string{\"test.>\"},\n\t\t\t\t\tRetention: \"workqueue\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid retention policy\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t\tCreateStream: &createStreamConfig{\n\t\t\t\t\tSubjects:  []string{\"test.>\"},\n\t\t\t\t\tRetention: \"interest\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: true,\n\t\t\terrMsg:  \"invalid 
retention-policy: interest\",\n\t\t},\n\t\t{\n\t\t\tname: \"create-stream with empty retention defaults to limits\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t\tCreateStream: &createStreamConfig{\n\t\t\t\t\tSubjects: []string{\"test.>\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"create-stream with uppercase WORKQUEUE retention\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t\tCreateStream: &createStreamConfig{\n\t\t\t\t\tSubjects:  []string{\"test.>\"},\n\t\t\t\t\tRetention: \"WORKQUEUE\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"create-stream with uppercase LIMITS retention\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t\tCreateStream: &createStreamConfig{\n\t\t\t\t\tSubjects:  []string{\"test.>\"},\n\t\t\t\t\tRetention: \"LIMITS\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"create-stream with invalid retention\",\n\t\t\tcfg: &config{\n\t\t\t\tStream: \"test-stream\",\n\t\t\t\tCreateStream: &createStreamConfig{\n\t\t\t\t\tSubjects:  []string{\"test.>\"},\n\t\t\t\t\tRetention: \"invalid\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: true,\n\t\t\terrMsg:  \"invalid retention-policy: invalid\",\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tcfg := new(atomic.Pointer[config])\n\t\t\tcfg.Store(tt.cfg)\n\t\t\tn := &jetstreamOutput{\n\t\t\t\tcfg: cfg,\n\t\t\t}\n\t\t\terr := n.setDefaultsFor(tt.cfg)\n\t\t\tif tt.wantErr {\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Errorf(\"setDefaults() expected error but got nil\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif tt.errMsg != \"\" && !strings.Contains(err.Error(), tt.errMsg) {\n\t\t\t\t\tt.Errorf(\"setDefaults() error = %v, want error containing %v\", err.Error(), tt.errMsg)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"setDefaults() unexpected error = %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// Verify 
defaults were set correctly\n\t\t\t\trcfg := cfg.Load()\n\t\t\t\tif rcfg.CreateStream != nil {\n\t\t\t\t\tif rcfg.CreateStream.Retention == \"\" {\n\t\t\t\t\t\tt.Errorf(\"setDefaults() did not set default retention policy\")\n\t\t\t\t\t}\n\t\t\t\t\tif rcfg.CreateStream.Retention != \"\" && rcfg.CreateStream.Retention != \"limits\" && rcfg.CreateStream.Retention != \"workqueue\" && rcfg.CreateStream.Retention != \"LIMITS\" && rcfg.CreateStream.Retention != \"WORKQUEUE\" {\n\t\t\t\t\t\tt.Errorf(\"setDefaults() set invalid retention policy: %s\", rcfg.CreateStream.Retention)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/outputs/nats_outputs/nats/nats_metrics.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage nats_output\n\nimport (\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar registerMetricsOnce sync.Once\n\nvar NatsNumberOfSentMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"nats_output\",\n\tName:      \"number_of_nats_msgs_sent_success_total\",\n\tHelp:      \"Number of msgs successfully sent by gnmic nats output\",\n}, []string{\"publisher_id\", \"subject\"})\n\nvar NatsNumberOfSentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"nats_output\",\n\tName:      \"number_of_written_nats_bytes_total\",\n\tHelp:      \"Number of bytes written by gnmic nats output\",\n}, []string{\"publisher_id\", \"subject\"})\n\nvar NatsNumberOfFailSendMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"nats_output\",\n\tName:      \"number_of_nats_msgs_sent_fail_total\",\n\tHelp:      \"Number of failed msgs sent by gnmic nats output\",\n}, []string{\"publisher_id\", \"reason\"})\n\nvar NatsSendDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"nats_output\",\n\tName:      \"msg_send_duration_ns\",\n\tHelp:      \"gnmic nats output send duration in ns\",\n}, []string{\"publisher_id\"})\n\nfunc (n *NatsOutput) initMetrics() {\n\tcurrCfg := n.cfg.Load()\n\tif currCfg == nil {\n\t\treturn\n\t}\n\tNatsNumberOfSentMsgs.WithLabelValues(currCfg.Name, 
\"\").Add(0)\n\tNatsNumberOfSentBytes.WithLabelValues(currCfg.Name, \"\").Add(0)\n\tNatsNumberOfFailSendMsgs.WithLabelValues(currCfg.Name, \"\").Add(0)\n\tNatsSendDuration.WithLabelValues(currCfg.Name).Set(0)\n}\n\nfunc (n *NatsOutput) registerMetrics() error {\n\tif n.reg == nil {\n\t\treturn nil\n\t}\n\tvar err error\n\tregisterMetricsOnce.Do(func() {\n\t\tif err = n.reg.Register(NatsNumberOfSentMsgs); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = n.reg.Register(NatsNumberOfSentBytes); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = n.reg.Register(NatsNumberOfFailSendMsgs); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = n.reg.Register(NatsSendDuration); err != nil {\n\t\t\treturn\n\t\t}\n\t})\n\tn.initMetrics()\n\treturn err\n}\n"
  },
  {
    "path": "pkg/outputs/nats_outputs/nats/nats_output.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage nats_output\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"github.com/google/uuid\"\n\t\"github.com/nats-io/nats.go\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\tgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tnatsConnectWait         = 2 * time.Second\n\tnatsReconnectBufferSize = 100 * 1024 * 1024\n\tdefaultSubjectName      = \"telemetry\"\n\tdefaultFormat           = \"event\"\n\tdefaultNumWorkers       = 1\n\tdefaultWriteTimeout     = 5 * time.Second\n\tdefaultAddress          = \"localhost:4222\"\n\tloggingPrefix           = \"[nats_output:%s] \"\n)\n\nfunc init() {\n\toutputs.Register(\"nats\", func() outputs.Output {\n\t\treturn &NatsOutput{}\n\t})\n}\n\nfunc (n *NatsOutput) init() {\n\tn.cfg = new(atomic.Pointer[Config])\n\tn.dynCfg = new(atomic.Pointer[dynConfig])\n\tn.msgChan = new(atomic.Pointer[chan *outputs.ProtoMsg])\n\tn.wg = new(sync.WaitGroup)\n\tn.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags)\n}\n\n// NatsOutput //\ntype NatsOutput struct 
{\n\toutputs.BaseOutput\n\t// Cfg *Config\n\tcfg    *atomic.Pointer[Config]\n\tdynCfg *atomic.Pointer[dynConfig]\n\t// root context\n\tctx      context.Context\n\tcancelFn context.CancelFunc\n\tmsgChan  *atomic.Pointer[chan *outputs.ProtoMsg] // atomic channel swaps\n\twg       *sync.WaitGroup\n\tlogger   *log.Logger\n\n\treg   *prometheus.Registry\n\tstore store.Store[any]\n}\n\ntype dynConfig struct {\n\ttargetTpl *template.Template\n\tmsgTpl    *template.Template\n\tevps      []formatters.EventProcessor\n\tmo        *formatters.MarshalOptions\n}\n\n// Config //\ntype Config struct {\n\tName               string           `mapstructure:\"name,omitempty\"`\n\tAddress            string           `mapstructure:\"address,omitempty\"`\n\tSubjectPrefix      string           `mapstructure:\"subject-prefix,omitempty\"`\n\tSubject            string           `mapstructure:\"subject,omitempty\"`\n\tUsername           string           `mapstructure:\"username,omitempty\"`\n\tPassword           string           `mapstructure:\"password,omitempty\"`\n\tConnectTimeWait    time.Duration    `mapstructure:\"connect-time-wait,omitempty\"`\n\tTLS                *types.TLSConfig `mapstructure:\"tls,omitempty\" json:\"tls,omitempty\"`\n\tFormat             string           `mapstructure:\"format,omitempty\"`\n\tSplitEvents        bool             `mapstructure:\"split-events,omitempty\"`\n\tAddTarget          string           `mapstructure:\"add-target,omitempty\"`\n\tTargetTemplate     string           `mapstructure:\"target-template,omitempty\"`\n\tMsgTemplate        string           `mapstructure:\"msg-template,omitempty\"`\n\tOverrideTimestamps bool             `mapstructure:\"override-timestamps,omitempty\"`\n\tNumWorkers         int              `mapstructure:\"num-workers,omitempty\"`\n\tWriteTimeout       time.Duration    `mapstructure:\"write-timeout,omitempty\"`\n\tDebug              bool             `mapstructure:\"debug,omitempty\"`\n\tBufferSize         uint             
`mapstructure:\"buffer-size,omitempty\"`\n\tEnableMetrics      bool             `mapstructure:\"enable-metrics,omitempty\"`\n\tEventProcessors    []string         `mapstructure:\"event-processors,omitempty\"`\n}\n\nfunc (n *NatsOutput) String() string {\n\tcfg := n.cfg.Load()\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (n *NatsOutput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := gutils.GetConfigMaps(n.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tevps, err := formatters.MakeEventProcessors(\n\t\tlogger,\n\t\teventProcessors,\n\t\tps,\n\t\ttcs,\n\t\tacts,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn evps, nil\n}\n\nfunc (n *NatsOutput) setLogger(logger *log.Logger) {\n\tif logger != nil && n.logger != nil {\n\t\tn.logger.SetOutput(logger.Writer())\n\t\tn.logger.SetFlags(logger.Flags())\n\t}\n}\n\n// Init //\nfunc (n *NatsOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error {\n\tn.init() // init struct fields\n\tnewCfg := new(Config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif newCfg.Name == \"\" {\n\t\tnewCfg.Name = name\n\t}\n\tn.logger.SetPrefix(fmt.Sprintf(loggingPrefix, newCfg.Name))\n\n\toptions := &outputs.OutputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tn.store = options.Store\n\t// set defaults\n\tn.setDefaultsFor(newCfg)\n\n\tn.cfg.Store(newCfg)\n\n\t// apply logger\n\tn.setLogger(options.Logger)\n\n\t// initialize registry\n\tn.reg = options.Registry\n\terr = n.registerMetrics()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// initialize message channel\n\tmsgChan := make(chan *outputs.ProtoMsg, newCfg.BufferSize)\n\tn.msgChan.Store(&msgChan)\n\n\t// prep dynamic config\n\tdc := new(dynConfig)\n\n\tdc.mo = 
&formatters.MarshalOptions{\n\t\tFormat:     newCfg.Format,\n\t\tOverrideTS: newCfg.OverrideTimestamps,\n\t}\n\t// initialize event processors\n\tdc.evps, err = n.buildEventProcessors(options.Logger, newCfg.EventProcessors)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// initialize target template\n\tif newCfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tdc.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\n\t// initialize message template\n\tif newCfg.MsgTemplate != \"\" {\n\t\tdc.msgTpl, err = gtemplate.CreateTemplate(\"msg-template\", newCfg.MsgTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.msgTpl = dc.msgTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\n\tn.dynCfg = new(atomic.Pointer[dynConfig])\n\tn.dynCfg.Store(dc)\n\n\t// initialize context\n\tn.ctx, n.cancelFn = context.WithCancel(ctx)\n\tn.wg.Add(newCfg.NumWorkers)\n\tfor i := 0; i < newCfg.NumWorkers; i++ {\n\t\tgo n.worker(n.ctx, i)\n\t}\n\n\treturn nil\n}\n\nfunc (n *NatsOutput) setDefaultsFor(cfg *Config) {\n\tif cfg.Format == \"\" {\n\t\tcfg.Format = defaultFormat\n\t}\n\tif cfg.Address == \"\" {\n\t\tcfg.Address = defaultAddress\n\t}\n\tif cfg.ConnectTimeWait <= 0 {\n\t\tcfg.ConnectTimeWait = natsConnectWait\n\t}\n\tif cfg.Subject == \"\" && cfg.SubjectPrefix == \"\" {\n\t\tcfg.Subject = defaultSubjectName\n\t}\n\tif cfg.Name == \"\" {\n\t\tcfg.Name = \"gnmic-\" + uuid.New().String()\n\t}\n\tif cfg.NumWorkers <= 0 {\n\t\tcfg.NumWorkers = defaultNumWorkers\n\t}\n\tif cfg.WriteTimeout <= 0 {\n\t\tcfg.WriteTimeout = defaultWriteTimeout\n\t}\n}\n\nfunc (n *NatsOutput) Validate(cfg map[string]any) error {\n\tncfg := new(Config)\n\terr := outputs.DecodeConfig(cfg, ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = gtemplate.CreateTemplate(\"target-template\", 
ncfg.TargetTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = gtemplate.CreateTemplate(\"msg-template\", ncfg.MsgTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (n *NatsOutput) Update(ctx context.Context, cfg map[string]any) error {\n\tnewCfg := new(Config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn.setDefaultsFor(newCfg)\n\tcurrCfg := n.cfg.Load()\n\n\tswapChannel := channelNeedsSwap(currCfg, newCfg)\n\trestartWorkers := needsWorkerRestart(currCfg, newCfg)\n\trebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0\n\n\tvar targetTpl *template.Template\n\tif newCfg.TargetTemplate == \"\" {\n\t\ttargetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tt, err := gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttargetTpl = t.Funcs(outputs.TemplateFuncs)\n\t} else {\n\t\ttargetTpl = outputs.DefaultTargetTemplate\n\t}\n\n\tvar msgTpl *template.Template\n\tif newCfg.MsgTemplate != \"\" {\n\t\tt, err := gtemplate.CreateTemplate(\"msg-template\", newCfg.MsgTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmsgTpl = t.Funcs(outputs.TemplateFuncs)\n\t}\n\n\tdc := &dynConfig{\n\t\ttargetTpl: targetTpl,\n\t\tmsgTpl:    msgTpl,\n\t\tmo: &formatters.MarshalOptions{\n\t\t\tFormat:     newCfg.Format,\n\t\t\tOverrideTS: newCfg.OverrideTimestamps,\n\t\t},\n\t}\n\n\tprevDC := n.dynCfg.Load()\n\tif rebuildProcessors {\n\t\tdc.evps, err = n.buildEventProcessors(n.logger, newCfg.EventProcessors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\tn.dynCfg.Store(dc)\n\tn.cfg.Store(newCfg)\n\n\tif swapChannel || restartWorkers {\n\t\tvar newMsgChan chan *outputs.ProtoMsg\n\t\tif swapChannel {\n\t\t\tnewMsgChan = make(chan *outputs.ProtoMsg, newCfg.BufferSize)\n\t\t} else {\n\t\t\tnewMsgChan = 
*n.msgChan.Load()\n\t\t}\n\n\t\trunCtx, cancel := context.WithCancel(n.ctx)\n\t\tnewWG := new(sync.WaitGroup)\n\t\t// save old pointers\n\t\toldCancel := n.cancelFn\n\t\toldWG := n.wg\n\t\toldMsgChan := *n.msgChan.Load()\n\t\t// swap\n\t\tn.cancelFn = cancel\n\t\tn.wg = newWG\n\t\tn.msgChan.Store(&newMsgChan)\n\n\t\tn.wg.Add(currCfg.NumWorkers)\n\t\tfor i := 0; i < currCfg.NumWorkers; i++ {\n\t\t\tgo n.worker(runCtx, i)\n\t\t}\n\t\t// cancel old workers and loops\n\t\tif oldCancel != nil {\n\t\t\toldCancel()\n\t\t}\n\t\tif oldWG != nil {\n\t\t\toldWG.Wait()\n\t\t}\n\t\tif swapChannel {\n\t\t\t// best effort drain old channel\n\t\tOUTER_LOOP:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase msg, ok := <-oldMsgChan:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase newMsgChan <- msg:\n\t\t\t\t\tdefault:\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tbreak OUTER_LOOP\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tn.logger.Printf(\"updated nats output: %s\", n.String())\n\treturn nil\n\n}\n\nfunc (n *NatsOutput) UpdateProcessor(name string, pcfg map[string]any) error {\n\tcfg := n.cfg.Load()\n\tdc := n.dynCfg.Load()\n\n\tnewEvps, changed, err := outputs.UpdateProcessorInSlice(\n\t\tn.logger,\n\t\tn.store,\n\t\tcfg.EventProcessors,\n\t\tdc.evps,\n\t\tname,\n\t\tpcfg,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif changed {\n\t\tnewDC := *dc\n\t\tnewDC.evps = newEvps\n\t\tn.dynCfg.Store(&newDC)\n\t\tn.logger.Printf(\"updated event processor %s\", name)\n\t}\n\treturn nil\n}\n\n// Write //\nfunc (n *NatsOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) {\n\tdc := n.dynCfg.Load()\n\tcfg := n.cfg.Load()\n\tif rsp == nil || dc == nil || dc.mo == nil {\n\t\treturn\n\t}\n\n\twctx, cancel := context.WithTimeout(ctx, cfg.WriteTimeout)\n\tdefer cancel()\n\n\tch := n.msgChan.Load()\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tcase *ch <- outputs.NewProtoMsg(rsp, meta):\n\tcase <-wctx.Done():\n\t\tif cfg.Debug 
{\n\t\t\tn.logger.Printf(\"writing expired after %s, NATS output might not be initialized\", cfg.WriteTimeout)\n\t\t}\n\t\tif cfg.EnableMetrics {\n\t\t\tNatsNumberOfFailSendMsgs.WithLabelValues(cfg.Name, \"timeout\").Inc()\n\t\t}\n\t\treturn\n\t}\n}\n\nfunc (n *NatsOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {}\n\n// Close //\nfunc (n *NatsOutput) Close() error {\n\tn.cancelFn()\n\tn.wg.Wait()\n\tn.logger.Printf(\"closed nats output: %s\", n.String())\n\treturn nil\n}\n\nfunc (n *NatsOutput) createNATSConn(c *Config, i int) (*nats.Conn, error) {\n\topts := []nats.Option{\n\t\tnats.Name(fmt.Sprintf(\"%s-%d\", c.Name, i)),\n\t\tnats.SetCustomDialer(n),\n\t\tnats.ReconnectWait(c.ConnectTimeWait),\n\t\tnats.ReconnectBufSize(natsReconnectBufferSize),\n\t\tnats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) {\n\t\t\tn.logger.Printf(\"NATS error: %v\", err)\n\t\t}),\n\t\tnats.DisconnectErrHandler(func(_ *nats.Conn, err error) {\n\t\t\tn.logger.Printf(\"Disconnected from NATS err=%v\", err)\n\t\t}),\n\t\tnats.ClosedHandler(func(*nats.Conn) {\n\t\t\tn.logger.Println(\"NATS connection is closed\")\n\t\t}),\n\t}\n\tif c.Username != \"\" && c.Password != \"\" {\n\t\topts = append(opts, nats.UserInfo(c.Username, c.Password))\n\t}\n\tif c.TLS != nil {\n\t\ttlsConfig, err := utils.NewTLSConfig(\n\t\t\tc.TLS.CaFile,\n\t\t\tc.TLS.CertFile,\n\t\t\tc.TLS.KeyFile,\n\t\t\t\"\",\n\t\t\tc.TLS.SkipVerify,\n\t\t\tfalse)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif tlsConfig != nil {\n\t\t\topts = append(opts, nats.Secure(tlsConfig))\n\t\t}\n\t}\n\tnc, err := nats.Connect(c.Address, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn nc, nil\n}\n\n// Dial //\nfunc (n *NatsOutput) Dial(network, address string) (net.Conn, error) {\n\tctx, cancel := context.WithCancel(n.ctx)\n\tdefer cancel()\n\n\tfor {\n\t\tcfg := n.cfg.Load()\n\t\tn.logger.Printf(\"attempting to connect to %s\", address)\n\t\tif ctx.Err() != nil {\n\t\t\treturn 
nil, ctx.Err()\n\t\t}\n\n\t\tselect {\n\t\tcase <-n.ctx.Done():\n\t\t\treturn nil, n.ctx.Err()\n\t\tdefault:\n\t\t\td := &net.Dialer{}\n\t\t\tconn, err := d.DialContext(ctx, network, address)\n\t\t\tif err != nil {\n\t\t\t\tn.logger.Printf(\"failed to connect to NATS server %s: %v\", address, err)\n\t\t\t\ttime.Sleep(cfg.ConnectTimeWait)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tn.logger.Printf(\"successfully connected to NATS server %s\", address)\n\t\t\treturn conn, nil\n\t\t}\n\t}\n}\n\nfunc (n *NatsOutput) worker(ctx context.Context, i int) {\n\tdefer n.wg.Done()\n\tvar natsConn *nats.Conn\n\tvar err error\n\tworkerLogPrefix := fmt.Sprintf(\"worker-%d\", i)\n\n\tdefer n.logger.Printf(\"%s exited\", workerLogPrefix)\n\tn.logger.Printf(\"%s starting\", workerLogPrefix)\n\tmsgChan := *n.msgChan.Load()\nCRCONN:\n\tif ctx.Err() != nil {\n\t\treturn\n\t}\n\tcfg := n.cfg.Load()\n\tnatsConn, err = n.createNATSConn(cfg, i)\n\tif err != nil {\n\t\tn.logger.Printf(\"%s failed to create connection: %v\", workerLogPrefix, err)\n\t\ttime.Sleep(cfg.ConnectTimeWait)\n\t\tgoto CRCONN\n\t}\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tn.logger.Printf(\"%s flushing\", workerLogPrefix)\n\t\t\tnatsConn.FlushTimeout(time.Second)\n\t\t\tn.logger.Printf(\"%s shutting down\", workerLogPrefix)\n\t\t\tnatsConn.Close()\n\t\t\treturn\n\t\tcase m := <-msgChan:\n\t\t\tpmsg := m.GetMsg()\n\t\t\t// get fresh config\n\t\t\tcfg := n.cfg.Load()\n\t\t\t// snapshot template and marshal options\n\t\t\tdc := n.dynCfg.Load()\n\t\t\tname := fmt.Sprintf(\"%s-%d\", cfg.Name, i)\n\t\t\tpmsg, err = outputs.AddSubscriptionTarget(pmsg, m.GetMeta(), cfg.AddTarget, dc.targetTpl)\n\t\t\tif err != nil {\n\t\t\t\tn.logger.Printf(\"failed to add target to the response: %v\", err)\n\t\t\t}\n\t\t\tbb, err := outputs.Marshal(pmsg, m.GetMeta(), dc.mo, cfg.SplitEvents, dc.evps...)\n\t\t\tif err != nil {\n\t\t\t\tif cfg.Debug {\n\t\t\t\t\tn.logger.Printf(\"%s failed marshaling proto msg: %v\", workerLogPrefix, 
err)\n\t\t\t\t}\n\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\tNatsNumberOfFailSendMsgs.WithLabelValues(name, \"marshal_error\").Inc()\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(bb) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, b := range bb {\n\t\t\t\tif dc.msgTpl != nil {\n\t\t\t\t\tb, err = outputs.ExecTemplate(b, dc.msgTpl)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tif cfg.Debug {\n\t\t\t\t\t\t\tlog.Printf(\"failed to execute template: %v\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tNatsNumberOfFailSendMsgs.WithLabelValues(name, \"template_error\").Inc()\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tsubject := n.subjectName(m.GetMeta(), cfg)\n\t\t\t\tvar start time.Time\n\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\tstart = time.Now()\n\t\t\t\t}\n\t\t\t\terr = natsConn.Publish(subject, b)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif cfg.Debug {\n\t\t\t\t\t\tn.logger.Printf(\"%s failed to write to nats subject '%s': %v\", workerLogPrefix, subject, err)\n\t\t\t\t\t}\n\t\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\t\tNatsNumberOfFailSendMsgs.WithLabelValues(cfg.Name, \"publish_error\").Inc()\n\t\t\t\t\t}\n\t\t\t\t\tif cfg.Debug {\n\t\t\t\t\t\tn.logger.Printf(\"%s closing connection to NATS '%s'\", workerLogPrefix, subject)\n\t\t\t\t\t}\n\n\t\t\t\t\tnatsConn.Close()\n\t\t\t\t\ttime.Sleep(cfg.ConnectTimeWait)\n\n\t\t\t\t\tif cfg.Debug {\n\t\t\t\t\t\tn.logger.Printf(\"%s reconnecting to NATS\", workerLogPrefix)\n\t\t\t\t\t}\n\t\t\t\t\tgoto CRCONN\n\t\t\t\t}\n\t\t\t\tif cfg.EnableMetrics {\n\t\t\t\t\tNatsSendDuration.WithLabelValues(name).Set(float64(time.Since(start).Nanoseconds()))\n\t\t\t\t\tNatsNumberOfSentMsgs.WithLabelValues(name, subject).Inc()\n\t\t\t\t\tNatsNumberOfSentBytes.WithLabelValues(name, subject).Add(float64(len(b)))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar stringBuilderPool = sync.Pool{\n\tNew: func() any {\n\t\treturn new(strings.Builder)\n\t},\n}\n\nfunc (n *NatsOutput) subjectName(meta outputs.Meta, cfg *Config) string {\n\tif cfg.SubjectPrefix != \"\" 
{\n\t\tssb := stringBuilderPool.Get().(*strings.Builder)\n\t\tdefer func() {\n\t\t\tssb.Reset()\n\t\t\tstringBuilderPool.Put(ssb)\n\t\t}()\n\t\tssb.WriteString(cfg.SubjectPrefix)\n\n\t\tif s, ok := meta[\"source\"]; ok {\n\t\t\tssb.WriteString(\".\")\n\t\t\tfor _, r := range s {\n\t\t\t\tswitch r {\n\t\t\t\tcase '.':\n\t\t\t\t\tssb.WriteRune('-')\n\t\t\t\tcase ' ':\n\t\t\t\t\tssb.WriteRune('_')\n\t\t\t\tdefault:\n\t\t\t\t\tssb.WriteRune(r)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif subname, ok := meta[\"subscription-name\"]; ok {\n\t\t\tssb.WriteString(\".\")\n\t\t\tfor _, r := range subname {\n\t\t\t\tif r == ' ' {\n\t\t\t\t\tssb.WriteRune('_')\n\t\t\t\t} else {\n\t\t\t\t\tssb.WriteRune(r)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn ssb.String()\n\t}\n\treturn strings.ReplaceAll(cfg.Subject, \" \", \"_\")\n}\n\nfunc channelNeedsSwap(old, nw *Config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.BufferSize != nw.BufferSize\n}\n\nfunc needsWorkerRestart(old, nw *Config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.NumWorkers != nw.NumWorkers ||\n\t\t!old.TLS.Equal(nw.TLS) ||\n\t\told.Address != nw.Address ||\n\t\told.Username != nw.Username ||\n\t\told.Password != nw.Password\n}\n"
  },
  {
    "path": "pkg/outputs/options.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage outputs\n\nimport (\n\t\"log\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\ntype OutputOptions struct {\n\tName        string\n\tClusterName string\n\tLogger      *log.Logger\n\tRegistry    *prometheus.Registry\n\tStore       store.Store[any]\n}\n\ntype Option func(*OutputOptions) error\n\nfunc WithLogger(logger *log.Logger) Option {\n\treturn func(o *OutputOptions) error {\n\t\to.Logger = logger\n\t\treturn nil\n\t}\n}\n\nfunc WithRegistry(reg *prometheus.Registry) Option {\n\treturn func(o *OutputOptions) error {\n\t\to.Registry = reg\n\t\treturn nil\n\t}\n}\n\nfunc WithName(name string) Option {\n\treturn func(o *OutputOptions) error {\n\t\to.Name = name\n\t\treturn nil\n\t}\n}\n\nfunc WithClusterName(name string) Option {\n\treturn func(o *OutputOptions) error {\n\t\to.ClusterName = name\n\t\treturn nil\n\t}\n}\n\nfunc WithConfigStore(st store.Store[any]) Option {\n\treturn func(o *OutputOptions) error {\n\t\to.Store = st\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "pkg/outputs/otlp_output/otlp_converter.go",
    "content": "// © 2025-2026 NVIDIA Corporation\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of NVIDIA's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage otlp_output\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"google.golang.org/grpc/metadata\"\n\n\tmetricsv1 \"go.opentelemetry.io/proto/otlp/collector/metrics/v1\"\n\tcommonpb \"go.opentelemetry.io/proto/otlp/common/v1\"\n\tmetricspb \"go.opentelemetry.io/proto/otlp/metrics/v1\"\n\tresourcepb \"go.opentelemetry.io/proto/otlp/resource/v1\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/version\"\n)\n\nvar stringsBuilderPool = sync.Pool{\n\tNew: func() any {\n\t\treturn &strings.Builder{}\n\t},\n}\n\n// convertToOTLP converts gNMI EventMsg slice to OTLP ExportMetricsServiceRequest\nfunc (o *otlpOutput) convertToOTLP(events []*formatters.EventMsg) *metricsv1.ExportMetricsServiceRequest {\n\tcfg := o.cfg.Load()\n\n\tif cfg.Debug {\n\t\to.logger.Printf(\"DEBUG: convertToOTLP called with %d events\", len(events))\n\t}\n\n\t// Group events by resource (source)\n\tresourceGroups := o.groupByResource(events)\n\n\tif cfg.Debug {\n\t\to.logger.Printf(\"DEBUG: Grouped into %d resource groups\", len(resourceGroups))\n\t}\n\n\treq := &metricsv1.ExportMetricsServiceRequest{\n\t\tResourceMetrics: make([]*metricspb.ResourceMetrics, 0, len(resourceGroups)),\n\t}\n\n\ttotalMetrics := 0\n\tskippedEvents := 0\n\n\tfor _, groupedEvents := range resourceGroups {\n\t\trm := &metricspb.ResourceMetrics{\n\t\t\tResource: o.createResource(cfg, groupedEvents[0]),\n\t\t\tScopeMetrics: 
[]*metricspb.ScopeMetrics{\n\t\t\t\t{\n\t\t\t\t\tScope: &commonpb.InstrumentationScope{\n\t\t\t\t\t\tName:    \"gNMIc\",\n\t\t\t\t\t\tVersion: version.Version,\n\t\t\t\t\t},\n\t\t\t\t\tMetrics: make([]*metricspb.Metric, 0, len(groupedEvents)),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\t// Convert each event to OTLP metric\n\t\tfor _, event := range groupedEvents {\n\t\t\tmetrics, err := o.convertEventToMetrics(cfg, event)\n\t\t\tif err != nil {\n\t\t\t\tif cfg.Debug {\n\t\t\t\t\to.logger.Printf(\"DEBUG: failed to convert event %s: %v\", event.Name, err)\n\t\t\t\t}\n\t\t\t\tskippedEvents++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(metrics) == 0 {\n\t\t\t\tif cfg.Debug {\n\t\t\t\t\to.logger.Printf(\"DEBUG: convertEvent returned nil for event: name=%s, values=%v\", event.Name, event.Values)\n\t\t\t\t}\n\t\t\t\tskippedEvents++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trm.ScopeMetrics[0].Metrics = append(rm.ScopeMetrics[0].Metrics, metrics...)\n\t\t\ttotalMetrics += len(metrics)\n\t\t}\n\n\t\tif len(rm.ScopeMetrics[0].Metrics) > 0 {\n\t\t\treq.ResourceMetrics = append(req.ResourceMetrics, rm)\n\t\t}\n\t}\n\n\tif cfg.Debug {\n\t\to.logger.Printf(\"DEBUG: Converted %d metrics, skipped %d events, %d ResourceMetrics\",\n\t\t\ttotalMetrics, skippedEvents, len(req.ResourceMetrics))\n\t}\n\n\treturn req\n}\n\n// groupByResource groups events by their source (device) for resource attribution\nfunc (o *otlpOutput) groupByResource(events []*formatters.EventMsg) map[string][]*formatters.EventMsg {\n\tgroups := make(map[string][]*formatters.EventMsg)\n\n\tfor _, event := range events {\n\t\t// Use source as resource key\n\t\tsource := event.Tags[\"source\"]\n\t\tif source == \"\" {\n\t\t\tsource = \"unknown\"\n\t\t}\n\t\tgroups[source] = append(groups[source], event)\n\t}\n\n\treturn groups\n}\n\n// createResource creates OTLP Resource from event metadata.\n// Tags listed in cfg.ResourceTagKeys are placed as resource attributes.\nfunc (o *otlpOutput) createResource(cfg *config, event 
*formatters.EventMsg) *resourcepb.Resource {\n\tattrs := make([]*commonpb.KeyValue, 0, len(cfg.ResourceTagKeys)+len(cfg.ResourceAttributes))\n\n\tfor _, key := range cfg.ResourceTagKeys {\n\t\tif val, ok := event.Tags[key]; ok {\n\t\t\tattrs = append(attrs, &commonpb.KeyValue{\n\t\t\t\tKey:   key,\n\t\t\t\tValue: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: val}},\n\t\t\t})\n\t\t}\n\t}\n\n\tfor key, val := range cfg.ResourceAttributes {\n\t\tattrs = append(attrs, &commonpb.KeyValue{\n\t\t\tKey:   key,\n\t\t\tValue: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: val}},\n\t\t})\n\t}\n\n\treturn &resourcepb.Resource{\n\t\tAttributes: attrs,\n\t}\n}\n\n// convertEventToMetrics converts a single gNMI event to OTLP metrics.\n// Returns nil if the event has no valid values to convert.\nfunc (o *otlpOutput) convertEventToMetrics(cfg *config, event *formatters.EventMsg) ([]*metricspb.Metric, error) {\n\tif len(event.Values) == 0 {\n\t\tif cfg.Debug {\n\t\t\to.logger.Printf(\"DEBUG: event has no values (event: %s)\", event.Name)\n\t\t}\n\t\treturn nil, nil\n\t}\n\n\tattributes := o.extractAttributesForMetric(cfg, event)\n\n\tresult := make([]*metricspb.Metric, 0, len(event.Values))\n\tfor k, v := range event.Values {\n\t\tmetricName := o.buildMetricName(cfg, event, k)\n\n\t\tmetric := &metricspb.Metric{\n\t\t\tName: metricName,\n\t\t}\n\n\t\t// Handle string values\n\t\tswitch v := v.(type) {\n\t\tcase string:\n\t\t\tif !cfg.StringsAsAttributes {\n\t\t\t\tif cfg.Debug {\n\t\t\t\t\to.logger.Printf(\"DEBUG: skipping string value (strings-as-attributes=false): %s\", event.Name)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmetric.Data = &metricspb.Metric_Gauge{\n\t\t\t\tGauge: o.createGaugeWithString(event, attributes, v),\n\t\t\t}\n\t\t\tresult = append(result, metric)\n\t\t\tcontinue\n\t\t}\n\n\t\tdataPoint := o.createNumberDataPointWithValue(cfg, event, attributes, v)\n\t\tif dataPoint == nil {\n\t\t\tif cfg.Debug 
{\n\t\t\t\to.logger.Printf(\"DEBUG: failed to create data point for value type %T (event: %s)\", v, event.Name)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif o.isCounter(cfg, k) {\n\t\t\tmetric.Data = &metricspb.Metric_Sum{\n\t\t\t\tSum: &metricspb.Sum{\n\t\t\t\t\tAggregationTemporality: metricspb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,\n\t\t\t\t\tIsMonotonic:            true,\n\t\t\t\t\tDataPoints:             []*metricspb.NumberDataPoint{dataPoint},\n\t\t\t\t},\n\t\t\t}\n\t\t} else {\n\t\t\tmetric.Data = &metricspb.Metric_Gauge{\n\t\t\t\tGauge: &metricspb.Gauge{\n\t\t\t\t\tDataPoints: []*metricspb.NumberDataPoint{dataPoint},\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t\tresult = append(result, metric)\n\t}\n\n\treturn result, nil\n}\n\n// buildMetricName creates metric name from event and value key\n// event.Name contains the subscription name (e.g., \"nvos\", \"arista\")\n// valueKey contains the metric path (e.g., \"interfaces/interface/state/counters/in-octets\")\nfunc (o *otlpOutput) buildMetricName(cfg *config, event *formatters.EventMsg, valueKey string) string {\n\tsb := stringsBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tsb.Reset()\n\t\tstringsBuilderPool.Put(sb)\n\t}()\n\n\t// Add global prefix if configured\n\tif cfg.MetricPrefix != \"\" {\n\t\tsb.WriteString(cfg.MetricPrefix)\n\t\tsb.WriteString(\"_\")\n\t}\n\n\t// Append subscription name if configured (for vendor-specific prefixes)\n\tif cfg.AppendSubscriptionName {\n\t\tsb.WriteString(event.Name) // subscription name (nvos, arista, etc.)\n\t\tsb.WriteString(\"_\")\n\t}\n\n\t// Append the value key (metric path), converting slashes to underscores\n\t// e.g., \"interfaces/interface/state/counters/in-octets\" -> \"interfaces_interface_state_counters_in_octets\"\n\t// gNMI paths arrive with a leading \"/\"; strip it when configured so the conversion\n\t// does not produce a leading \"_\" (or a double \"_\" when a prefix is set).\n\tpath := valueKey\n\tif cfg.StripLeadingUnderscore 
{\n\t\tpath = strings.TrimPrefix(path, \"/\")\n\t}\n\tmetricPath := strings.ReplaceAll(path, \"/\", \"_\")\n\tsb.WriteString(metricPath)\n\n\tname := sb.String()\n\n\t// Replace remaining hyphens with underscores (Prometheus convention)\n\tname = strings.ReplaceAll(name, \"-\", \"_\")\n\n\treturn name\n}\n\n// extractAttributesForMetric extracts data point attributes from event tags.\n// Tags listed in cfg.ResourceTagKeys are excluded (they live on the Resource).\nfunc (o *otlpOutput) extractAttributesForMetric(cfg *config, event *formatters.EventMsg) []*commonpb.KeyValue {\n\tattrs := make([]*commonpb.KeyValue, 0, len(event.Tags))\n\n\tfor key, val := range event.Tags {\n\t\tif cfg.resourceTagSet[key] {\n\t\t\tcontinue\n\t\t}\n\t\tattrs = append(attrs, &commonpb.KeyValue{\n\t\t\tKey:   key,\n\t\t\tValue: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: val}},\n\t\t})\n\t}\n\n\treturn attrs\n}\n\n// isCounter returns true if any of the configured counter-patterns match the value key.\nfunc (o *otlpOutput) isCounter(cfg *config, valueName string) bool {\n\tfor _, re := range cfg.counterRegexes {\n\t\tif re.MatchString(valueName) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// createNumberDataPointWithValue creates OTLP data point from event with a specific value\nfunc (o *otlpOutput) createNumberDataPointWithValue(cfg *config, event *formatters.EventMsg, attrs []*commonpb.KeyValue, value interface{}) *metricspb.NumberDataPoint {\n\tdp := &metricspb.NumberDataPoint{\n\t\tAttributes:   attrs,\n\t\tTimeUnixNano: uint64(event.Timestamp),\n\t}\n\n\t// Handle value conversion\n\tswitch v := value.(type) {\n\tcase int:\n\t\tdp.Value = &metricspb.NumberDataPoint_AsInt{AsInt: int64(v)}\n\tcase int32:\n\t\tdp.Value = &metricspb.NumberDataPoint_AsInt{AsInt: int64(v)}\n\tcase int64:\n\t\tdp.Value = &metricspb.NumberDataPoint_AsInt{AsInt: v}\n\tcase uint:\n\t\tdp.Value = &metricspb.NumberDataPoint_AsInt{AsInt: int64(v)}\n\tcase 
uint32:\n\t\tdp.Value = &metricspb.NumberDataPoint_AsInt{AsInt: int64(v)}\n\tcase uint64:\n\t\t// Handle potential overflow\n\t\tmaxInt64 := uint64(9223372036854775807) // math.MaxInt64\n\t\tif v > maxInt64 {\n\t\t\t// Convert to double if too large for int64\n\t\t\tdp.Value = &metricspb.NumberDataPoint_AsDouble{AsDouble: float64(v)}\n\t\t} else {\n\t\t\tdp.Value = &metricspb.NumberDataPoint_AsInt{AsInt: int64(v)}\n\t\t}\n\tcase float32:\n\t\tdp.Value = &metricspb.NumberDataPoint_AsDouble{AsDouble: float64(v)}\n\tcase float64:\n\t\tdp.Value = &metricspb.NumberDataPoint_AsDouble{AsDouble: v}\n\tcase string:\n\t\t// Try to parse as number\n\t\tif fVal, err := strconv.ParseFloat(v, 64); err == nil {\n\t\t\tdp.Value = &metricspb.NumberDataPoint_AsDouble{AsDouble: fVal}\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\tdefault:\n\t\tif cfg.Debug {\n\t\t\to.logger.Printf(\"unsupported value type %T for metric %s\", v, event.Name)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn dp\n}\n\n// createGaugeWithString creates OTLP Gauge with string value as attribute.\n// It copies attrs to avoid mutating the caller's shared slice.\nfunc (o *otlpOutput) createGaugeWithString(event *formatters.EventMsg, attrs []*commonpb.KeyValue, strVal string) *metricspb.Gauge {\n\tdpAttrs := make([]*commonpb.KeyValue, len(attrs), len(attrs)+1)\n\tcopy(dpAttrs, attrs)\n\tdpAttrs = append(dpAttrs, &commonpb.KeyValue{\n\t\tKey:   \"value\",\n\t\tValue: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: strVal}},\n\t})\n\n\tdp := &metricspb.NumberDataPoint{\n\t\tAttributes:   dpAttrs,\n\t\tTimeUnixNano: uint64(event.Timestamp),\n\t\tValue:        &metricspb.NumberDataPoint_AsDouble{AsDouble: 1.0},\n\t}\n\n\treturn &metricspb.Gauge{\n\t\tDataPoints: []*metricspb.NumberDataPoint{dp},\n\t}\n}\n\n// validateRequest validates OTLP request structure\nfunc (o *otlpOutput) validateRequest(req *metricsv1.ExportMetricsServiceRequest) error {\n\tif req == nil {\n\t\treturn fmt.Errorf(\"request is 
nil\")\n\t}\n\n\tif len(req.ResourceMetrics) == 0 {\n\t\treturn fmt.Errorf(\"ResourceMetrics is empty\")\n\t}\n\n\tfor i, rm := range req.ResourceMetrics {\n\t\tif rm.Resource == nil {\n\t\t\treturn fmt.Errorf(\"ResourceMetrics[%d].Resource is nil\", i)\n\t\t}\n\n\t\tif len(rm.ScopeMetrics) == 0 {\n\t\t\treturn fmt.Errorf(\"ResourceMetrics[%d].ScopeMetrics is empty\", i)\n\t\t}\n\n\t\tfor j, sm := range rm.ScopeMetrics {\n\t\t\tif sm.Scope == nil {\n\t\t\t\treturn fmt.Errorf(\"ScopeMetrics[%d].Scope is nil\", j)\n\t\t\t}\n\n\t\t\tif len(sm.Metrics) == 0 {\n\t\t\t\treturn fmt.Errorf(\"ScopeMetrics[%d].Metrics is empty\", j)\n\t\t\t}\n\n\t\t\tfor k, m := range sm.Metrics {\n\t\t\t\tif m.Name == \"\" {\n\t\t\t\t\treturn fmt.Errorf(\"Metric[%d].Name is empty\", k)\n\t\t\t\t}\n\n\t\t\t\tif m.Data == nil {\n\t\t\t\t\treturn fmt.Errorf(\"Metric[%d].Data is nil\", k)\n\t\t\t\t}\n\n\t\t\t\tif err := o.validateMetricData(i, j, k, m); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// validateMetricData validates metric data points\nfunc (o *otlpOutput) validateMetricData(rmIdx, smIdx, mIdx int, m *metricspb.Metric) error {\n\tvar dataPoints []*metricspb.NumberDataPoint\n\n\tswitch data := m.Data.(type) {\n\tcase *metricspb.Metric_Gauge:\n\t\tif data.Gauge == nil {\n\t\t\treturn fmt.Errorf(\"ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d].Gauge is nil\", rmIdx, smIdx, mIdx)\n\t\t}\n\t\tdataPoints = data.Gauge.DataPoints\n\tcase *metricspb.Metric_Sum:\n\t\tif data.Sum == nil {\n\t\t\treturn fmt.Errorf(\"ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d].Sum is nil\", rmIdx, smIdx, mIdx)\n\t\t}\n\t\tdataPoints = data.Sum.DataPoints\n\tcase *metricspb.Metric_Histogram:\n\t\treturn nil\n\tcase *metricspb.Metric_ExponentialHistogram:\n\t\treturn nil\n\tcase *metricspb.Metric_Summary:\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d] has unknown data type: %T\", rmIdx, smIdx, mIdx, 
m.Data)\n\t}\n\n\tif len(dataPoints) == 0 {\n\t\treturn fmt.Errorf(\"ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d] has no data points\", rmIdx, smIdx, mIdx)\n\t}\n\n\tfor dpIdx, dp := range dataPoints {\n\t\tif dp.TimeUnixNano == 0 {\n\t\t\treturn fmt.Errorf(\"ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d].DataPoint[%d] has zero timestamp\", rmIdx, smIdx, mIdx, dpIdx)\n\t\t}\n\n\t\tif dp.Value == nil {\n\t\t\treturn fmt.Errorf(\"ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d].DataPoint[%d] has nil value\", rmIdx, smIdx, mIdx, dpIdx)\n\t\t}\n\n\t\tswitch v := dp.Value.(type) {\n\t\tcase *metricspb.NumberDataPoint_AsInt:\n\t\t\t// Valid\n\t\tcase *metricspb.NumberDataPoint_AsDouble:\n\t\t\t// Valid\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d].DataPoint[%d] has invalid value type: %T\", rmIdx, smIdx, mIdx, dpIdx, v)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// sendGRPC sends the OTLP metrics via gRPC\nfunc (o *otlpOutput) sendGRPC(ctx context.Context, req *metricsv1.ExportMetricsServiceRequest) error {\n\tcfg := o.cfg.Load()\n\tgs := o.grpcState.Load()\n\n\tif gs == nil || gs.client == nil {\n\t\treturn fmt.Errorf(\"gRPC client not initialized\")\n\t}\n\n\tif err := o.validateRequest(req); err != nil {\n\t\to.logger.Printf(\"VALIDATION ERROR: %v\", err)\n\t\treturn fmt.Errorf(\"request validation failed: %w\", err)\n\t}\n\n\tif len(cfg.Headers) > 0 {\n\t\tmd := metadata.New(cfg.Headers)\n\t\tctx = metadata.NewOutgoingContext(ctx, md)\n\t}\n\n\tif cfg.Timeout > 0 {\n\t\tvar cancel func()\n\t\tctx, cancel = context.WithTimeout(ctx, cfg.Timeout)\n\t\tdefer cancel()\n\t}\n\n\tif cfg.Debug {\n\t\to.logger.Printf(\"DEBUG: Sending OTLP request with %d ResourceMetrics\", len(req.ResourceMetrics))\n\t\tif len(req.ResourceMetrics) > 0 && len(req.ResourceMetrics[0].ScopeMetrics) > 0 {\n\t\t\to.logger.Printf(\"DEBUG: First ScopeMetric has %d Metrics\", len(req.ResourceMetrics[0].ScopeMetrics[0].Metrics))\n\t\t}\n\t}\n\n\tresponse, err := 
gs.client.Export(ctx, req)\n\tif err != nil {\n\t\tif cfg.Debug {\n\t\t\to.logger.Printf(\"DEBUG: gRPC Export returned error: %v\", err)\n\t\t}\n\t\treturn fmt.Errorf(\"grpc export failed: %w\", err)\n\t}\n\n\tif cfg.Debug {\n\t\to.logger.Printf(\"DEBUG: gRPC Export succeeded\")\n\t}\n\n\tif response.PartialSuccess != nil && response.PartialSuccess.RejectedDataPoints > 0 {\n\t\terrMsg := fmt.Sprintf(\"OTEL rejected %d data points: %s\",\n\t\t\tresponse.PartialSuccess.RejectedDataPoints,\n\t\t\tresponse.PartialSuccess.ErrorMessage)\n\t\to.logger.Printf(\"ERROR: %s\", errMsg)\n\t\tif cfg.EnableMetrics {\n\t\t\totlpRejectedDataPoints.WithLabelValues(cfg.Name).Add(float64(response.PartialSuccess.RejectedDataPoints))\n\t\t}\n\t\treturn fmt.Errorf(\"%s\", errMsg)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/outputs/otlp_output/otlp_metrics.go",
    "content": "// © 2025 NVIDIA Corporation\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of NVIDIA's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage otlp_output\n\nimport (\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar otlpNumberOfSentEvents = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"otlp_output\",\n\tName:      \"number_of_sent_events_total\",\n\tHelp:      \"Number of events successfully sent to OTLP collector\",\n}, []string{\"output_name\"})\n\nvar otlpNumberOfFailedEvents = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"otlp_output\",\n\tName:      \"number_of_failed_events_total\",\n\tHelp:      \"Number of events that failed to send to OTLP collector\",\n}, []string{\"output_name\", \"reason\"})\n\nvar otlpSendDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"otlp_output\",\n\tName:      \"send_duration_seconds\",\n\tHelp:      \"Duration of sending batches to OTLP collector\",\n}, []string{\"output_name\"})\n\nvar otlpRejectedDataPoints = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"otlp_output\",\n\tName:      \"rejected_data_points_total\",\n\tHelp:      \"Number of data points rejected by OTLP collector (PartialSuccess)\",\n}, []string{\"output_name\"})\n"
  },
  {
    "path": "pkg/outputs/otlp_output/otlp_output.go",
    "content": "// © 2025-2026 NVIDIA Corporation\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of NVIDIA's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage otlp_output\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"regexp\"\n\t\"slices\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/google/uuid\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/credentials/insecure\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\tgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\tmetricsv1 \"go.opentelemetry.io/proto/otlp/collector/metrics/v1\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\toutputType        = \"otlp\"\n\tdefaultTimeout    = 10 * time.Second\n\tdefaultBatchSize  = 1000\n\tdefaultNumWorkers = 1\n\tdefaultMaxRetries = 3\n\tdefaultProtocol   = \"grpc\"\n\tloggingPrefix     = \"[otlp_output:%s] \"\n)\n\nfunc init() {\n\toutputs.Register(outputType, func() outputs.Output {\n\t\treturn &otlpOutput{}\n\t})\n}\n\n// otlpOutput implements the Output interface for OTLP metrics export\ntype otlpOutput struct {\n\toutputs.BaseOutput\n\n\tcfg       *atomic.Pointer[config]\n\tdynCfg    *atomic.Pointer[dynConfig]\n\tgrpcState *atomic.Pointer[grpcClientState]\n\teventCh   *atomic.Pointer[chan 
*formatters.EventMsg]\n\n\tlogger   *log.Logger\n\trootCtx  context.Context\n\tcancelFn context.CancelFunc\n\twg       *sync.WaitGroup\n\n\t// Metrics\n\treg *prometheus.Registry\n\t// store\n\tstore store.Store[any]\n}\n\ntype dynConfig struct {\n\tevps []formatters.EventProcessor\n}\n\ntype grpcClientState struct {\n\tconn   *grpc.ClientConn\n\tclient metricsv1.MetricsServiceClient\n}\n\n// config holds the OTLP output configuration\ntype config struct {\n\t// name of the output\n\tName string `mapstructure:\"name,omitempty\"`\n\t// endpoint of the OTLP collector\n\tEndpoint string `mapstructure:\"endpoint,omitempty\"`\n\t// \"grpc\" or \"http\"\n\t// defaults to \"grpc\"\n\tProtocol string `mapstructure:\"protocol,omitempty\"`\n\t// RPC timeout\n\tTimeout time.Duration `mapstructure:\"timeout,omitempty\"`\n\t// TLS configuration\n\tTLS *types.TLSConfig `mapstructure:\"tls,omitempty\"`\n\n\t// Batching\n\tBatchSize  int           `mapstructure:\"batch-size,omitempty\"`\n\tInterval   time.Duration `mapstructure:\"interval,omitempty\"`\n\tBufferSize int           `mapstructure:\"buffer-size,omitempty\"`\n\n\t// Retry\n\tMaxRetries int `mapstructure:\"max-retries,omitempty\"`\n\n\t// Metric naming\n\t// string, to be used as the metric namespace\n\tMetricPrefix string `mapstructure:\"metric-prefix,omitempty\"`\n\t// boolean, if true the subscription name will be prepended to the metric name after the prefix.\n\tAppendSubscriptionName bool `mapstructure:\"append-subscription-name,omitempty\"`\n\t// boolean, if true, string type values are exported as gauge metrics with value=1\n\t// and the string stored as an attribute named \"value\".\n\t// if false, string values are dropped.\n\tStringsAsAttributes bool `mapstructure:\"strings-as-attributes,omitempty\"`\n\t// boolean, if true, the leading \"/\" of the metric path is trimmed before the\n\t// slash-to-underscore conversion, so a path like \"/interfaces/...\" becomes\n\t// \"interfaces_...\" instead of 
\"_interfaces_...\". Defaults to false for\n\t// backward compatibility.\n\tStripLeadingUnderscore bool `mapstructure:\"strip-leading-underscore,omitempty\"`\n\n\t// Tags whose values are placed as OTLP Resource attributes and excluded\n\t// from data point attributes.\n\t// Set to an empty list to put all tags on data points (useful for Prometheus compatibility).\n\tResourceTagKeys []string `mapstructure:\"resource-tag-keys,omitempty\"`\n\n\t// Regex patterns matched against the value key to classify a metric as a\n\t// monotonic cumulative counter (Sum). Unmatched metrics become Gauges.\n\t// If empty, all metrics are exported as Gauges.\n\tCounterPatterns []string `mapstructure:\"counter-patterns,omitempty\"`\n\n\t// Resource attributes\n\tResourceAttributes map[string]string `mapstructure:\"resource-attributes,omitempty\"`\n\n\t// Headers to include with every export request (gRPC metadata / HTTP headers).\n\t// Use this to set e.g. \"X-Scope-OrgID\" for Grafana Mimir, Loki, Tempo, etc.\n\tHeaders map[string]string `mapstructure:\"headers,omitempty\"`\n\n\t// Precomputed lookup set for ResourceTagKeys (not from config file).\n\tresourceTagSet map[string]bool\n\t// Compiled regexes from CounterPatterns.\n\tcounterRegexes []*regexp.Regexp\n\n\t// Performance\n\tNumWorkers int `mapstructure:\"num-workers,omitempty\"`\n\n\t// Debugging\n\tDebug         bool `mapstructure:\"debug,omitempty\"`\n\tEnableMetrics bool `mapstructure:\"enable-metrics,omitempty\"`\n\n\t// Event processors\n\tEventProcessors []string `mapstructure:\"event-processors,omitempty\"`\n}\n\nfunc (o *otlpOutput) initFields() {\n\to.cfg = new(atomic.Pointer[config])\n\to.dynCfg = new(atomic.Pointer[dynConfig])\n\to.grpcState = new(atomic.Pointer[grpcClientState])\n\to.eventCh = new(atomic.Pointer[chan *formatters.EventMsg])\n\to.wg = new(sync.WaitGroup)\n\to.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags)\n}\n\nfunc (o *otlpOutput) String() string {\n\tcfg := 
o.cfg.Load()\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\n// Init initializes the OTLP output\nfunc (o *otlpOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error {\n\to.initFields()\n\n\tncfg := new(config)\n\terr := outputs.DecodeConfig(cfg, ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ncfg.Name == \"\" {\n\t\tncfg.Name = name\n\t}\n\to.logger.SetPrefix(fmt.Sprintf(loggingPrefix, ncfg.Name))\n\n\toptions := &outputs.OutputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\to.store = options.Store\n\n\t// Set defaults\n\tif options.Name != \"\" {\n\t\tncfg.Name = options.Name\n\t}\n\to.setDefaultsFor(ncfg)\n\n\t// Apply logger\n\tif options.Logger != nil {\n\t\to.logger.SetOutput(options.Logger.Writer())\n\t\to.logger.SetFlags(options.Logger.Flags())\n\t}\n\n\to.cfg.Store(ncfg)\n\n\t// Initialize registry\n\to.reg = options.Registry\n\terr = o.registerMetrics()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Initialize event processors\n\tdc := new(dynConfig)\n\tdc.evps, err = o.buildEventProcessors(ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\to.dynCfg.Store(dc)\n\n\t// Initialize transport\n\tswitch ncfg.Protocol {\n\tcase \"grpc\":\n\t\tgs, err := o.initGRPCFor(ncfg)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to initialize gRPC transport: %w\", err)\n\t\t}\n\t\to.grpcState.Store(gs)\n\tcase \"http\":\n\t\treturn fmt.Errorf(\"HTTP transport not yet implemented\")\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported protocol '%s': must be 'grpc' or 'http'\", ncfg.Protocol)\n\t}\n\n\t// Initialize worker channels\n\teventCh := make(chan *formatters.EventMsg, ncfg.BufferSize)\n\to.eventCh.Store(&eventCh)\n\n\t// Start workers\n\to.rootCtx = ctx\n\tvar wctx context.Context\n\twctx, o.cancelFn = context.WithCancel(o.rootCtx)\n\to.wg.Add(ncfg.NumWorkers)\n\tfor i := 0; i < 
ncfg.NumWorkers; i++ {\n\t\tgo o.worker(wctx, i)\n\t}\n\n\to.logger.Printf(\"initialized OTLP output: endpoint=%s, protocol=%s, batch-size=%d, workers=%d\",\n\t\tncfg.Endpoint, ncfg.Protocol, ncfg.BatchSize, ncfg.NumWorkers)\n\n\treturn nil\n}\n\nfunc (o *otlpOutput) Update(ctx context.Context, cfg map[string]any) error {\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.setDefaultsFor(newCfg)\n\tif err := o.validateConfig(newCfg); err != nil {\n\t\treturn err\n\t}\n\n\tcurrCfg := o.cfg.Load()\n\n\tswapChannel := channelNeedsSwap(currCfg, newCfg)\n\trestartWorkers := needsWorkerRestart(currCfg, newCfg)\n\trebuildGRPC := needsGRPCRebuild(currCfg, newCfg)\n\trebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0\n\n\tdc := new(dynConfig)\n\tprevDC := o.dynCfg.Load()\n\tif rebuildProcessors {\n\t\tdc.evps, err = o.buildEventProcessors(newCfg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\to.dynCfg.Store(dc)\n\n\tif rebuildGRPC {\n\t\tgs, err := o.initGRPCFor(newCfg)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to rebuild gRPC transport: %w\", err)\n\t\t}\n\t\toldState := o.grpcState.Swap(gs)\n\t\tif oldState != nil && oldState.conn != nil {\n\t\t\toldState.conn.Close()\n\t\t}\n\t}\n\n\to.cfg.Store(newCfg)\n\n\tif swapChannel || restartWorkers {\n\t\tvar newChan chan *formatters.EventMsg\n\t\tif swapChannel {\n\t\t\tnewChan = make(chan *formatters.EventMsg, newCfg.BufferSize)\n\t\t} else {\n\t\t\tnewChan = *o.eventCh.Load()\n\t\t}\n\n\t\trunCtx, cancel := context.WithCancel(o.rootCtx)\n\t\tnewWG := new(sync.WaitGroup)\n\n\t\toldCancel := o.cancelFn\n\t\toldWG := o.wg\n\t\toldEventCh := *o.eventCh.Load()\n\n\t\to.cancelFn = cancel\n\t\to.wg = newWG\n\t\to.eventCh.Store(&newChan)\n\n\t\to.wg.Add(newCfg.NumWorkers)\n\t\tfor i := 0; i < newCfg.NumWorkers; i++ {\n\t\t\tgo o.worker(runCtx, 
i)\n\t\t}\n\n\t\tif oldCancel != nil {\n\t\t\toldCancel()\n\t\t}\n\t\tif oldWG != nil {\n\t\t\toldWG.Wait()\n\t\t}\n\n\t\tif swapChannel {\n\t\tOUTER_LOOP:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase ev, ok := <-oldEventCh:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tbreak OUTER_LOOP\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase newChan <- ev:\n\t\t\t\t\tdefault:\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tbreak OUTER_LOOP\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\to.logger.Printf(\"updated OTLP output: %s\", o.String())\n\treturn nil\n}\n\nfunc (o *otlpOutput) Validate(cfg map[string]any) error {\n\tncfg := new(config)\n\terr := outputs.DecodeConfig(cfg, ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\to.setDefaultsFor(ncfg)\n\treturn o.validateConfig(ncfg)\n}\n\nfunc (o *otlpOutput) UpdateProcessor(name string, pcfg map[string]any) error {\n\tcfg := o.cfg.Load()\n\tdc := o.dynCfg.Load()\n\n\tnewEvps, changed, err := outputs.UpdateProcessorInSlice(\n\t\to.logger,\n\t\to.store,\n\t\tcfg.EventProcessors,\n\t\tdc.evps,\n\t\tname,\n\t\tpcfg,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif changed {\n\t\tnewDC := *dc\n\t\tnewDC.evps = newEvps\n\t\to.dynCfg.Store(&newDC)\n\t\to.logger.Printf(\"updated event processor %s\", name)\n\t}\n\treturn nil\n}\n\n// Write handles incoming gNMI messages\nfunc (o *otlpOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) {\n\tif rsp == nil {\n\t\treturn\n\t}\n\n\tcfg := o.cfg.Load()\n\tdc := o.dynCfg.Load()\n\tif dc == nil {\n\t\treturn\n\t}\n\n\t// Type assert to gNMI SubscribeResponse\n\tsubsResp, ok := rsp.(*gnmi.SubscribeResponse)\n\tif !ok {\n\t\tif cfg.Debug {\n\t\t\to.logger.Printf(\"received non-SubscribeResponse message, ignoring\")\n\t\t}\n\t\treturn\n\t}\n\n\t// Convert gNMI response to EventMsg format\n\tsubscriptionName := meta[\"subscription-name\"]\n\tif subscriptionName == \"\" {\n\t\tsubscriptionName = \"default\"\n\t}\n\n\tevents, err := formatters.ResponseToEventMsgs(subscriptionName, subsResp, meta, 
dc.evps...)\n\tif err != nil {\n\t\tif cfg.Debug {\n\t\t\to.logger.Printf(\"failed to convert response to events: %v\", err)\n\t\t}\n\t\treturn\n\t}\n\n\t// Send events to worker channel\n\teventCh := *o.eventCh.Load()\n\tfor _, event := range events {\n\t\tselect {\n\t\tcase eventCh <- event:\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tif cfg.Debug {\n\t\t\t\to.logger.Printf(\"event channel full, dropping event\")\n\t\t\t}\n\t\t\tif cfg.EnableMetrics {\n\t\t\t\totlpNumberOfFailedEvents.WithLabelValues(cfg.Name, \"channel_full\").Inc()\n\t\t\t}\n\t\t}\n\t}\n}\n\n// WriteEvent handles individual EventMsg\nfunc (o *otlpOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {\n\tif ev == nil {\n\t\treturn\n\t}\n\n\tcfg := o.cfg.Load()\n\teventCh := *o.eventCh.Load()\n\n\tselect {\n\tcase eventCh <- ev:\n\tcase <-ctx.Done():\n\t\treturn\n\tdefault:\n\t\tif cfg.Debug {\n\t\t\to.logger.Printf(\"event channel full, dropping event\")\n\t\t}\n\t\tif cfg.EnableMetrics {\n\t\t\totlpNumberOfFailedEvents.WithLabelValues(cfg.Name, \"channel_full\").Inc()\n\t\t}\n\t}\n}\n\n// Close closes the OTLP output\nfunc (o *otlpOutput) Close() error {\n\tif o.cancelFn != nil {\n\t\to.cancelFn()\n\t}\n\n\t// Close event channel\n\teventCh := o.eventCh.Load()\n\tif eventCh != nil {\n\t\tclose(*eventCh)\n\t}\n\n\t// Wait for workers to finish\n\to.wg.Wait()\n\n\t// Close gRPC connection\n\tgs := o.grpcState.Load()\n\tif gs != nil && gs.conn != nil {\n\t\treturn gs.conn.Close()\n\t}\n\n\treturn nil\n}\n\n// worker processes events in batches\nfunc (o *otlpOutput) worker(ctx context.Context, id int) {\n\tdefer o.wg.Done()\n\n\tcfg := o.cfg.Load()\n\tif cfg.Debug {\n\t\to.logger.Printf(\"worker %d started\", id)\n\t}\n\n\tbatch := make([]*formatters.EventMsg, 0, cfg.BatchSize)\n\tticker := time.NewTicker(cfg.Interval)\n\tdefer ticker.Stop()\n\n\teventCh := *o.eventCh.Load()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tif len(batch) > 0 {\n\t\t\t\tflushCtx, 
cancel := context.WithTimeout(context.Background(), cfg.Timeout)\n\t\t\t\tdefer cancel()\n\t\t\t\to.sendBatch(flushCtx, batch)\n\t\t\t}\n\t\t\tif cfg.Debug {\n\t\t\t\to.logger.Printf(\"worker %d stopped\", id)\n\t\t\t}\n\t\t\treturn\n\n\t\tcase event, ok := <-eventCh:\n\t\t\tif !ok {\n\t\t\t\tif len(batch) > 0 {\n\t\t\t\t\tflushCtx, cancel := context.WithTimeout(context.Background(), cfg.Timeout)\n\t\t\t\t\tdefer cancel()\n\t\t\t\t\to.sendBatch(flushCtx, batch)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tbatch = append(batch, event)\n\t\t\tif len(batch) >= cfg.BatchSize {\n\t\t\t\to.sendBatch(ctx, batch)\n\t\t\t\tbatch = make([]*formatters.EventMsg, 0, cfg.BatchSize)\n\t\t\t}\n\n\t\tcase <-ticker.C:\n\t\t\tif len(batch) > 0 {\n\t\t\t\to.sendBatch(ctx, batch)\n\t\t\t\tbatch = make([]*formatters.EventMsg, 0, cfg.BatchSize)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (o *otlpOutput) sendBatch(ctx context.Context, events []*formatters.EventMsg) {\n\tif len(events) == 0 {\n\t\treturn\n\t}\n\n\tcfg := o.cfg.Load()\n\tstart := time.Now()\n\n\treq := o.convertToOTLP(events)\n\n\tvar err error\n\tfor attempt := 0; attempt <= cfg.MaxRetries; attempt++ {\n\t\terr = o.sendGRPC(ctx, req)\n\n\t\tif err == nil {\n\t\t\tif cfg.Debug {\n\t\t\t\to.logger.Printf(\"successfully sent %d events (attempt %d)\", len(events), attempt+1)\n\t\t\t}\n\t\t\tif cfg.EnableMetrics {\n\t\t\t\totlpNumberOfSentEvents.WithLabelValues(cfg.Name).Add(float64(len(events)))\n\t\t\t\totlpSendDuration.WithLabelValues(cfg.Name).Observe(time.Since(start).Seconds())\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tif attempt < cfg.MaxRetries {\n\t\t\ttime.Sleep(time.Duration(attempt+1) * 100 * time.Millisecond)\n\t\t}\n\t}\n\n\to.logger.Printf(\"failed to send batch after %d retries: %v\", cfg.MaxRetries, err)\n\tif cfg.EnableMetrics {\n\t\totlpNumberOfFailedEvents.WithLabelValues(cfg.Name, \"send_failed\").Add(float64(len(events)))\n\t}\n}\n\nfunc (o *otlpOutput) setDefaultsFor(c *config) {\n\tif c.Timeout == 0 {\n\t\tc.Timeout = 
defaultTimeout\n\t}\n\tif c.BatchSize == 0 {\n\t\tc.BatchSize = defaultBatchSize\n\t}\n\tif c.NumWorkers == 0 {\n\t\tc.NumWorkers = defaultNumWorkers\n\t}\n\tif c.MaxRetries == 0 {\n\t\tc.MaxRetries = defaultMaxRetries\n\t}\n\tif c.Protocol == \"\" {\n\t\tc.Protocol = defaultProtocol\n\t}\n\tif c.Name == \"\" {\n\t\tc.Name = \"gnmic-otlp-\" + uuid.New().String()\n\t}\n\tif c.Interval == 0 {\n\t\tc.Interval = 5 * time.Second\n\t}\n\tif c.BufferSize == 0 {\n\t\tc.BufferSize = c.BatchSize * 2\n\t}\n\tc.resourceTagSet = make(map[string]bool, len(c.ResourceTagKeys))\n\tfor _, k := range c.ResourceTagKeys {\n\t\tc.resourceTagSet[k] = true\n\t}\n}\n\nfunc (o *otlpOutput) validateConfig(c *config) error {\n\tif c.Endpoint == \"\" {\n\t\treturn fmt.Errorf(\"endpoint is required\")\n\t}\n\tc.counterRegexes = make([]*regexp.Regexp, 0, len(c.CounterPatterns))\n\tfor _, p := range c.CounterPatterns {\n\t\tre, err := regexp.Compile(p)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid counter-pattern %q: %w\", p, err)\n\t\t}\n\t\tc.counterRegexes = append(c.counterRegexes, re)\n\t}\n\treturn nil\n}\n\nfunc (o *otlpOutput) buildEventProcessors(cfg *config) ([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := gutils.GetConfigMaps(o.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn formatters.MakeEventProcessors(o.logger, cfg.EventProcessors, ps, tcs, acts)\n}\n\nfunc (o *otlpOutput) initGRPCFor(cfg *config) (*grpcClientState, error) {\n\tvar opts []grpc.DialOption\n\n\tif cfg.TLS != nil {\n\t\ttlsConfig, err := o.createTLSConfigFor(cfg)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create TLS config: %w\", err)\n\t\t}\n\t\topts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))\n\t} else {\n\t\topts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))\n\t}\n\n\tconn, err := grpc.NewClient(cfg.Endpoint, opts...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create OTLP client: 
%w\", err)\n\t}\n\n\to.logger.Printf(\"initialized OTLP gRPC client for endpoint: %s\", cfg.Endpoint)\n\treturn &grpcClientState{\n\t\tconn:   conn,\n\t\tclient: metricsv1.NewMetricsServiceClient(conn),\n\t}, nil\n}\n\nfunc (o *otlpOutput) createTLSConfigFor(cfg *config) (*tls.Config, error) {\n\ttlsConfig := &tls.Config{\n\t\tInsecureSkipVerify: cfg.TLS.SkipVerify,\n\t}\n\n\tif cfg.TLS.CaFile != \"\" || cfg.TLS.CertFile != \"\" {\n\t\treturn utils.NewTLSConfig(\n\t\t\tcfg.TLS.CaFile,\n\t\t\tcfg.TLS.CertFile,\n\t\t\tcfg.TLS.KeyFile,\n\t\t\t\"\",\n\t\t\tcfg.TLS.SkipVerify,\n\t\t\tfalse,\n\t\t)\n\t}\n\n\treturn tlsConfig, nil\n}\n\nfunc (o *otlpOutput) registerMetrics() error {\n\tcfg := o.cfg.Load()\n\tif !cfg.EnableMetrics {\n\t\treturn nil\n\t}\n\n\tif o.reg == nil {\n\t\treturn nil\n\t}\n\n\tif err := o.reg.Register(otlpNumberOfSentEvents); err != nil {\n\t\treturn err\n\t}\n\tif err := o.reg.Register(otlpNumberOfFailedEvents); err != nil {\n\t\treturn err\n\t}\n\tif err := o.reg.Register(otlpSendDuration); err != nil {\n\t\treturn err\n\t}\n\tif err := o.reg.Register(otlpRejectedDataPoints); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// Helper functions for detecting config changes\n\nfunc channelNeedsSwap(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.BufferSize != nw.BufferSize\n}\n\nfunc needsWorkerRestart(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.NumWorkers != nw.NumWorkers ||\n\t\told.BatchSize != nw.BatchSize ||\n\t\told.Interval != nw.Interval\n}\n\nfunc needsGRPCRebuild(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.Endpoint != nw.Endpoint ||\n\t\told.Protocol != nw.Protocol ||\n\t\t!old.TLS.Equal(nw.TLS)\n}\n"
  },
  {
    "path": "pkg/outputs/otlp_output/otlp_output_test.go",
    "content": "// © 2025-2026 NVIDIA Corporation\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of NVIDIA's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage otlp_output\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"regexp\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\tmetricsv1 \"go.opentelemetry.io/proto/otlp/collector/metrics/v1\"\n\tcommonpb \"go.opentelemetry.io/proto/otlp/common/v1\"\n\t\"google.golang.org/grpc\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\t\"github.com/zestor-dev/zestor/store\"\n\t\"github.com/zestor-dev/zestor/store/gomap\"\n)\n\n// newTestOutput creates an otlpOutput suitable for converter tests (no Init required).\nfunc newTestOutput(cfg *config) *otlpOutput {\n\tcfg.resourceTagSet = make(map[string]bool, len(cfg.ResourceTagKeys))\n\tfor _, k := range cfg.ResourceTagKeys {\n\t\tcfg.resourceTagSet[k] = true\n\t}\n\tcfg.counterRegexes = make([]*regexp.Regexp, 0, len(cfg.CounterPatterns))\n\tfor _, p := range cfg.CounterPatterns {\n\t\tcfg.counterRegexes = append(cfg.counterRegexes, regexp.MustCompile(p))\n\t}\n\to := &otlpOutput{}\n\to.cfg = new(atomic.Pointer[config])\n\to.cfg.Store(cfg)\n\to.logger = log.New(io.Discard, \"\", 0)\n\treturn o\n}\n\n// Test 1: OTLP Message Structure\nfunc TestOTLP_MessageStructure(t *testing.T) {\n\tt.Skip(\"Implementation pending\")\n\n\t// Test that gNMI metrics convert to proper OTLP structure\n\t// Given: gNMI metric update\n\tevent := &formatters.EventMsg{\n\t\tName:      
\"interfaces_interface_state_counters_in_octets\",\n\t\tTimestamp: time.Now().UnixNano(),\n\t\tTags: map[string]string{\n\t\t\t\"interface_name\": \"Ethernet1\",\n\t\t\t\"source\":         \"10.1.1.1:6030\",\n\t\t},\n\t\tValues: map[string]interface{}{\n\t\t\t\"value\": int64(1234567890),\n\t\t},\n\t}\n\n\t// When: Converting to OTLP\n\toutput := newTestOutput(&config{\n\t\tEndpoint: \"localhost:4317\",\n\t})\n\totlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event})\n\n\t// Then: Should have proper OTLP structure\n\trequire.NotNil(t, otlpMetrics)\n\trequire.Equal(t, 1, len(otlpMetrics.ResourceMetrics))\n\trequire.Equal(t, 1, len(otlpMetrics.ResourceMetrics[0].ScopeMetrics))\n\n\tmetric := otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics[0]\n\tassert.Equal(t, \"interfaces_interface_state_counters_in_octets\", metric.Name)\n\n\t// Verify it's a Sum (monotonic counter)\n\tassert.NotNil(t, metric.GetSum())\n\tassert.True(t, metric.GetSum().IsMonotonic)\n}\n\n// Test 2: Resource Attributes\nfunc TestOTLP_ResourceAttributes(t *testing.T) {\n\tt.Skip(\"Implementation pending\")\n\n\t// Test that device metadata becomes OTLP resource attributes\n\t// Given: gNMI update with device metadata\n\tevent := &formatters.EventMsg{\n\t\tName:      \"interfaces_interface_state_counters_in_octets\",\n\t\tTimestamp: time.Now().UnixNano(),\n\t\tTags: map[string]string{\n\t\t\t\"device\": \"switch1-jhb01\",\n\t\t\t\"vendor\": \"arista\",\n\t\t\t\"site\":   \"jhb01\",\n\t\t\t\"source\": \"10.1.1.1:6030\",\n\t\t},\n\t\tValues: map[string]interface{}{\n\t\t\t\"value\": int64(100),\n\t\t},\n\t}\n\n\t// When: Converting to OTLP\n\toutput := newTestOutput(&config{})\n\totlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event})\n\n\t// Then: Resource attributes should match metadata\n\tresource := otlpMetrics.ResourceMetrics[0].Resource\n\tassert.Equal(t, \"switch1-jhb01\", getAttributeValue(resource, \"device\"))\n\tassert.Equal(t, \"arista\", 
getAttributeValue(resource, \"vendor\"))\n\tassert.Equal(t, \"jhb01\", getAttributeValue(resource, \"site\"))\n\tassert.Equal(t, \"10.1.1.1:6030\", getAttributeValue(resource, \"source\"))\n}\n\n// Test 3: Metric Attributes from Path Keys\nfunc TestOTLP_PathKeysAsAttributes(t *testing.T) {\n\tt.Skip(\"Implementation pending\")\n\n\t// Test that gNMI path keys become OTLP metric attributes\n\t// Given: Event with path key as tag\n\tevent := &formatters.EventMsg{\n\t\tName:      \"interfaces_interface_state_counters_in_octets\",\n\t\tTimestamp: time.Now().UnixNano(),\n\t\tTags: map[string]string{\n\t\t\t\"interface_name\": \"Ethernet1\",\n\t\t\t\"source\":         \"10.1.1.1:6030\",\n\t\t},\n\t\tValues: map[string]interface{}{\n\t\t\t\"value\": int64(999),\n\t\t},\n\t}\n\n\t// When: Converting to OTLP\n\toutput := newTestOutput(&config{})\n\totlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event})\n\n\t// Then: Path key becomes attribute\n\tdataPoint := otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics[0].GetSum().DataPoints[0]\n\tassert.Equal(t, \"Ethernet1\", getDataPointAttribute(dataPoint, \"interface_name\"))\n}\n\n// Test 4: Metric Type Detection\nfunc TestOTLP_MetricTypeDetection(t *testing.T) {\n\tt.Skip(\"Implementation pending\")\n\n\ttests := []struct {\n\t\tname              string\n\t\tmetricName        string\n\t\tvalue             interface{}\n\t\texpectedType      string // \"Sum\" or \"Gauge\"\n\t\texpectedMonotonic bool\n\t}{\n\t\t{\n\t\t\tname:              \"counter metric\",\n\t\t\tmetricName:        \"interfaces_interface_state_counters_in_octets\",\n\t\t\tvalue:             int64(1000),\n\t\t\texpectedType:      \"Sum\",\n\t\t\texpectedMonotonic: true,\n\t\t},\n\t\t{\n\t\t\tname:              \"gauge metric - temperature\",\n\t\t\tmetricName:        \"components_component_temperature_instant\",\n\t\t\tvalue:             45.5,\n\t\t\texpectedType:      \"Gauge\",\n\t\t\texpectedMonotonic: false,\n\t\t},\n\t\t{\n\t\t\tname:         
     \"gauge metric - status\",\n\t\t\tmetricName:        \"interfaces_interface_state_oper_status\",\n\t\t\tvalue:             \"up\",\n\t\t\texpectedType:      \"Gauge\",\n\t\t\texpectedMonotonic: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tevent := &formatters.EventMsg{\n\t\t\t\tName:      tt.metricName,\n\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\"value\": tt.value,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\toutput := newTestOutput(&config{})\n\t\t\totlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event})\n\t\t\tmetric := otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics[0]\n\n\t\t\tswitch tt.expectedType {\n\t\t\tcase \"Sum\":\n\t\t\t\tassert.NotNil(t, metric.GetSum())\n\t\t\t\tassert.Equal(t, tt.expectedMonotonic, metric.GetSum().IsMonotonic)\n\t\t\tcase \"Gauge\":\n\t\t\t\tassert.NotNil(t, metric.GetGauge())\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Test 5: gRPC Transport\nfunc TestOTLP_GRPCTransport(t *testing.T) {\n\tserver, endpoint := startMockOTLPServer(t)\n\tdefer server.Stop()\n\n\tcfg := map[string]interface{}{\n\t\t\"endpoint\":   endpoint,\n\t\t\"protocol\":   \"grpc\",\n\t\t\"timeout\":    \"5s\",\n\t\t\"batch-size\": 1,\n\t\t\"interval\":   \"100ms\",\n\t}\n\n\toutput := &otlpOutput{}\n\n\terr := output.Init(context.Background(), \"test-otlp\", cfg,\n\t\toutputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})),\n\t)\n\trequire.NoError(t, err)\n\tdefer output.Close()\n\n\tevent := createTestEvent()\n\toutput.WriteEvent(context.Background(), event)\n\n\ttime.Sleep(200 * time.Millisecond)\n\tassert.Greater(t, server.ReceivedMetricsCount(), 0)\n}\n\n// Test 6: Configuration Validation\nfunc TestOTLP_ConfigValidation(t *testing.T) {\n\tt.Skip(\"Implementation pending\")\n\n\ttests := []struct {\n\t\tname        string\n\t\tconfig      map[string]interface{}\n\t\texpectError bool\n\t\terrorMsg    string\n\t}{\n\t\t{\n\t\t\tname: \"valid gRPC 
config\",\n\t\t\tconfig: map[string]interface{}{\n\t\t\t\t\"endpoint\": \"localhost:4317\",\n\t\t\t\t\"protocol\": \"grpc\",\n\t\t\t},\n\t\t\texpectError: false,\n\t\t},\n\t\t{\n\t\t\tname: \"valid HTTP config\",\n\t\t\tconfig: map[string]interface{}{\n\t\t\t\t\"endpoint\": \"http://localhost:4318\",\n\t\t\t\t\"protocol\": \"http\",\n\t\t\t},\n\t\t\texpectError: false,\n\t\t},\n\t\t{\n\t\t\tname: \"missing endpoint\",\n\t\t\tconfig: map[string]interface{}{\n\t\t\t\t\"protocol\": \"grpc\",\n\t\t\t},\n\t\t\texpectError: true,\n\t\t\terrorMsg:    \"endpoint is required\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid protocol\",\n\t\t\tconfig: map[string]interface{}{\n\t\t\t\t\"endpoint\": \"localhost:4317\",\n\t\t\t\t\"protocol\": \"invalid\",\n\t\t\t},\n\t\t\texpectError: true,\n\t\t\terrorMsg:    \"unsupported protocol\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\toutput := &otlpOutput{}\n\t\t\terr := output.Init(context.Background(), \"test-otlp\", tt.config)\n\n\t\t\tif tt.expectError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tif tt.errorMsg != \"\" {\n\t\t\t\t\tassert.Contains(t, err.Error(), tt.errorMsg)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\toutput.Close()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Test 7: String Values as Attributes\nfunc TestOTLP_StringValuesAsAttributes(t *testing.T) {\n\tt.Skip(\"Implementation pending\")\n\n\t// Test strings-as-attributes conversion\n\t// Given: String value metric\n\tevent := &formatters.EventMsg{\n\t\tName:      \"interfaces_interface_state_oper_status\",\n\t\tTimestamp: time.Now().UnixNano(),\n\t\tTags: map[string]string{\n\t\t\t\"interface_name\": \"Ethernet1\",\n\t\t},\n\t\tValues: map[string]interface{}{\n\t\t\t\"value\": \"up\",\n\t\t},\n\t}\n\n\t// When: Converting with strings-as-attributes enabled\n\toutput := newTestOutput(&config{StringsAsAttributes: true})\n\totlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event})\n\n\t// Then: Should create 
gauge with value=1 and status as attribute\n\tmetric := otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics[0]\n\tgauge := metric.GetGauge()\n\trequire.NotNil(t, gauge)\n\n\tdataPoint := gauge.DataPoints[0]\n\tassert.Equal(t, float64(1), dataPoint.GetAsDouble())\n\tassert.Equal(t, \"up\", getDataPointAttribute(dataPoint, \"value\"))\n}\n\n// Test 8: Subscription Name Mapping\nfunc TestOTLP_SubscriptionNameMapping(t *testing.T) {\n\tt.Skip(\"Implementation pending\")\n\n\t// Test that subscription names become resource attributes\n\t// Given: Event with subscription name\n\tevent := &formatters.EventMsg{\n\t\tName:      \"interfaces_interface_state_counters_in_octets\",\n\t\tTimestamp: time.Now().UnixNano(),\n\t\tTags: map[string]string{\n\t\t\t\"subscription_name\": \"arista\",\n\t\t\t\"source\":            \"10.1.1.1:6030\",\n\t\t},\n\t\tValues: map[string]interface{}{\n\t\t\t\"value\": int64(100),\n\t\t},\n\t}\n\n\t// When: Converting with append-subscription-name enabled\n\toutput := newTestOutput(&config{AppendSubscriptionName: true})\n\totlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event})\n\n\t// Then: subscription_name should be in attributes\n\tdataPoint := otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics[0].GetSum().DataPoints[0]\n\tassert.Equal(t, \"arista\", getDataPointAttribute(dataPoint, \"subscription_name\"))\n}\n\n// TestBuildMetricName_StripLeadingUnderscore verifies the strip-leading-underscore config option.\n// gNMI paths arrive with a leading \"/\" (see pkg/formatters/event.go updateToEvent), which the\n// slash->underscore conversion turns into a leading \"_\". 
This test pins both the backward-compatible\n// default (option off) and the new behavior (option on).\nfunc TestBuildMetricName_StripLeadingUnderscore(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tcfg      *config\n\t\tevent    *formatters.EventMsg\n\t\tvalueKey string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tname:     \"default preserves leading underscore (backward compat)\",\n\t\t\tcfg:      &config{},\n\t\t\tevent:    &formatters.EventMsg{Name: \"sub1\"},\n\t\t\tvalueKey: \"/interfaces/interface/state/counters/in-octets\",\n\t\t\texpected: \"_interfaces_interface_state_counters_in_octets\",\n\t\t},\n\t\t{\n\t\t\tname:     \"enabled removes leading underscore\",\n\t\t\tcfg:      &config{StripLeadingUnderscore: true},\n\t\t\tevent:    &formatters.EventMsg{Name: \"sub1\"},\n\t\t\tvalueKey: \"/interfaces/interface/state/counters/in-octets\",\n\t\t\texpected: \"interfaces_interface_state_counters_in_octets\",\n\t\t},\n\t\t{\n\t\t\tname:     \"disabled with metric-prefix yields double underscore (backward compat)\",\n\t\t\tcfg:      &config{MetricPrefix: \"gnmic\"},\n\t\t\tevent:    &formatters.EventMsg{Name: \"sub1\"},\n\t\t\tvalueKey: \"/interfaces/interface/state/counters/in-octets\",\n\t\t\texpected: \"gnmic__interfaces_interface_state_counters_in_octets\",\n\t\t},\n\t\t{\n\t\t\tname:     \"enabled with metric-prefix has single underscore separator\",\n\t\t\tcfg:      &config{StripLeadingUnderscore: true, MetricPrefix: \"gnmic\"},\n\t\t\tevent:    &formatters.EventMsg{Name: \"sub1\"},\n\t\t\tvalueKey: \"/interfaces/interface/state/counters/in-octets\",\n\t\t\texpected: \"gnmic_interfaces_interface_state_counters_in_octets\",\n\t\t},\n\t\t{\n\t\t\tname:     \"enabled with append-subscription-name has single underscore separator\",\n\t\t\tcfg:      &config{StripLeadingUnderscore: true, AppendSubscriptionName: true},\n\t\t\tevent:    &formatters.EventMsg{Name: \"arista\"},\n\t\t\tvalueKey: 
\"/interfaces/interface/state/counters/in-octets\",\n\t\t\texpected: \"arista_interfaces_interface_state_counters_in_octets\",\n\t\t},\n\t\t{\n\t\t\tname:     \"enabled does not touch non-leading underscores\",\n\t\t\tcfg:      &config{StripLeadingUnderscore: true},\n\t\t\tevent:    &formatters.EventMsg{Name: \"sub1\"},\n\t\t\tvalueKey: \"/a_b/c\",\n\t\t\texpected: \"a_b_c\",\n\t\t},\n\t\t{\n\t\t\tname:     \"enabled is a no-op when path has no leading slash\",\n\t\t\tcfg:      &config{StripLeadingUnderscore: true},\n\t\t\tevent:    &formatters.EventMsg{Name: \"sub1\"},\n\t\t\tvalueKey: \"interfaces/interface\",\n\t\t\texpected: \"interfaces_interface\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\toutput := newTestOutput(tt.cfg)\n\t\t\tgot := output.buildMetricName(tt.cfg, tt.event, tt.valueKey)\n\t\t\tassert.Equal(t, tt.expected, got)\n\t\t})\n\t}\n}\n\n// Helper functions\n\nfunc createTestEvent() *formatters.EventMsg {\n\treturn &formatters.EventMsg{\n\t\tName:      \"test_metric\",\n\t\tTimestamp: time.Now().UnixNano(),\n\t\tTags: map[string]string{\n\t\t\t\"source\": \"test:1234\",\n\t\t},\n\t\tValues: map[string]interface{}{\n\t\t\t\"value\": int64(42),\n\t\t},\n\t}\n}\n\nfunc getAttributeValue(resource interface{}, key string) string {\n\t// Helper to extract attribute value from resource\n\t// Will implement when we have the actual OTLP structures\n\treturn \"\"\n}\n\nfunc getDataPointAttribute(dataPoint interface{}, key string) string {\n\t// Helper to extract attribute value from data point\n\t// Will implement when we have the actual OTLP structures\n\treturn \"\"\n}\n\n// Mock OTLP server for testing\ntype mockOTLPServer struct {\n\tmetricsv1.UnimplementedMetricsServiceServer\n\tgrpcServer *grpc.Server\n\tlistener   net.Listener\n\n\tm            sync.Mutex\n\tmetricsCount int\n\treceivedReqs []*metricsv1.ExportMetricsServiceRequest\n}\n\nfunc startMockOTLPServer(t *testing.T) (*mockOTLPServer, string) 
{\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\trequire.NoError(t, err)\n\n\tserver := grpc.NewServer()\n\tmock := &mockOTLPServer{\n\t\tgrpcServer: server,\n\t\tlistener:   listener,\n\t}\n\n\tmetricsv1.RegisterMetricsServiceServer(server, mock)\n\n\tgo server.Serve(listener)\n\n\treturn mock, listener.Addr().String()\n}\n\nfunc startMockOTLPServerOnAddress(t *testing.T, addr string) (*mockOTLPServer, string) {\n\tlistener, err := net.Listen(\"tcp\", addr)\n\trequire.NoError(t, err)\n\n\tserver := grpc.NewServer()\n\tmock := &mockOTLPServer{\n\t\tgrpcServer: server,\n\t\tlistener:   listener,\n\t}\n\n\tmetricsv1.RegisterMetricsServiceServer(server, mock)\n\tgo server.Serve(listener)\n\n\treturn mock, listener.Addr().String()\n}\n\nfunc (m *mockOTLPServer) Export(ctx context.Context, req *metricsv1.ExportMetricsServiceRequest) (*metricsv1.ExportMetricsServiceResponse, error) {\n\tif err := ctx.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\tm.m.Lock()\n\tdefer m.m.Unlock()\n\tm.receivedReqs = append(m.receivedReqs, req)\n\tm.metricsCount += len(req.ResourceMetrics)\n\treturn &metricsv1.ExportMetricsServiceResponse{}, nil\n}\n\nfunc (m *mockOTLPServer) ReceivedMetricsCount() int {\n\tm.m.Lock()\n\tdefer m.m.Unlock()\n\treturn m.metricsCount\n}\n\nfunc (m *mockOTLPServer) Stop() {\n\tm.grpcServer.Stop()\n\tm.listener.Close()\n}\n\n// Test 9: Resource Tag Keys control data point vs resource attribute placement\nfunc TestOTLP_ResourceTagKeys(t *testing.T) {\n\ttests := []struct {\n\t\tname                   string\n\t\tresourceTagKeys        []string\n\t\teventTags              map[string]string\n\t\texpectedInDataPoint    []string\n\t\texpectedNotInDataPoint []string\n\t}{\n\t\t{\n\t\t\tname:            \"empty resource-tag-keys: all tags become data point attributes\",\n\t\t\tresourceTagKeys: []string{},\n\t\t\teventTags: map[string]string{\n\t\t\t\t\"device\":            \"nvswitch1-nvl9-gp1-jhb01\",\n\t\t\t\t\"vendor\":            
\"nvidia\",\n\t\t\t\t\"model\":             \"nvos\",\n\t\t\t\t\"interface_name\":    \"Ethernet1\",\n\t\t\t\t\"subscription_name\": \"nvos\",\n\t\t\t},\n\t\t\texpectedInDataPoint:    []string{\"device\", \"vendor\", \"model\", \"interface_name\", \"subscription_name\"},\n\t\t\texpectedNotInDataPoint: []string{},\n\t\t},\n\t\t{\n\t\t\tname:            \"default resource-tag-keys: device/vendor/model/site/source excluded from data point\",\n\t\t\tresourceTagKeys: []string{\"device\", \"vendor\", \"model\", \"site\", \"source\"},\n\t\t\teventTags: map[string]string{\n\t\t\t\t\"device\":         \"nvswitch1-nvl9-gp1-jhb01\",\n\t\t\t\t\"vendor\":         \"nvidia\",\n\t\t\t\t\"model\":          \"nvos\",\n\t\t\t\t\"interface_name\": \"Ethernet1\",\n\t\t\t},\n\t\t\texpectedInDataPoint:    []string{\"interface_name\"},\n\t\t\texpectedNotInDataPoint: []string{\"device\", \"vendor\", \"model\"},\n\t\t},\n\t\t{\n\t\t\tname:            \"custom resource-tag-keys: only specified keys excluded\",\n\t\t\tresourceTagKeys: []string{\"source\"},\n\t\t\teventTags: map[string]string{\n\t\t\t\t\"device\":         \"nvswitch1\",\n\t\t\t\t\"vendor\":         \"nvidia\",\n\t\t\t\t\"source\":         \"10.0.0.1\",\n\t\t\t\t\"interface_name\": \"Ethernet1\",\n\t\t\t},\n\t\t\texpectedInDataPoint:    []string{\"device\", \"vendor\", \"interface_name\"},\n\t\t\texpectedNotInDataPoint: []string{\"source\"},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tevent := &formatters.EventMsg{\n\t\t\t\tName:      \"test_metric\",\n\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\tTags:      tt.eventTags,\n\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\"value\": int64(100),\n\t\t\t\t},\n\t\t\t}\n\n\t\t\toutput := newTestOutput(&config{\n\t\t\t\tResourceTagKeys: tt.resourceTagKeys,\n\t\t\t})\n\t\t\totlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event})\n\n\t\t\trequire.NotNil(t, otlpMetrics)\n\t\t\trequire.Len(t, otlpMetrics.ResourceMetrics, 
1)\n\t\t\trequire.Len(t, otlpMetrics.ResourceMetrics[0].ScopeMetrics, 1)\n\t\t\trequire.Len(t, otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics, 1)\n\n\t\t\tmetric := otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics[0]\n\t\t\tvar dataPointAttrs map[string]string\n\n\t\t\tif metric.GetGauge() != nil {\n\t\t\t\tdataPointAttrs = extractAttributesMap(metric.GetGauge().DataPoints[0].Attributes)\n\t\t\t} else if metric.GetSum() != nil {\n\t\t\t\tdataPointAttrs = extractAttributesMap(metric.GetSum().DataPoints[0].Attributes)\n\t\t\t} else {\n\t\t\t\tt.Fatal(\"Metric has neither Gauge nor Sum data\")\n\t\t\t}\n\n\t\t\tfor _, key := range tt.expectedInDataPoint {\n\t\t\t\tassert.Contains(t, dataPointAttrs, key, \"Expected tag '%s' to be in data point attributes\", key)\n\t\t\t\tassert.Equal(t, tt.eventTags[key], dataPointAttrs[key], \"Tag '%s' value mismatch\", key)\n\t\t\t}\n\n\t\t\tfor _, key := range tt.expectedNotInDataPoint {\n\t\t\t\tassert.NotContains(t, dataPointAttrs, key, \"Tag '%s' should NOT be in data point attributes\", key)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Test 10: Resource Attributes Behavior with ResourceTagKeys\nfunc TestOTLP_ResourceAttributesBehavior(t *testing.T) {\n\ttests := []struct {\n\t\tname                string\n\t\tresourceTagKeys     []string\n\t\teventTags           map[string]string\n\t\texpectInResource    []string\n\t\texpectNotInResource []string\n\t}{\n\t\t{\n\t\t\tname:            \"empty resource-tag-keys: no tags in resource\",\n\t\t\tresourceTagKeys: []string{},\n\t\t\teventTags: map[string]string{\n\t\t\t\t\"device\": \"nvswitch1\",\n\t\t\t\t\"vendor\": \"nvidia\",\n\t\t\t},\n\t\t\texpectInResource:    []string{},\n\t\t\texpectNotInResource: []string{\"device\", \"vendor\"},\n\t\t},\n\t\t{\n\t\t\tname:            \"default resource-tag-keys: device/vendor in resource\",\n\t\t\tresourceTagKeys: []string{\"device\", \"vendor\", \"model\", \"site\", \"source\"},\n\t\t\teventTags: map[string]string{\n\t\t\t\t\"device\": 
\"nvswitch1\",\n\t\t\t\t\"vendor\": \"nvidia\",\n\t\t\t},\n\t\t\texpectInResource:    []string{\"device\", \"vendor\"},\n\t\t\texpectNotInResource: []string{},\n\t\t},\n\t\t{\n\t\t\tname:            \"custom resource-tag-keys: only source in resource\",\n\t\t\tresourceTagKeys: []string{\"source\"},\n\t\t\teventTags: map[string]string{\n\t\t\t\t\"device\": \"nvswitch1\",\n\t\t\t\t\"vendor\": \"nvidia\",\n\t\t\t\t\"source\": \"10.0.0.1\",\n\t\t\t},\n\t\t\texpectInResource:    []string{\"source\"},\n\t\t\texpectNotInResource: []string{\"device\", \"vendor\"},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tevent := &formatters.EventMsg{\n\t\t\t\tName:      \"test_metric\",\n\t\t\t\tTimestamp: time.Now().UnixNano(),\n\t\t\t\tTags:      tt.eventTags,\n\t\t\t\tValues: map[string]interface{}{\n\t\t\t\t\t\"value\": int64(100),\n\t\t\t\t},\n\t\t\t}\n\n\t\t\toutput := newTestOutput(&config{\n\t\t\t\tResourceTagKeys: tt.resourceTagKeys,\n\t\t\t})\n\t\t\totlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event})\n\n\t\t\trequire.NotNil(t, otlpMetrics)\n\t\t\tresource := otlpMetrics.ResourceMetrics[0].Resource\n\t\t\tresourceAttrs := extractAttributesMap(resource.Attributes)\n\n\t\t\tfor _, key := range tt.expectInResource {\n\t\t\t\tassert.Contains(t, resourceAttrs, key, \"Expected tag '%s' in resource attributes\", key)\n\t\t\t}\n\n\t\t\tfor _, key := range tt.expectNotInResource {\n\t\t\t\tassert.NotContains(t, resourceAttrs, key, \"Tag '%s' should NOT be in resource attributes\", key)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Test 11: Configured Resource Attributes Always Included\nfunc TestOTLP_ConfiguredResourceAttributesAlwaysIncluded(t *testing.T) {\n\tevent := &formatters.EventMsg{\n\t\tName:      \"test_metric\",\n\t\tTimestamp: time.Now().UnixNano(),\n\t\tTags: map[string]string{\n\t\t\t\"device\": \"nvswitch1\",\n\t\t},\n\t\tValues: map[string]interface{}{\n\t\t\t\"value\": int64(100),\n\t\t},\n\t}\n\n\toutput := 
newTestOutput(&config{\n\t\tResourceTagKeys: []string{},\n\t\tResourceAttributes: map[string]string{\n\t\t\t\"service.name\":    \"gnmic-collector\",\n\t\t\t\"service.version\": \"0.42.0\",\n\t\t},\n\t})\n\totlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event})\n\n\tresource := otlpMetrics.ResourceMetrics[0].Resource\n\tresourceAttrs := extractAttributesMap(resource.Attributes)\n\n\t// Configured resource attributes should always be present\n\tassert.Equal(t, \"gnmic-collector\", resourceAttrs[\"service.name\"])\n\tassert.Equal(t, \"0.42.0\", resourceAttrs[\"service.version\"])\n}\n\n// Test 12: Init succeeds even when endpoint is unreachable\nfunc TestOTLP_InitSucceedsWithUnreachableEndpoint(t *testing.T) {\n\tcfg := map[string]interface{}{\n\t\t\"endpoint\": \"unreachable-host:4317\",\n\t\t\"protocol\": \"grpc\",\n\t\t\"timeout\":  \"1s\",\n\t}\n\n\toutput := &otlpOutput{}\n\n\terr := output.Init(context.Background(), \"test-otlp\", cfg,\n\t\toutputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})),\n\t)\n\tassert.NoError(t, err, \"Init should succeed even with unreachable endpoint\")\n\n\tgs := output.grpcState.Load()\n\tassert.NotNil(t, gs, \"gRPC state should be created\")\n\tassert.NotNil(t, gs.conn, \"gRPC connection should be created\")\n\tassert.NotNil(t, gs.client, \"gRPC client should be created\")\n\n\toutput.Close()\n}\n\n// Test 13: Connection happens lazily on first RPC\nfunc TestOTLP_ConnectionOnFirstRPC(t *testing.T) {\n\tserver, endpoint := startMockOTLPServer(t)\n\tdefer server.Stop()\n\n\tcfg := map[string]interface{}{\n\t\t\"endpoint\":   endpoint,\n\t\t\"protocol\":   \"grpc\",\n\t\t\"timeout\":    \"5s\",\n\t\t\"batch-size\": 1,\n\t\t\"interval\":   \"100ms\",\n\t}\n\n\toutput := &otlpOutput{}\n\n\terr := output.Init(context.Background(), \"test-otlp\", cfg,\n\t\toutputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})),\n\t)\n\trequire.NoError(t, err)\n\tdefer output.Close()\n\n\tassert.Equal(t, 0, 
server.ReceivedMetricsCount(), \"No metrics should be sent yet\")\n\n\tevent := createTestEvent()\n\toutput.WriteEvent(context.Background(), event)\n\n\ttime.Sleep(200 * time.Millisecond)\n\tassert.Greater(t, server.ReceivedMetricsCount(), 0, \"Metrics should be sent on first RPC\")\n}\n\n// Test 14: Retry behavior with delayed endpoint availability\nfunc TestOTLP_ReconnectWhenEndpointBecomesAvailable(t *testing.T) {\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\trequire.NoError(t, err)\n\tendpoint := listener.Addr().String()\n\tlistener.Close()\n\n\tcfg := map[string]interface{}{\n\t\t\"endpoint\":    endpoint,\n\t\t\"protocol\":    \"grpc\",\n\t\t\"timeout\":     \"2s\",\n\t\t\"batch-size\":  1,\n\t\t\"interval\":    \"200ms\",\n\t\t\"max-retries\": 10,\n\t}\n\n\toutput := &otlpOutput{}\n\n\terr = output.Init(context.Background(), \"test-otlp\", cfg,\n\t\toutputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})),\n\t)\n\trequire.NoError(t, err)\n\tdefer output.Close()\n\n\tserver, newEndpoint := startMockOTLPServerOnAddress(t, endpoint)\n\tdefer server.Stop()\n\tassert.Equal(t, endpoint, newEndpoint)\n\n\tevent := createTestEvent()\n\toutput.WriteEvent(context.Background(), event)\n\n\ttime.Sleep(500 * time.Millisecond)\n\n\tassert.Greater(t, server.ReceivedMetricsCount(), 0, \"Should successfully send after endpoint becomes available\")\n}\n\n// Test 15: Graceful shutdown flushes remaining batch\nfunc TestOTLP_GracefulShutdownFlushes(t *testing.T) {\n\tserver, endpoint := startMockOTLPServer(t)\n\tdefer server.Stop()\n\n\tcfg := map[string]interface{}{\n\t\t\"endpoint\":   endpoint,\n\t\t\"protocol\":   \"grpc\",\n\t\t\"timeout\":    \"5s\",\n\t\t\"batch-size\": 100,\n\t\t\"interval\":   \"10s\",\n\t}\n\n\toutput := &otlpOutput{}\n\n\terr := output.Init(context.Background(), \"test-otlp\", cfg,\n\t\toutputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})),\n\t)\n\trequire.NoError(t, err)\n\n\tevent := 
createTestEvent()\n\toutput.WriteEvent(context.Background(), event)\n\toutput.WriteEvent(context.Background(), event)\n\n\ttime.Sleep(100 * time.Millisecond)\n\tassert.Equal(t, 0, server.ReceivedMetricsCount(), \"Batch should not be sent yet (batch size not reached)\")\n\n\toutput.Close()\n\n\ttime.Sleep(200 * time.Millisecond)\n\tassert.Greater(t, server.ReceivedMetricsCount(), 0, \"Remaining batch should be flushed on shutdown\")\n}\n\n// Test 16: Context cancellation sends final batch with fresh context\nfunc TestOTLP_ContextCancellationFlushes(t *testing.T) {\n\tserver, endpoint := startMockOTLPServer(t)\n\tdefer server.Stop()\n\n\tcfg := map[string]interface{}{\n\t\t\"endpoint\":   endpoint,\n\t\t\"protocol\":   \"grpc\",\n\t\t\"timeout\":    \"5s\",\n\t\t\"batch-size\": 100,\n\t\t\"interval\":   \"10s\",\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\n\toutput := &otlpOutput{}\n\n\terr := output.Init(ctx, \"test-otlp\", cfg,\n\t\toutputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})),\n\t)\n\trequire.NoError(t, err)\n\n\tevent := createTestEvent()\n\toutput.WriteEvent(context.Background(), event)\n\toutput.WriteEvent(context.Background(), event)\n\n\ttime.Sleep(100 * time.Millisecond)\n\tassert.Equal(t, 0, server.ReceivedMetricsCount(), \"Batch should not be sent yet\")\n\n\tcancel()\n\n\ttime.Sleep(200 * time.Millisecond)\n\toutput.Close()\n\n\tassert.Greater(t, server.ReceivedMetricsCount(), 0, \"Batch should be flushed even after context cancellation\")\n}\n\n// Test 17: Channel close flushes remaining batch\nfunc TestOTLP_ChannelCloseFlushes(t *testing.T) {\n\tserver, endpoint := startMockOTLPServer(t)\n\tdefer server.Stop()\n\n\tcfg := map[string]interface{}{\n\t\t\"endpoint\":   endpoint,\n\t\t\"protocol\":   \"grpc\",\n\t\t\"timeout\":    \"5s\",\n\t\t\"batch-size\": 100,\n\t\t\"interval\":   \"10s\",\n\t}\n\n\toutput := &otlpOutput{}\n\n\terr := output.Init(context.Background(), \"test-otlp\", 
cfg,\n\t\toutputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})),\n\t)\n\trequire.NoError(t, err)\n\n\tevent := createTestEvent()\n\toutput.WriteEvent(context.Background(), event)\n\toutput.WriteEvent(context.Background(), event)\n\toutput.WriteEvent(context.Background(), event)\n\n\ttime.Sleep(100 * time.Millisecond)\n\tassert.Equal(t, 0, server.ReceivedMetricsCount(), \"Batch should not be sent yet\")\n\n\tclose(*output.eventCh.Load())\n\n\ttime.Sleep(200 * time.Millisecond)\n\n\tassert.Greater(t, server.ReceivedMetricsCount(), 0, \"Remaining batch should be flushed when channel closes\")\n}\n\n// Helper to extract attributes map from KeyValue slice\nfunc extractAttributesMap(attrs []*commonpb.KeyValue) map[string]string {\n\tresult := make(map[string]string)\n\tfor _, attr := range attrs {\n\t\tif strVal := attr.Value.GetStringValue(); strVal != \"\" {\n\t\t\tresult[attr.Key] = strVal\n\t\t}\n\t}\n\treturn result\n}\n"
  },
  {
    "path": "pkg/outputs/output.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// © 2025 NVIDIA Corporation\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of NVIDIA's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage outputs\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"sync\"\n\t\"text/template\"\n\n\t\"github.com/mitchellh/mapstructure\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"google.golang.org/protobuf/proto\"\n\t\"google.golang.org/protobuf/reflect/protoreflect\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t_ \"github.com/openconfig/gnmic/pkg/formatters/all\"\n\tpkgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\ntype Output interface {\n\t// initialize the output\n\tInit(context.Context, string, map[string]any, ...Option) error\n\t// validate the config\n\tValidate(map[string]any) error\n\t// update the config\n\tUpdate(context.Context, map[string]any) error\n\t// update a processor\n\tUpdateProcessor(string, map[string]any) error\n\t// write a protobuf message to the output\n\tWrite(context.Context, proto.Message, Meta)\n\t// write an event message to the 
output\n\tWriteEvent(context.Context, *formatters.EventMsg)\n\t// close the output\n\tClose() error\n\t// return a string representation of the output\n\tString() string\n}\n\ntype Initializer func() Output\n\nvar Outputs = map[string]Initializer{}\n\nvar OutputTypes = map[string]struct{}{\n\t\"file\":             {},\n\t\"influxdb\":         {},\n\t\"kafka\":            {},\n\t\"nats\":             {},\n\t\"otlp\":             {},\n\t\"prometheus\":       {},\n\t\"prometheus_write\": {},\n\t\"tcp\":              {},\n\t\"udp\":              {},\n\t\"gnmi\":             {},\n\t\"jetstream\":        {},\n\t\"snmp\":             {},\n\t\"asciigraph\":       {},\n}\n\nfunc Register(name string, initFn Initializer) {\n\tOutputs[name] = initFn\n}\n\nvar bytesBufferPool = sync.Pool{\n\tNew: func() any {\n\t\treturn new(bytes.Buffer)\n\t},\n}\n\nvar stringBuilderPool = sync.Pool{\n\tNew: func() any {\n\t\treturn new(strings.Builder)\n\t},\n}\n\ntype Meta map[string]string\n\nfunc DecodeConfig(src, dst any) error {\n\tdecoder, err := mapstructure.NewDecoder(\n\t\t&mapstructure.DecoderConfig{\n\t\t\tDecodeHook: mapstructure.StringToTimeDurationHookFunc(),\n\t\t\tResult:     dst,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn decoder.Decode(src)\n}\n\nfunc AddSubscriptionTarget(msg proto.Message, meta Meta, addTarget string, tpl *template.Template) (*gnmi.SubscribeResponse, error) {\n\tif addTarget == \"\" {\n\t\tif message, ok := msg.(*gnmi.SubscribeResponse); ok {\n\t\t\treturn message, nil\n\t\t}\n\t\treturn nil, nil\n\t}\n\tmsg = proto.Clone(msg)\n\tswitch trsp := msg.(type) {\n\tcase *gnmi.SubscribeResponse:\n\t\tswitch rrsp := trsp.Response.(type) {\n\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\tif rrsp.Update.Prefix == nil {\n\t\t\t\trrsp.Update.Prefix = new(gnmi.Path)\n\t\t\t}\n\t\t\tswitch addTarget {\n\t\t\tcase \"overwrite\":\n\t\t\t\tsb := stringBuilderPool.Get().(*strings.Builder)\n\t\t\t\tdefer func() 
{\n\t\t\t\t\tsb.Reset()\n\t\t\t\t\tstringBuilderPool.Put(sb)\n\t\t\t\t}()\n\t\t\t\terr := tpl.Execute(sb, meta)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\trrsp.Update.Prefix.Target = sb.String()\n\t\t\t\treturn trsp, nil\n\t\t\tcase \"if-not-present\":\n\t\t\t\tif rrsp.Update.Prefix.Target == \"\" {\n\t\t\t\t\tsb := stringBuilderPool.Get().(*strings.Builder)\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\tsb.Reset()\n\t\t\t\t\t\tstringBuilderPool.Put(sb)\n\t\t\t\t\t}()\n\t\t\t\t\terr := tpl.Execute(sb, meta)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\trrsp.Update.Prefix.Target = sb.String()\n\t\t\t\t}\n\t\t\t\treturn trsp, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, nil\n}\n\nfunc ExecTemplate(content []byte, tpl *template.Template) ([]byte, error) {\n\tvar input interface{}\n\terr := json.Unmarshal(content, &input)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to marshal input: %v\", err)\n\t}\n\tbf := bytesBufferPool.Get().(*bytes.Buffer)\n\tdefer func() {\n\t\tbf.Reset()\n\t\tbytesBufferPool.Put(bf)\n\t}()\n\terr = tpl.Execute(bf, input)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to execute msg template: %v\", err)\n\t}\n\tresult := bf.Bytes()\n\tout := make([]byte, len(result))\n\tcopy(out, result)\n\treturn out, nil\n}\n\nvar (\n\tDefaultTargetTemplate = template.Must(\n\t\ttemplate.New(\"target-template\").\n\t\t\tFuncs(TemplateFuncs).\n\t\t\tParse(defaultTargetTemplateString))\n\n\tTemplateFuncs = template.FuncMap{\n\t\t\"host\": utils.GetHost,\n\t}\n)\n\nconst (\n\tdefaultTargetTemplateString = `\n{{- if index . \"subscription-target\" -}}\n{{ index . \"subscription-target\" }}\n{{- else -}}\n{{ index . 
\"source\" | host }}\n{{- end -}}`\n)\n\nfunc Marshal(pmsg protoreflect.ProtoMessage, meta map[string]string, mo *formatters.MarshalOptions, splitEvents bool, evps ...formatters.EventProcessor) ([][]byte, error) {\n\tswitch mo.Format {\n\tcase \"event\":\n\t\tif splitEvents {\n\t\t\treturn marshalSplit(pmsg, meta, mo, evps...)\n\t\t}\n\t\tfallthrough\n\tdefault:\n\t\tb, err := mo.Marshal(pmsg, meta, evps...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(b) == 0 {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn [][]byte{b}, nil\n\t}\n}\n\nfunc marshalSplit(pmsg protoreflect.ProtoMessage, meta map[string]string, mo *formatters.MarshalOptions, evps ...formatters.EventProcessor) ([][]byte, error) {\n\tvar subscriptionName string\n\tvar ok bool\n\tif subscriptionName, ok = meta[\"subscription-name\"]; !ok {\n\t\tsubscriptionName = \"default\"\n\t}\n\tswitch msg := pmsg.(type) {\n\tcase *gnmi.SubscribeResponse:\n\t\tswitch msg.GetResponse().(type) {\n\t\tcase *gnmi.SubscribeResponse_Update:\n\t\t\tevents, err := formatters.ResponseToEventMsgs(subscriptionName, msg, meta, evps...)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed converting response to events: %v\", err)\n\t\t\t}\n\t\t\tnumEvents := len(events)\n\t\t\tif numEvents == 0 {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\trs := make([][]byte, 0, numEvents)\n\t\t\tmarshalFn := json.Marshal\n\t\t\tif mo.Multiline {\n\t\t\t\tmarshalFn = func(v any) ([]byte, error) {\n\t\t\t\t\treturn json.MarshalIndent(v, \"\", mo.Indent)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, ev := range events {\n\t\t\t\tb, err := marshalFn(ev)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\trs = append(rs, b)\n\t\t\t}\n\t\t\treturn rs, nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unexpected message type: %T\", msg)\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unexpected message type: %T\", msg)\n\t}\n}\n\ntype BaseOutput struct {\n}\n\nfunc (b *BaseOutput) Init(context.Context, string, 
map[string]any, ...Option) error {\n\treturn nil\n}\n\nfunc (b *BaseOutput) Validate(map[string]any) error {\n\treturn nil\n}\n\nfunc (b *BaseOutput) Update(context.Context, map[string]any) error {\n\treturn nil\n}\n\nfunc (b *BaseOutput) UpdateProcessor(string, map[string]any) error {\n\treturn nil\n}\n\nfunc (b *BaseOutput) Write(context.Context, proto.Message, Meta) {}\n\nfunc (b *BaseOutput) WriteEvent(context.Context, *formatters.EventMsg) {}\n\nfunc (b *BaseOutput) Close() error {\n\treturn nil\n}\n\nfunc (b *BaseOutput) String() string {\n\treturn \"\"\n}\n\n// update processor helper\n\nfunc UpdateProcessorInSlice(\n\tlogger *log.Logger,\n\tstoreObj store.Store[any],\n\teventProcessors []string,\n\tcurrentEvps []formatters.EventProcessor,\n\tprocessorName string,\n\tpcfg map[string]any,\n) ([]formatters.EventProcessor, bool, error) {\n\ttcs, ps, acts, err := pkgutils.GetConfigMaps(storeObj)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tfor i, epName := range eventProcessors {\n\t\tif epName == processorName {\n\t\t\tep, err := formatters.MakeProcessor(logger, processorName, pcfg, ps, tcs, acts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, false, err\n\t\t\t}\n\n\t\t\tif i >= len(currentEvps) {\n\t\t\t\treturn nil, false, fmt.Errorf(\"output processors are not properly initialized\")\n\t\t\t}\n\n\t\t\t// create new slice with updated processor\n\t\t\tnewEvps := make([]formatters.EventProcessor, len(currentEvps))\n\t\t\tcopy(newEvps, currentEvps)\n\t\t\tnewEvps[i] = ep\n\n\t\t\tlogger.Printf(\"updated event processor %s\", processorName)\n\t\t\treturn newEvps, true, nil\n\t\t}\n\t}\n\n\t// processor not found - return currentEvps\n\treturn currentEvps, false, nil\n}\n"
  },
  {
    "path": "pkg/outputs/prometheus_output/prometheus_common.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage prometheus_output\n\nimport (\n\t\"cmp\"\n\t\"errors\"\n\t\"fmt\"\n\t\"hash/fnv\"\n\t\"math\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"slices\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tdto \"github.com/prometheus/client_model/go\"\n\t\"github.com/prometheus/prometheus/model/labels\"\n\t\"github.com/prometheus/prometheus/prompb\"\n)\n\nconst (\n\tmetricNameRegex   = \"[^a-zA-Z0-9_]+\"\n\tdefaultMetricHelp = \"gNMIc generated metric\"\n)\n\nvar (\n\tMetricNameRegex = regexp.MustCompile(metricNameRegex)\n)\n\nvar stringBuilderPool = sync.Pool{\n\tNew: func() any {\n\t\treturn new(strings.Builder)\n\t},\n}\n\ntype PromMetric struct {\n\tName string\n\tTime *time.Time\n\t// AddedAt is used to expire metrics if the time field is not initialized\n\t// this happens when ExportTimestamp == false\n\tAddedAt time.Time\n\n\tlabels []prompb.Label\n\tvalue  float64\n}\n\n// Metric\nfunc (p *PromMetric) CalculateKey() uint64 {\n\th := fnv.New64a()\n\th.Write([]byte(p.Name))\n\tif len(p.labels) > 0 {\n\t\th.Write([]byte(\":\"))\n\t\tsort.Slice(p.labels, func(i, j int) bool {\n\t\t\treturn p.labels[i].Name < p.labels[j].Name\n\t\t})\n\t\tfor _, label := range p.labels {\n\t\t\th.Write([]byte(label.Name))\n\t\t\th.Write([]byte(\":\"))\n\t\t\th.Write([]byte(label.Value))\n\t\t\th.Write([]byte(\":\"))\n\t\t}\n\t}\n\treturn 
h.Sum64()\n}\n\nfunc (p *PromMetric) String() string {\n\tif p == nil {\n\t\treturn \"\"\n\t}\n\tsb := stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tsb.Reset()\n\t\tstringBuilderPool.Put(sb)\n\t}()\n\tsb.WriteString(\"name=\")\n\tsb.WriteString(p.Name)\n\tsb.WriteString(\",\")\n\tnumLabels := len(p.labels)\n\tif numLabels > 0 {\n\t\tsb.WriteString(\"labels=[\")\n\t\tfor i, lb := range p.labels {\n\t\t\tsb.WriteString(lb.Name)\n\t\t\tsb.WriteString(\"=\")\n\t\t\tsb.WriteString(lb.Value)\n\t\t\tif i < numLabels-1 {\n\t\t\t\tsb.WriteString(\",\")\n\t\t\t}\n\t\t}\n\t\tsb.WriteString(\"],\")\n\t}\n\tsb.WriteString(fmt.Sprintf(\"value=%f,\", p.value))\n\tsb.WriteString(\"time=\")\n\tif p.Time != nil {\n\t\tsb.WriteString(p.Time.String())\n\t} else {\n\t\tsb.WriteString(\"nil\")\n\t}\n\tsb.WriteString(\",addedAt=\")\n\tsb.WriteString(p.AddedAt.String())\n\treturn sb.String()\n}\n\n// Desc implements prometheus.Metric\nfunc (p *PromMetric) Desc() *prometheus.Desc {\n\tlabelNames := make([]string, 0, len(p.labels))\n\tfor _, label := range p.labels {\n\t\tlabelNames = append(labelNames, label.Name)\n\t}\n\n\treturn prometheus.NewDesc(p.Name, defaultMetricHelp, labelNames, nil)\n}\n\n// Write implements prometheus.Metric\nfunc (p *PromMetric) Write(out *dto.Metric) error {\n\tout.Untyped = &dto.Untyped{\n\t\tValue: &p.value,\n\t}\n\tout.Label = make([]*dto.LabelPair, 0, len(p.labels))\n\tfor i := range p.labels {\n\t\tout.Label = append(out.Label, &dto.LabelPair{Name: &p.labels[i].Name, Value: &p.labels[i].Value})\n\t}\n\tif p.Time == nil {\n\t\treturn nil\n\t}\n\ttimestamp := p.Time.UnixNano() / 1000000\n\tout.TimestampMs = &timestamp\n\treturn nil\n}\n\nfunc (mb *MetricBuilder) MetricsFromEvent(ev *formatters.EventMsg, now time.Time) []*PromMetric {\n\tpms := make([]*PromMetric, 0, len(ev.Values))\n\tlabels := mb.GetLabels(ev)\n\tfor vName, val := range ev.Values {\n\t\tv, err := toFloat(val)\n\t\tif err != nil {\n\t\t\tif !mb.StringsAsLabels 
{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tv = 1.0\n\t\t}\n\t\tpm := &PromMetric{\n\t\t\tName:    mb.MetricName(ev.Name, vName),\n\t\t\tlabels:  labels,\n\t\t\tvalue:   v,\n\t\t\tAddedAt: now,\n\t\t}\n\t\tif mb.OverrideTimestamps && mb.ExportTimestamps {\n\t\t\tev.Timestamp = now.UnixNano()\n\t\t}\n\t\tif mb.ExportTimestamps {\n\t\t\ttm := time.Unix(0, ev.Timestamp)\n\t\t\tpm.Time = &tm\n\t\t}\n\t\tpms = append(pms, pm)\n\t}\n\treturn pms\n}\n\ntype MetricBuilder struct {\n\tPrefix                 string\n\tAppendSubscriptionName bool\n\tStringsAsLabels        bool\n\tOverrideTimestamps     bool\n\tExportTimestamps       bool\n}\n\nfunc (m *MetricBuilder) GetLabels(ev *formatters.EventMsg) []prompb.Label {\n\tlabels := make([]prompb.Label, 0, len(ev.Tags))\n\taddedLabels := make(map[string]struct{})\n\tfor k, v := range ev.Tags {\n\t\tlabelName := MetricNameRegex.ReplaceAllString(path.Base(k), \"_\")\n\t\tif _, ok := addedLabels[labelName]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tlabels = append(labels, prompb.Label{Name: labelName, Value: v})\n\t\taddedLabels[labelName] = struct{}{}\n\t}\n\tif !m.StringsAsLabels {\n\t\treturn labels\n\t}\n\tlabelsFromValues := buildUniqueLabelsFromValues(ev.Values, addedLabels)\n\tlabels = append(labels, labelsFromValues...)\n\treturn labels\n}\n\nfunc toFloat(v interface{}) (float64, error) {\n\tswitch i := v.(type) {\n\tcase float64:\n\t\treturn float64(i), nil\n\tcase float32:\n\t\treturn float64(i), nil\n\tcase int64:\n\t\treturn float64(i), nil\n\tcase int32:\n\t\treturn float64(i), nil\n\tcase int16:\n\t\treturn float64(i), nil\n\tcase int8:\n\t\treturn float64(i), nil\n\tcase uint64:\n\t\treturn float64(i), nil\n\tcase uint32:\n\t\treturn float64(i), nil\n\tcase uint16:\n\t\treturn float64(i), nil\n\tcase uint8:\n\t\treturn float64(i), nil\n\tcase int:\n\t\treturn float64(i), nil\n\tcase uint:\n\t\treturn float64(i), nil\n\tcase bool:\n\t\tif i {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase string:\n\t\tf, err := 
strconv.ParseFloat(i, 64)\n\t\tif err != nil {\n\t\t\treturn math.NaN(), err\n\t\t}\n\t\treturn f, err\n\t\t//lint:ignore SA1019 still need DecimalVal for backward compatibility\n\tcase *gnmi.Decimal64:\n\t\treturn float64(i.Digits) / math.Pow10(int(i.Precision)), nil\n\tdefault:\n\t\treturn math.NaN(), errors.New(\"toFloat: unknown value is of incompatible type\")\n\t}\n}\n\n// MetricName generates the prometheus metric name based on the output plugin,\n// the measurement name and the value name.\n// it makes sure the name matches the regex \"[^a-zA-Z0-9_]+\"\nfunc (m *MetricBuilder) MetricName(measName, valueName string) string {\n\tsb := stringBuilderPool.Get().(*strings.Builder)\n\tdefer func() {\n\t\tsb.Reset()\n\t\tstringBuilderPool.Put(sb)\n\t}()\n\tif m.Prefix != \"\" {\n\t\tsb.WriteString(MetricNameRegex.ReplaceAllString(m.Prefix, \"_\"))\n\t\tsb.WriteString(\"_\")\n\t}\n\tif m.AppendSubscriptionName {\n\t\tsb.WriteString(strings.TrimRight(MetricNameRegex.ReplaceAllString(measName, \"_\"), \"_\"))\n\t\tsb.WriteString(\"_\")\n\t}\n\tsb.WriteString(strings.TrimLeft(MetricNameRegex.ReplaceAllString(valueName, \"_\"), \"_\"))\n\treturn sb.String()\n}\n\ntype NamedTimeSeries struct {\n\tName string\n\tTS   *prompb.TimeSeries\n}\n\nfunc (m *MetricBuilder) TimeSeriesFromEvent(ev *formatters.EventMsg) []*NamedTimeSeries {\n\tpromTS := make([]*NamedTimeSeries, 0, len(ev.Values))\n\ttsLabels := m.GetLabels(ev)\n\ttimestamp := ev.Timestamp / int64(time.Millisecond)\n\tfor k, v := range ev.Values {\n\t\tfv, err := toFloat(v)\n\t\tif err != nil {\n\t\t\tif !m.StringsAsLabels {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfv = 1.0\n\t\t}\n\t\ttsName := m.MetricName(ev.Name, k)\n\t\ttsLabelsWithName := make([]prompb.Label, 0, len(tsLabels)+1)\n\t\ttsLabelsWithName = append(tsLabelsWithName, tsLabels...)\n\t\ttsLabelsWithName = append(tsLabelsWithName,\n\t\t\tprompb.Label{\n\t\t\t\tName:  labels.MetricName,\n\t\t\t\tValue: tsName,\n\t\t\t})\n\n\t\t// The prometheus spec requires 
label names to be sorted\n\t\t// https://prometheus.io/docs/concepts/remote_write_spec/\n\t\tslices.SortFunc(tsLabelsWithName, func(a prompb.Label, b prompb.Label) int {\n\t\t\treturn cmp.Compare(a.Name, b.Name)\n\t\t})\n\n\t\tnts := &NamedTimeSeries{\n\t\t\tName: tsName,\n\t\t\tTS: &prompb.TimeSeries{\n\t\t\t\tLabels: tsLabelsWithName,\n\t\t\t\tSamples: []prompb.Sample{\n\t\t\t\t\t{\n\t\t\t\t\t\tValue:     fv,\n\t\t\t\t\t\tTimestamp: timestamp,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tpromTS = append(promTS, nts)\n\t}\n\treturn promTS\n}\n\ntype tempLabel struct {\n\tpath        string // xpath\n\tname        string // label name\n\tvalue       string // label value\n\tsuffixCount int    // suffix count to handle duplicates\n}\n\nfunc labelNameFromPath(path string, numElems int) string {\n\telems := strings.Split(path, \"/\")\n\tnonEmpty := make([]string, 0, len(elems))\n\tfor _, e := range elems {\n\t\tif e != \"\" {\n\t\t\tnonEmpty = append(nonEmpty, e)\n\t\t}\n\t}\n\tif numElems > len(nonEmpty) {\n\t\tnumElems = len(nonEmpty)\n\t}\n\tselected := nonEmpty[len(nonEmpty)-numElems:]\n\treturn MetricNameRegex.ReplaceAllString(strings.Join(selected, \"_\"), \"_\")\n}\n\nfunc buildUniqueLabelsFromValues(values map[string]any, addedLabels map[string]struct{}) []prompb.Label {\n\ttempLabels := make([]tempLabel, 0, len(values))\n\tvar err error\n\t// gather strings and booleans as labels\n\tfor k, v := range values {\n\t\t_, err = toFloat(v)\n\t\tif err == nil {\n\t\t\tcontinue\n\t\t}\n\t\tval := \"\"\n\t\tswitch v := v.(type) {\n\t\tcase string:\n\t\t\tval = v\n\t\tcase bool:\n\t\t\tval = strconv.FormatBool(v)\n\t\t}\n\t\tlabelName := MetricNameRegex.ReplaceAllString(filepath.Base(k), \"_\")\n\t\ttempLabels = append(tempLabels, tempLabel{\n\t\t\tpath:        k,\n\t\t\tname:        labelName,\n\t\t\tvalue:       val,\n\t\t\tsuffixCount: 1,\n\t\t})\n\t}\n\n\t// resolve duplicate label names by including more xpath elements\n\t// from the end of the path until all 
names are unique and don't\n\t// collide with already added label tags.\n\tfor {\n\t\tgroups := make(map[string][]int, len(tempLabels))\n\t\tfor idx, l := range tempLabels {\n\t\t\tgroups[l.name] = append(groups[l.name], idx)\n\t\t}\n\n\t\tchanged := false\n\t\tfor name, indices := range groups {\n\t\t\t_, alreadyAdded := addedLabels[name]\n\t\t\tif len(indices) <= 1 && !alreadyAdded {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, idx := range indices {\n\t\t\t\ttempLabels[idx].suffixCount++\n\t\t\t\tnewName := labelNameFromPath(tempLabels[idx].path, tempLabels[idx].suffixCount)\n\t\t\t\tif newName != tempLabels[idx].name {\n\t\t\t\t\ttempLabels[idx].name = newName\n\t\t\t\t\tchanged = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif !changed {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// drop any labels that still collide after exhausting path elements.\n\ttaken := make(map[string]struct{}, len(addedLabels)+len(tempLabels))\n\tfor k := range addedLabels {\n\t\ttaken[k] = struct{}{}\n\t}\n\tresult := make([]prompb.Label, 0, len(tempLabels))\n\tfor _, l := range tempLabels {\n\t\tif _, exists := taken[l.name]; exists {\n\t\t\tcontinue\n\t\t}\n\t\ttaken[l.name] = struct{}{}\n\t\tresult = append(result, prompb.Label{Name: l.name, Value: l.value})\n\t}\n\treturn result\n}\n"
  },
  {
    "path": "pkg/outputs/prometheus_output/prometheus_common_test.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage prometheus_output\n\nimport (\n\t\"cmp\"\n\t\"slices\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/prometheus/prometheus/model/labels\"\n\t\"github.com/prometheus/prometheus/prompb\"\n)\n\nvar metricNameSet = map[string]struct {\n\tp         *MetricBuilder\n\tmeasName  string // aka subscription name\n\tvalueName string\n\twant      string\n}{\n\t\"with_prefix_with_subscription_with_value_no-append-subsc\": {\n\t\tp: &MetricBuilder{\n\t\t\tPrefix: \"gnmic\",\n\t\t},\n\t\tmeasName:  \"sub\",\n\t\tvalueName: \"value\",\n\t\twant:      \"gnmic_value\",\n\t},\n\t\"with_prefix_with_subscription_with_value_with_append-subsc\": {\n\t\tp: &MetricBuilder{\n\t\t\tPrefix:                 \"gnmic\",\n\t\t\tAppendSubscriptionName: true,\n\t\t},\n\t\tmeasName:  \"sub\",\n\t\tvalueName: \"value\",\n\t\twant:      \"gnmic_sub_value\",\n\t},\n\t\"with_prefix-bad-chars_with_subscription_with_value_with_append-subsc\": {\n\t\tp: &MetricBuilder{\n\t\t\tPrefix:                 \"gnmic-prefix\",\n\t\t\tAppendSubscriptionName: true,\n\t\t},\n\n\t\tmeasName:  \"sub\",\n\t\tvalueName: \"value\",\n\t\twant:      \"gnmic_prefix_sub_value\",\n\t},\n\t\"without_prefix_with_subscription_with_value_no-append-subsc\": {\n\t\tp:         &MetricBuilder{},\n\t\tmeasName:  \"sub\",\n\t\tvalueName: \"value\",\n\t\twant:      \"value\",\n\t},\n\t\"without_prefix_with_subscription_with_value_with_append-subsc\": {\n\t\tp: &MetricBuilder{\n\t\t\tAppendSubscriptionName: 
true,\n\t\t},\n\t\tmeasName:  \"sub\",\n\t\tvalueName: \"value\",\n\t\twant:      \"sub_value\",\n\t},\n\t\"without_prefix_with_subscription-bad-chars_with_value-bad-chars_with_append-subsc\": {\n\t\tp: &MetricBuilder{\n\t\t\tAppendSubscriptionName: true,\n\t\t},\n\t\tmeasName:  \"sub-name\",\n\t\tvalueName: \"value-name2\",\n\t\twant:      \"sub_name_value_name2\",\n\t},\n}\n\nfunc TestTimeSeriesFromEvent(t *testing.T) {\n\tmetricBuilder := &MetricBuilder{StringsAsLabels: true}\n\tevent := &formatters.EventMsg{\n\t\tName:      \"eventName\",\n\t\tTimestamp: 12345,\n\t\tTags: map[string]string{\n\t\t\t\"tagName\": \"tagVal\",\n\t\t},\n\t\tValues: map[string]interface{}{\n\t\t\t\"strName1\": \"strVal1\",\n\t\t\t\"strName2\": \"strVal2\",\n\t\t\t\"intName1\": 1,\n\t\t\t\"intName2\": 2,\n\t\t},\n\t\tDeletes: []string{},\n\t}\n\tfor _, nts := range metricBuilder.TimeSeriesFromEvent(event) {\n\t\tfor _, label := range nts.TS.Labels {\n\t\t\tif label.Name == labels.MetricName && label.Value != nts.Name {\n\t\t\t\tt.Errorf(\"__name__ label wrong, expected '%s', got '%s'\", nts.Name, label.Value)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestTimeSeriesLabelsSorted(t *testing.T) {\n\tmetricBuilder := &MetricBuilder{StringsAsLabels: true}\n\tevent := &formatters.EventMsg{\n\t\tName:      \"eventName\",\n\t\tTimestamp: 12345,\n\t\tTags: map[string]string{\n\t\t\t\"tagName\": \"tagVal\",\n\t\t},\n\t\tValues: map[string]interface{}{\n\t\t\t\"strName1\": \"strVal1\",\n\t\t\t\"strName2\": \"strVal2\",\n\t\t\t\"intName1\": 1,\n\t\t\t\"intName2\": 2,\n\t\t},\n\t\tDeletes: []string{},\n\t}\n\tfor _, nts := range metricBuilder.TimeSeriesFromEvent(event) {\n\t\tareLabelsSorted := slices.IsSortedFunc(nts.TS.Labels, func(a prompb.Label, b prompb.Label) int {\n\t\t\treturn cmp.Compare(a.Name, b.Name)\n\t\t})\n\t\tif !areLabelsSorted {\n\t\t\tt.Errorf(\"labels names are not sorted, got '%v'\", nts.TS.Labels)\n\t\t}\n\t}\n}\n\nfunc TestMetricName(t *testing.T) {\n\tfor name, tc := range 
metricNameSet {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tgot := tc.p.MetricName(tc.measName, tc.valueName)\n\t\t\tif got != tc.want {\n\t\t\t\tt.Errorf(\"failed at '%s', expected %v, got %+v\", name, tc.want, got)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc BenchmarkMetricName(b *testing.B) {\n\tfor name, tc := range metricNameSet {\n\t\tb.Run(name, func(b *testing.B) {\n\t\t\tb.ReportAllocs()\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\ttc.p.MetricName(tc.measName, tc.valueName)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_buildUniqueLabelsFromValues(t *testing.T) {\n\ttests := []struct {\n\t\tname        string\n\t\tvalues      map[string]any\n\t\taddedLabels map[string]struct{}\n\t\twant        []prompb.Label\n\t}{\n\t\t{\n\t\t\tname: \"no_duplicates\",\n\t\t\tvalues: map[string]any{\n\t\t\t\t\"a/b/c\": \"a\",\n\t\t\t\t\"a/b/d\": \"b\",\n\t\t\t\t\"a/b/e\": \"c\",\n\t\t\t},\n\t\t\twant: []prompb.Label{\n\t\t\t\t{Name: \"c\", Value: \"a\"},\n\t\t\t\t{Name: \"d\", Value: \"b\"},\n\t\t\t\t{Name: \"e\", Value: \"c\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"with_duplicates\",\n\t\t\tvalues: map[string]any{\n\t\t\t\t\"a/a/name\": \"a\",\n\t\t\t\t\"a/b/name\": \"b\",\n\t\t\t\t\"a/c/name\": \"c\",\n\t\t\t},\n\t\t\twant: []prompb.Label{\n\t\t\t\t{Name: \"a_name\", Value: \"a\"},\n\t\t\t\t{Name: \"b_name\", Value: \"b\"},\n\t\t\t\t{Name: \"c_name\", Value: \"c\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"with_duplicates_3_elements\",\n\t\t\tvalues: map[string]any{\n\t\t\t\t\"a/a/name\": \"a\",\n\t\t\t\t\"b/a/name\": \"b\",\n\t\t\t\t\"c/a/name\": \"c\",\n\t\t\t},\n\t\t\twant: []prompb.Label{\n\t\t\t\t{Name: \"a_a_name\", Value: \"a\"},\n\t\t\t\t{Name: \"b_a_name\", Value: \"b\"},\n\t\t\t\t{Name: \"c_a_name\", Value: \"c\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"with_duplicates_and_floats\",\n\t\t\tvalues: map[string]any{\n\t\t\t\t\"a/a/name\": \"a\",\n\t\t\t\t\"a/b/name\": \"b\",\n\t\t\t\t\"a/c/name\": \"1\",\n\t\t\t},\n\t\t\twant: []prompb.Label{\n\t\t\t\t{Name: \"a_name\", Value: 
\"a\"},\n\t\t\t\t{Name: \"b_name\", Value: \"b\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"collision_with_added_labels\",\n\t\t\tvalues: map[string]any{\n\t\t\t\t\"a/b/name\": \"val\",\n\t\t\t},\n\t\t\taddedLabels: map[string]struct{}{\n\t\t\t\t\"name\": {},\n\t\t\t},\n\t\t\twant: []prompb.Label{\n\t\t\t\t{Name: \"b_name\", Value: \"val\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"collision_with_added_labels_and_duplicates\",\n\t\t\tvalues: map[string]any{\n\t\t\t\t\"a/b/name\": \"v1\",\n\t\t\t\t\"a/c/name\": \"v2\",\n\t\t\t},\n\t\t\taddedLabels: map[string]struct{}{\n\t\t\t\t\"name\": {},\n\t\t\t},\n\t\t\twant: []prompb.Label{\n\t\t\t\t{Name: \"b_name\", Value: \"v1\"},\n\t\t\t\t{Name: \"c_name\", Value: \"v2\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"collision_with_added_labels_and_duplicates_2\",\n\t\t\tvalues: map[string]any{\n\t\t\t\t\"a/b/name\": \"v1\",\n\t\t\t\t\"a/c/name\": \"v2\",\n\t\t\t},\n\t\t\taddedLabels: map[string]struct{}{\n\t\t\t\t\"b_name\": {},\n\t\t\t},\n\t\t\twant: []prompb.Label{\n\t\t\t\t{Name: \"a_b_name\", Value: \"v1\"},\n\t\t\t\t{Name: \"c_name\", Value: \"v2\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"collision_with_added_labels_full_path_exhausted\",\n\t\t\tvalues: map[string]any{\n\t\t\t\t\"a/b/name\": \"val\",\n\t\t\t},\n\t\t\taddedLabels: map[string]struct{}{\n\t\t\t\t\"name\":     {},\n\t\t\t\t\"b_name\":   {},\n\t\t\t\t\"a_b_name\": {},\n\t\t\t},\n\t\t\twant: []prompb.Label{},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\taddedLabels := tt.addedLabels\n\t\t\tif addedLabels == nil {\n\t\t\t\taddedLabels = make(map[string]struct{})\n\t\t\t}\n\t\t\tgot := buildUniqueLabelsFromValues(tt.values, addedLabels)\n\t\t\tif len(got) != len(tt.want) {\n\t\t\t\tt.Errorf(\"buildUniqueLabelsFromValues() length = %d, want %d\", len(got), len(tt.want))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsort.Slice(got, func(i, j int) bool {\n\t\t\t\treturn got[i].Name < got[j].Name\n\t\t\t})\n\t\t\tsort.Slice(tt.want, 
func(i, j int) bool {\n\t\t\t\treturn tt.want[i].Name < tt.want[j].Name\n\t\t\t})\n\t\t\tfor i, label := range got {\n\t\t\t\texpected := tt.want[i]\n\t\t\t\tif label.Name != expected.Name || label.Value != expected.Value {\n\t\t\t\t\tt.Errorf(\"Label mismatch at index %d: got %+v, want %+v\", i, label, expected)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestMetricBuilder_MetricsFromEvent(t *testing.T) {\n\ttests := []struct {\n\t\tname string // description of this test case\n\t\t// Named input parameters for target function.\n\t\tev   *formatters.EventMsg\n\t\tnow  time.Time\n\t\twant []*PromMetric\n\t}{\n\t\t{\n\t\t\tname: \"no_duplicates\",\n\t\t\tev: &formatters.EventMsg{\n\t\t\t\tName:      \"eventName\",\n\t\t\t\tTimestamp: 42,\n\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\"t1\": \"v1\",\n\t\t\t\t\t\"t2\": \"v2\",\n\t\t\t\t},\n\t\t\t\tValues: map[string]any{\n\t\t\t\t\t\"a/b/c\": \"1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tnow: time.Unix(0, 42),\n\t\t\twant: []*PromMetric{\n\t\t\t\t{\n\t\t\t\t\tName:  \"a_b_c\",\n\t\t\t\t\tvalue: 1,\n\t\t\t\t\tlabels: []prompb.Label{\n\t\t\t\t\t\t{Name: \"t1\", Value: \"v1\"},\n\t\t\t\t\t\t{Name: \"t2\", Value: \"v2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"no_duplicates_strings_as_labels\",\n\t\t\tev: &formatters.EventMsg{\n\t\t\t\tName:      \"eventName\",\n\t\t\t\tTimestamp: 42,\n\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\"t1\": \"v1\",\n\t\t\t\t\t\"t2\": \"v2\",\n\t\t\t\t},\n\t\t\t\tValues: map[string]any{\n\t\t\t\t\t\"a/b/c\": \"a\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tnow: time.Unix(0, 42),\n\t\t\twant: []*PromMetric{\n\t\t\t\t{\n\t\t\t\t\tName:  \"a_b_c\",\n\t\t\t\t\tvalue: 1,\n\t\t\t\t\tlabels: []prompb.Label{\n\t\t\t\t\t\t{Name: \"t1\", Value: \"v1\"},\n\t\t\t\t\t\t{Name: \"t2\", Value: \"v2\"},\n\t\t\t\t\t\t{Name: \"c\", Value: \"a\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"duplicates_strings_as_labels\",\n\t\t\tev: &formatters.EventMsg{\n\t\t\t\tName:      
\"eventName\",\n\t\t\t\tTimestamp: 42,\n\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\"t1\": \"v1\",\n\t\t\t\t\t\"t2\": \"v2\",\n\t\t\t\t},\n\t\t\t\tValues: map[string]any{\n\t\t\t\t\t\"a/a/c\": \"a\",\n\t\t\t\t\t\"a/b/c\": \"b\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tnow: time.Unix(0, 42),\n\t\t\twant: []*PromMetric{\n\t\t\t\t{\n\t\t\t\t\tName:  \"a_a_c\",\n\t\t\t\t\tvalue: 1,\n\t\t\t\t\tlabels: []prompb.Label{\n\t\t\t\t\t\t{Name: \"t1\", Value: \"v1\"},\n\t\t\t\t\t\t{Name: \"t2\", Value: \"v2\"},\n\t\t\t\t\t\t{Name: \"a_c\", Value: \"a\"},\n\t\t\t\t\t\t{Name: \"b_c\", Value: \"b\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:  \"a_b_c\",\n\t\t\t\t\tvalue: 1,\n\t\t\t\t\tlabels: []prompb.Label{\n\t\t\t\t\t\t{Name: \"t1\", Value: \"v1\"},\n\t\t\t\t\t\t{Name: \"t2\", Value: \"v2\"},\n\t\t\t\t\t\t{Name: \"a_c\", Value: \"a\"},\n\t\t\t\t\t\t{Name: \"b_c\", Value: \"b\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tmb := &MetricBuilder{\n\t\t\t\tStringsAsLabels: true,\n\t\t\t}\n\t\t\tgot := mb.MetricsFromEvent(tt.ev, tt.now)\n\t\t\tif len(got) != len(tt.want) {\n\t\t\t\tt.Errorf(\"MetricsFromEvent() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t\tsort.Slice(got, func(i, j int) bool {\n\t\t\t\treturn got[i].Name < got[j].Name\n\t\t\t})\n\t\t\tsort.Slice(tt.want, func(i, j int) bool {\n\t\t\t\treturn tt.want[i].Name < tt.want[j].Name\n\t\t\t})\n\t\t\tfor i, pm := range got {\n\t\t\t\texpected := tt.want[i]\n\t\t\t\tif pm.Name != expected.Name || pm.value != expected.value {\n\t\t\t\t\tt.Errorf(\"Metric mismatch at index %d: got %+v, want %+v\", i, pm, expected)\n\t\t\t\t}\n\t\t\t\tif len(pm.labels) != len(expected.labels) {\n\t\t\t\t\tt.Errorf(\"Metric labels mismatch at index %d: got %+v, want %+v\", i, pm.labels, expected.labels)\n\t\t\t\t}\n\t\t\t\tsort.Slice(pm.labels, func(i, j int) bool {\n\t\t\t\t\treturn pm.labels[i].Name < 
pm.labels[j].Name\n\t\t\t\t})\n\t\t\t\tsort.Slice(expected.labels, func(i, j int) bool {\n\t\t\t\t\treturn expected.labels[i].Name < expected.labels[j].Name\n\t\t\t\t})\n\t\t\t\tfor j, label := range pm.labels {\n\t\t\t\t\texpectedLabel := expected.labels[j]\n\t\t\t\t\tif label.Name != expectedLabel.Name || label.Value != expectedLabel.Value {\n\t\t\t\t\t\tt.Errorf(\"Metric label mismatch at index %d: got %+v, want %+v\", j, label, expectedLabel)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/outputs/prometheus_output/prometheus_output/prometheus_cache.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage prometheus_output\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\n\t\"github.com/openconfig/gnmic/pkg/cache\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n)\n\nfunc (p *prometheusOutput) collectFromCache(ch chan<- prometheus.Metric) {\n\tnotifications, err := p.gnmiCache.ReadAll()\n\tif err != nil {\n\t\tp.logger.Printf(\"failed to read from cache: %v\", err)\n\t\treturn\n\t}\n\tcfg := p.cfg.Load()\n\tdc := p.dynCfg.Load()\n\tif cfg == nil || dc == nil {\n\t\treturn\n\t}\n\tnumNotifications := len(notifications)\n\tprometheusNumberOfCachedMetrics.WithLabelValues(cfg.Name).Set(float64(numNotifications))\n\n\tp.targetsMeta.DeleteExpired()\n\tevents := make([]*formatters.EventMsg, 0, numNotifications)\n\tfor subName, notifs := range notifications {\n\t\t// build events without processors\n\t\tfor _, notif := range notifs {\n\t\t\ttargetName := notif.GetPrefix().GetTarget()\n\t\t\tvar meta outputs.Meta\n\t\t\tif item := p.targetsMeta.Get(subName + \"/\" + targetName); item != nil {\n\t\t\t\tmeta = item.Value()\n\t\t\t}\n\t\t\tievents, err := formatters.ResponseToEventMsgs(\n\t\t\t\tsubName,\n\t\t\t\t&gnmi.SubscribeResponse{\n\t\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{Update: notif},\n\t\t\t\t},\n\t\t\t\tmeta)\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Printf(\"failed to convert gNMI notifications to events: %v\", 
err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tevents = append(events, ievents...)\n\t\t}\n\t}\n\n\tif cfg.CacheConfig.Debug {\n\t\tp.logger.Printf(\"got %d events from cache pre processors\", len(events))\n\t}\n\tfor _, proc := range dc.evps {\n\t\tevents = proc.Apply(events...)\n\t}\n\tif cfg.CacheConfig.Debug {\n\t\tp.logger.Printf(\"got %d events from cache post processors\", len(events))\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), cfg.Timeout)\n\tdefer cancel()\n\tnow := time.Now()\n\tfor _, ev := range events {\n\t\tfor _, pm := range dc.mb.MetricsFromEvent(ev, now) {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tp.logger.Printf(\"collection context terminated: %v\", ctx.Err())\n\t\t\t\treturn\n\t\t\tcase ch <- pm:\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc cacheEqual(a, b *cache.Config) bool {\n\tif a == nil && b == nil {\n\t\treturn true\n\t}\n\treturn a != nil &&\n\t\tb != nil &&\n\t\ta.Expiration == b.Expiration &&\n\t\ta.Debug == b.Debug &&\n\t\ta.Address == b.Address &&\n\t\ta.Timeout == b.Timeout &&\n\t\ta.Type == b.Type &&\n\t\ta.Username == b.Username &&\n\t\ta.Password == b.Password &&\n\t\ta.MaxBytes == b.MaxBytes &&\n\t\ta.MaxMsgsPerSubscription == b.MaxMsgsPerSubscription &&\n\t\ta.FetchBatchSize == b.FetchBatchSize &&\n\t\ta.FetchWaitTime == b.FetchWaitTime\n}\n"
  },
  {
    "path": "pkg/outputs/prometheus_output/prometheus_output/prometheus_metrics.go",
    "content": "// © 2023 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage prometheus_output\n\nimport (\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nconst (\n\tnamespace = \"gnmic\"\n\tsubsystem = \"prometheus_output\"\n)\n\nvar registerMetricsOnce sync.Once\n\nvar prometheusNumberOfMetrics = prometheus.NewGaugeVec(\n\tprometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName:      \"number_of_prometheus_metrics_total\",\n\t\tHelp:      \"Number of metrics stored by the prometheus output\",\n\t}, []string{\"name\"})\n\nvar prometheusNumberOfCachedMetrics = prometheus.NewGaugeVec(\n\tprometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName:      \"number_of_prometheus_cached_metrics_total\",\n\t\tHelp:      \"Number of metrics cached by the prometheus output\",\n\t}, []string{\"name\"})\n\nfunc (p *prometheusOutput) initMetrics(cfg *config) {\n\tif cfg.CacheConfig == nil {\n\t\tprometheusNumberOfMetrics.WithLabelValues(cfg.Name).Set(0)\n\t\treturn\n\t}\n\tprometheusNumberOfCachedMetrics.WithLabelValues(cfg.Name).Set(0)\n}\n\nfunc (p *prometheusOutput) registerMetrics() error {\n\tcfg := p.cfg.Load()\n\tif cfg == nil {\n\t\treturn nil\n\t}\n\tif !cfg.EnableMetrics {\n\t\treturn nil\n\t}\n\tif p.reg == nil {\n\t\treturn nil\n\t}\n\tvar err error\n\tregisterMetricsOnce.Do(func() {\n\t\tif cfg.CacheConfig == nil {\n\t\t\terr = p.reg.Register(prometheusNumberOfMetrics)\n\t\t\treturn\n\t\t}\n\t\terr = p.reg.Register(prometheusNumberOfCachedMetrics)\n\t})\n\tp.initMetrics(cfg)\n\treturn 
err\n}\n"
  },
  {
    "path": "pkg/outputs/prometheus_output/prometheus_output/prometheus_output.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage prometheus_output\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"github.com/google/uuid\"\n\t\"github.com/hashicorp/consul/api\"\n\t\"github.com/jellydator/ttlcache/v3\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/cache\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\tpromcom \"github.com/openconfig/gnmic/pkg/outputs/prometheus_output\"\n\tgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\toutputType        = \"prometheus\"\n\tdefaultListen     = \":9804\"\n\tdefaultPath       = \"/metrics\"\n\tdefaultExpiration = time.Minute\n\tloggingPrefix     = \"[prometheus_output:%s] \"\n\t// this is used to timeout the collection method\n\t// in case it drags for too long\n\tdefaultTimeout    = 10 * time.Second\n\tdefaultNumWorkers = 1\n)\n\nfunc init() {\n\toutputs.Register(outputType, func() outputs.Output {\n\t\treturn &prometheusOutput{}\n\t})\n}\n\ntype 
prometheusOutput struct {\n\toutputs.BaseOutput\n\n\tcfg       *atomic.Pointer[config]\n\tdynCfg    *atomic.Pointer[dynConfig]\n\tlogger    *log.Logger\n\teventChan chan *formatters.EventMsg\n\tmsgChan   chan *outputs.ProtoMsg\n\n\twg     *sync.WaitGroup\n\tserver *http.Server\n\n\tsync.Mutex\n\tentries map[uint64]*promcom.PromMetric\n\n\tconsulClient *api.Client\n\n\tgnmiCache   cache.Cache\n\ttargetsMeta *ttlcache.Cache[string, outputs.Meta]\n\n\treg    *prometheus.Registry\n\tstore  store.Store[any]\n\trunCfn context.CancelFunc\n\trunCtx context.Context\n}\n\ntype dynConfig struct {\n\ttargetTpl *template.Template\n\tevps      []formatters.EventProcessor\n\tmb        *promcom.MetricBuilder\n}\n\ntype config struct {\n\tName                   string               `mapstructure:\"name,omitempty\" json:\"name,omitempty\"`\n\tListen                 string               `mapstructure:\"listen,omitempty\" json:\"listen,omitempty\"`\n\tTLS                    *types.TLSConfig     `mapstructure:\"tls,omitempty\" json:\"tls,omitempty\"`\n\tPath                   string               `mapstructure:\"path,omitempty\" json:\"path,omitempty\"`\n\tExpiration             time.Duration        `mapstructure:\"expiration,omitempty\" json:\"expiration,omitempty\"`\n\tMetricPrefix           string               `mapstructure:\"metric-prefix,omitempty\" json:\"metric-prefix,omitempty\"`\n\tAppendSubscriptionName bool                 `mapstructure:\"append-subscription-name,omitempty\" json:\"append-subscription-name,omitempty\"`\n\tExportTimestamps       bool                 `mapstructure:\"export-timestamps,omitempty\" json:\"export-timestamps,omitempty\"`\n\tOverrideTimestamps     bool                 `mapstructure:\"override-timestamps,omitempty\" json:\"override-timestamps,omitempty\"`\n\tAddTarget              string               `mapstructure:\"add-target,omitempty\" json:\"add-target,omitempty\"`\n\tTargetTemplate         string               
`mapstructure:\"target-template,omitempty\" json:\"target-template,omitempty\"`\n\tStringsAsLabels        bool                 `mapstructure:\"strings-as-labels,omitempty\" json:\"strings-as-labels,omitempty\"`\n\tDebug                  bool                 `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\tEventProcessors        []string             `mapstructure:\"event-processors,omitempty\" json:\"event-processors,omitempty\"`\n\tServiceRegistration    *serviceRegistration `mapstructure:\"service-registration,omitempty\" json:\"service-registration,omitempty\"`\n\tTimeout                time.Duration        `mapstructure:\"timeout,omitempty\" json:\"timeout,omitempty\"`\n\tCacheConfig            *cache.Config        `mapstructure:\"cache,omitempty\" json:\"cache-config,omitempty\"`\n\tNumWorkers             int                  `mapstructure:\"num-workers,omitempty\" json:\"num-workers,omitempty\"`\n\tEnableMetrics          bool                 `mapstructure:\"enable-metrics,omitempty\" json:\"enable-metrics,omitempty\"`\n\n\tclusterName string\n\taddress     string\n\tport        int\n}\n\nfunc (p *prometheusOutput) String() string {\n\tcfg := p.cfg.Load()\n\tif cfg == nil {\n\t\treturn \"\"\n\t}\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (p *prometheusOutput) buildEventProcessors(cfg *config) ([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := gutils.GetConfigMaps(p.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn formatters.MakeEventProcessors(p.logger, cfg.EventProcessors, ps, tcs, acts)\n}\n\nfunc (p *prometheusOutput) setLogger(logger *log.Logger) {\n\tif logger != nil && p.logger != nil {\n\t\tp.logger.SetOutput(logger.Writer())\n\t\tp.logger.SetFlags(logger.Flags())\n\t}\n}\n\nfunc (p *prometheusOutput) init() {\n\tp.cfg = new(atomic.Pointer[config])\n\tp.dynCfg = new(atomic.Pointer[dynConfig])\n\tp.logger = log.New(os.Stderr, loggingPrefix, 
utils.DefaultLoggingFlags)\n\tp.eventChan = make(chan *formatters.EventMsg)\n\tp.msgChan = make(chan *outputs.ProtoMsg, 10_000)\n\tp.wg = new(sync.WaitGroup)\n\tp.entries = make(map[uint64]*promcom.PromMetric)\n}\n\nfunc (p *prometheusOutput) Init(ctx context.Context, name string, cfg map[string]any, opts ...outputs.Option) error {\n\tp.init() // init struct fields\n\tp.runCtx, p.runCfn = context.WithCancel(ctx)\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif newCfg.Name == \"\" {\n\t\tnewCfg.Name = name\n\t}\n\n\tp.logger.SetPrefix(fmt.Sprintf(loggingPrefix, newCfg.Name))\n\n\toptions := &outputs.OutputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tp.store = options.Store\n\n\t// apply logger\n\tp.setLogger(options.Logger)\n\n\t// set defaults\n\terr = p.setDefaultsFor(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// initialize registry\n\tp.reg = options.Registry\n\terr = p.registerMetrics()\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.setName(options.Name, newCfg)\n\tp.setClusterName(options.ClusterName, newCfg)\n\n\tp.cfg.Store(newCfg)\n\n\tdc := new(dynConfig)\n\n\t// initialize target template\n\tif newCfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tdc.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\t// initialize event processors\n\tdc.evps, err = p.buildEventProcessors(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc.mb = &promcom.MetricBuilder{\n\t\tPrefix:                 newCfg.MetricPrefix,\n\t\tAppendSubscriptionName: newCfg.AppendSubscriptionName,\n\t\tStringsAsLabels:        newCfg.StringsAsLabels,\n\t\tOverrideTimestamps:     newCfg.OverrideTimestamps,\n\t\tExportTimestamps:   
    newCfg.ExportTimestamps,\n\t}\n\n\tp.dynCfg.Store(dc)\n\n\tif newCfg.CacheConfig != nil {\n\t\tp.gnmiCache, err = cache.New(\n\t\t\tnewCfg.CacheConfig,\n\t\t\tcache.WithLogger(p.logger),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.targetsMeta = ttlcache.New(ttlcache.WithTTL[string, outputs.Meta](newCfg.Expiration))\n\t}\n\n\t// create prometheus registry\n\tregistry := prometheus.NewRegistry()\n\n\terr = registry.Register(p)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// create http server\n\tpromHandler := promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError})\n\n\tmux := http.NewServeMux()\n\tmux.Handle(newCfg.Path, promHandler)\n\n\tp.server = &http.Server{\n\t\tAddr:    newCfg.Listen,\n\t\tHandler: mux,\n\t}\n\n\t// create tcp listener\n\tlistener, err := p.createListenerFor(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// start worker\n\tp.wg.Add(newCfg.NumWorkers)\n\tfor i := 0; i < newCfg.NumWorkers; i++ {\n\t\tgo p.worker(p.runCtx)\n\t}\n\n\tif newCfg.CacheConfig == nil {\n\t\tp.wg.Add(1)\n\t\tgo func() {\n\t\t\tdefer p.wg.Done()\n\t\t\tp.expireMetricsPeriodic(p.runCtx)\n\t\t}()\n\t}\n\n\tp.wg.Add(1)\n\tgo func() {\n\t\tdefer p.wg.Done()\n\t\tdefer listener.Close()\n\t\terr = p.server.Serve(listener)\n\t\tif err != nil && err != http.ErrServerClosed {\n\t\t\tp.logger.Printf(\"prometheus server error: %v\", err)\n\t\t}\n\t}()\n\tgo p.registerService(p.runCtx)\n\tp.logger.Printf(\"initialized prometheus output: %s\", p.String())\n\treturn nil\n}\n\nfunc (p *prometheusOutput) Validate(cfg map[string]any) error {\n\tncfg := new(config)\n\terr := outputs.DecodeConfig(cfg, ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = p.setDefaultsFor(ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = gtemplate.CreateTemplate(\"target-template\", ncfg.TargetTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = gtemplate.CreateTemplate(\"target-template\", ncfg.TargetTemplate)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (p *prometheusOutput) Update(ctx context.Context, cfg map[string]any) error {\n\t// decode new config\n\tnewCfg := new(config)\n\tif err := outputs.DecodeConfig(cfg, newCfg); err != nil {\n\t\treturn err\n\t}\n\n\tcurrCfg := p.cfg.Load()\n\tdc := new(dynConfig)\n\t// apply defaults and derived fields for the new config\n\ttmp := *newCfg    // copy for mutation\n\tif p.cfg != nil { // init name and service registration name, id and tags\n\t\ttmp.Name = currCfg.Name\n\t\tif currCfg.ServiceRegistration != nil {\n\t\t\tif tmp.ServiceRegistration.Name == \"\" {\n\t\t\t\ttmp.ServiceRegistration.Name = currCfg.ServiceRegistration.Name\n\t\t\t}\n\t\t\ttmp.ServiceRegistration.id = fmt.Sprintf(\"%s-%s\", tmp.ServiceRegistration.Name, tmp.Name)\n\t\t\ttmp.ServiceRegistration.Tags = append(tmp.ServiceRegistration.Tags, fmt.Sprintf(\"gnmic-instance=%s\", tmp.ServiceRegistration.Name))\n\t\t}\n\t}\n\tif err := p.setDefaultsFor(&tmp); err != nil { // factor setDefaults to accept *config\n\t\treturn err\n\t}\n\n\t// rebuild objects that depend on config\n\tdc.targetTpl = outputs.DefaultTargetTemplate\n\tif tmp.TargetTemplate != \"\" {\n\t\tt, err := gtemplate.CreateTemplate(\"target-template\", tmp.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = t.Funcs(outputs.TemplateFuncs)\n\t}\n\n\t// event processors\n\tvar err error\n\tprevDC := p.dynCfg.Load()\n\tif slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 {\n\t\tdc.evps, err = p.buildEventProcessors(&tmp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\n\t// metric builder\n\tdc.mb = &promcom.MetricBuilder{\n\t\tPrefix:                 tmp.MetricPrefix,\n\t\tAppendSubscriptionName: tmp.AppendSubscriptionName,\n\t\tStringsAsLabels:        tmp.StringsAsLabels,\n\t\tOverrideTimestamps:     tmp.OverrideTimestamps,\n\t\tExportTimestamps:       
tmp.ExportTimestamps,\n\t}\n\n\tp.dynCfg.Store(dc)\n\n\t// rebuild http objects if needed\n\trebuildHTTPServer := p.needHTTPRebuild(currCfg, &tmp)\n\tvar newServer *http.Server\n\tvar newListener net.Listener\n\tif rebuildHTTPServer {\n\t\treg := prometheus.NewRegistry()\n\t\tif err := reg.Register(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpromHandler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError})\n\t\tmux := http.NewServeMux()\n\t\tmux.Handle(tmp.Path, promHandler)\n\n\t\ts := &http.Server{\n\t\t\tAddr:    tmp.Listen,\n\t\t\tHandler: mux,\n\t\t}\n\t\tl, err := p.createListenerFor(&tmp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnewServer = s\n\t\tnewListener = l\n\t}\n\n\t// cache rebuild if CacheConfig toggled or changed\n\tvar newCache cache.Cache\n\tvar newTargetsMeta *ttlcache.Cache[string, outputs.Meta]\n\tif !cacheEqual(currCfg.CacheConfig, tmp.CacheConfig) {\n\t\tif tmp.CacheConfig != nil {\n\t\t\tc, err := cache.New(tmp.CacheConfig, cache.WithLogger(p.logger))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnewCache = c\n\t\t\tnewTargetsMeta = ttlcache.New(ttlcache.WithTTL[string, outputs.Meta](tmp.Expiration))\n\t\t}\n\t} else {\n\t\t// keep existing cache/meta if not changed\n\t\tp.Lock()\n\t\tnewCache = p.gnmiCache\n\t\tnewTargetsMeta = p.targetsMeta\n\t\tp.Unlock()\n\t}\n\n\t// swap under lock\n\tp.Lock()\n\toldServer := p.server\n\toldRunCfn := p.runCfn\n\toldCache := p.gnmiCache\n\n\tp.cfg.Store(&tmp)\n\n\tif rebuildHTTPServer {\n\t\tp.server = newServer\n\t}\n\tif newCache != nil || (oldCache != nil && tmp.CacheConfig == nil) {\n\t\tp.gnmiCache = newCache\n\t\tp.targetsMeta = newTargetsMeta\n\t}\n\t// create a new worker ctx\n\tp.runCtx, p.runCfn = context.WithCancel(ctx)\n\tp.Unlock()\n\n\t// Start/Restart components that changed\n\n\t// HTTP server\n\tif rebuildHTTPServer {\n\t\tif oldServer != nil {\n\t\t\t_ = oldServer.Close() // stop old server; Serve will exit\n\t\t}\n\t\t// 
start the new one\n\t\tp.wg.Add(1)\n\t\tgo func(srv *http.Server, l net.Listener) {\n\t\t\tdefer p.wg.Done()\n\t\t\tdefer l.Close()\n\t\t\tif err := srv.Serve(l); err != nil && err != http.ErrServerClosed {\n\t\t\t\tp.logger.Printf(\"prometheus server error: %v\", err)\n\t\t\t}\n\t\t}(newServer, newListener)\n\t}\n\n\t// workers (stop old, start new)\n\tif oldRunCfn != nil {\n\t\toldRunCfn()\n\t}\n\t// start workers with new num-workers\n\tp.wg.Add(tmp.NumWorkers)\n\tfor i := 0; i < tmp.NumWorkers; i++ {\n\t\tgo p.worker(p.runCtx)\n\t}\n\tif tmp.CacheConfig == nil {\n\t\tp.wg.Add(1)\n\t\tgo func() {\n\t\t\tdefer p.wg.Done()\n\t\t\tp.expireMetricsPeriodic(p.runCtx)\n\t\t}()\n\t}\n\n\t// restart service registration\n\tgo p.registerService(p.runCtx)\n\n\tp.logger.Printf(\"updated prometheus output: %s\", p.String())\n\treturn nil\n}\n\nfunc (p *prometheusOutput) UpdateProcessor(name string, pcfg map[string]any) error {\n\tcfg := p.cfg.Load()\n\tdc := p.dynCfg.Load()\n\n\tnewEvps, changed, err := outputs.UpdateProcessorInSlice(\n\t\tp.logger,\n\t\tp.store,\n\t\tcfg.EventProcessors,\n\t\tdc.evps,\n\t\tname,\n\t\tpcfg,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif changed {\n\t\tnewDC := *dc\n\t\tnewDC.evps = newEvps\n\t\tp.dynCfg.Store(&newDC)\n\t\tp.logger.Printf(\"updated event processor %s\", name)\n\t}\n\treturn nil\n}\n\nfunc (p *prometheusOutput) needHTTPRebuild(old, new *config) bool {\n\tif p.server == nil || old == nil || new == nil {\n\t\treturn true\n\t}\n\treturn old.Listen != new.Listen ||\n\t\told.Path != new.Path ||\n\t\t!old.TLS.Equal(new.TLS)\n}\n\nfunc (p *prometheusOutput) createListenerFor(c *config) (net.Listener, error) {\n\tif c.TLS == nil {\n\t\treturn net.Listen(\"tcp\", c.Listen)\n\t}\n\ttlsConfig, err := utils.NewTLSConfig(\n\t\tc.TLS.CaFile, c.TLS.CertFile, c.TLS.KeyFile, c.TLS.ClientAuth, true, true,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tls.Listen(\"tcp\", c.Listen, tlsConfig)\n}\n\n// Write implements the 
outputs.Output interface\nfunc (p *prometheusOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) {\n\tif rsp == nil {\n\t\treturn\n\t}\n\n\tcfg := p.cfg.Load()\n\tif cfg == nil {\n\t\treturn\n\t}\n\n\twctx, cancel := context.WithTimeout(ctx, cfg.Timeout)\n\tdefer cancel()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tcase p.msgChan <- outputs.NewProtoMsg(rsp, meta):\n\tcase <-wctx.Done():\n\t\tif cfg.Debug {\n\t\t\tp.logger.Printf(\"writing expired after %s\", cfg.Timeout)\n\t\t}\n\t\treturn\n\t}\n}\n\nfunc (p *prometheusOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {\n\tdc := p.dynCfg.Load()\n\tif dc == nil {\n\t\treturn\n\t}\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tdefault:\n\t\tvar evs = []*formatters.EventMsg{ev}\n\t\tfor _, proc := range dc.evps {\n\t\t\tevs = proc.Apply(evs...)\n\t\t}\n\t\tfor _, pev := range evs {\n\t\t\tp.eventChan <- pev\n\t\t}\n\t}\n}\n\nfunc (p *prometheusOutput) Close() error {\n\tp.Lock()\n\tconsulClient := p.consulClient\n\tgnmiCache := p.gnmiCache\n\tserver := p.server\n\tcfg := p.cfg.Load()\n\tp.Unlock()\n\n\tvar err error\n\tif consulClient != nil && cfg != nil && cfg.ServiceRegistration != nil {\n\t\terr = consulClient.Agent().ServiceDeregister(cfg.ServiceRegistration.id)\n\t\tif err != nil {\n\t\t\t// ignore 404 and unknown service ID errors\n\t\t\tif !strings.Contains(err.Error(), \"404\") &&\n\t\t\t\t!strings.Contains(err.Error(), \"Unknown service ID\") {\n\t\t\t\tp.logger.Printf(\"failed to deregister consul service: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\tif p.runCfn != nil {\n\t\tp.runCfn()\n\t}\n\tif gnmiCache != nil {\n\t\tgnmiCache.Stop()\n\t}\n\tif server != nil {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t\tdefer cancel()\n\t\terr = server.Shutdown(ctx)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"failed to shutdown http server: %v\", err)\n\t\t}\n\t}\n\tp.logger.Printf(\"closed.\")\n\tp.wg.Wait()\n\treturn nil\n}\n\n// Describe 
implements prometheus.Collector\nfunc (p *prometheusOutput) Describe(ch chan<- *prometheus.Desc) {}\n\n// Collect implements prometheus.Collector\nfunc (p *prometheusOutput) Collect(ch chan<- prometheus.Metric) {\n\tcfg := p.cfg.Load()\n\tif cfg == nil {\n\t\treturn\n\t}\n\n\tp.Lock()\n\tdefer p.Unlock()\n\tif cfg.CacheConfig != nil {\n\t\tp.collectFromCache(ch)\n\t\treturn\n\t}\n\t// No cache\n\t// run expire before exporting metrics\n\tp.expireMetrics()\n\n\tctx, cancel := context.WithTimeout(context.Background(), cfg.Timeout)\n\tdefer cancel()\n\n\tfor _, entry := range p.entries {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tp.logger.Printf(\"collection context terminated: %v\", ctx.Err())\n\t\t\treturn\n\t\tcase ch <- entry:\n\t\t}\n\t}\n}\n\nfunc (p *prometheusOutput) worker(ctx context.Context) {\n\tdefer p.wg.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase ev := <-p.eventChan:\n\t\t\tp.workerHandleEvent(ev)\n\t\tcase m := <-p.msgChan:\n\t\t\tp.workerHandleProto(ctx, m)\n\t\t}\n\t}\n}\n\nfunc (p *prometheusOutput) workerHandleProto(ctx context.Context, m *outputs.ProtoMsg) {\n\tpmsg := m.GetMsg()\n\tcfg := p.cfg.Load()\n\tdc := p.dynCfg.Load()\n\n\tif cfg == nil || dc == nil {\n\t\treturn\n\t}\n\tp.Lock()\n\tgnmiCache := p.gnmiCache\n\ttargetsMeta := p.targetsMeta\n\tp.Unlock()\n\n\tswitch pmsg := pmsg.(type) {\n\tcase *gnmi.SubscribeResponse:\n\t\tmeta := m.GetMeta()\n\t\tmeasName := \"default\"\n\t\tif subName, ok := meta[\"subscription-name\"]; ok {\n\t\t\tmeasName = subName\n\t\t}\n\t\tvar err error\n\t\tpmsg, err = outputs.AddSubscriptionTarget(pmsg, m.GetMeta(), cfg.AddTarget, dc.targetTpl)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"failed to add target to the response: %v\", err)\n\t\t}\n\t\tif gnmiCache != nil {\n\t\t\tgnmiCache.Write(ctx, measName, pmsg)\n\t\t\ttarget := utils.GetHost(meta[\"source\"])\n\t\t\ttargetsMeta.Set(measName+\"/\"+target, meta, ttlcache.DefaultTTL)\n\t\t\treturn\n\t\t}\n\t\tevents, err := 
formatters.ResponseToEventMsgs(measName, pmsg, meta, dc.evps...)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"failed to convert message to event: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tp.workerHandleEvent(events...)\n\t}\n}\n\ntype metricAndKey struct {\n\tk uint64\n\tm *promcom.PromMetric\n}\n\nfunc (p *prometheusOutput) workerHandleEvent(evs ...*formatters.EventMsg) {\n\tcfg := p.cfg.Load()\n\tdc := p.dynCfg.Load()\n\tif cfg == nil || dc == nil {\n\t\treturn\n\t}\n\tif cfg.Debug {\n\t\tp.logger.Printf(\"got event to store: %+v\", evs)\n\t}\n\tmks := make([]*metricAndKey, 0, len(evs))\n\tfor _, ev := range evs {\n\t\tfor _, pm := range dc.mb.MetricsFromEvent(ev, time.Now()) {\n\t\t\tmks = append(mks, &metricAndKey{\n\t\t\t\tm: pm,\n\t\t\t\tk: pm.CalculateKey(),\n\t\t\t})\n\t\t}\n\t}\n\tp.Lock()\n\n\tdefer p.Unlock()\n\tfor _, mk := range mks {\n\t\t//\tkey := pm.CalculateKey()\n\t\te, ok := p.entries[mk.k]\n\t\t// if the entry key is not present add it to the map.\n\t\t// if present add it only if the entry timestamp is newer than the\n\t\t// existing one.\n\t\tif !ok || mk.m.Time == nil || (ok && mk.m.Time != nil && e.Time.Before(*mk.m.Time)) {\n\t\t\tp.entries[mk.k] = mk.m\n\t\t\tif cfg.Debug {\n\t\t\t\tp.logger.Printf(\"saved key=%d, metric: %+v\", mk.k, mk.m)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (p *prometheusOutput) expireMetrics() {\n\tcfg := p.cfg.Load()\n\tif cfg == nil || cfg.Expiration <= 0 {\n\t\treturn\n\t}\n\texpiry := time.Now().Add(-cfg.Expiration)\n\tfor k, e := range p.entries {\n\t\tif cfg.ExportTimestamps {\n\t\t\tif e.Time.Before(expiry) {\n\t\t\t\tdelete(p.entries, k)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif e.AddedAt.Before(expiry) {\n\t\t\tdelete(p.entries, k)\n\t\t}\n\t}\n}\n\nfunc (p *prometheusOutput) expireMetricsPeriodic(ctx context.Context) {\n\tcfg := p.cfg.Load()\n\tif cfg == nil || cfg.Expiration <= 0 
{\n\t\treturn\n\t}\n\n\tp.Lock()\n\tprometheusNumberOfMetrics.WithLabelValues(cfg.Name).Set(float64(len(p.entries)))\n\tp.Unlock()\n\n\tticker := time.NewTicker(cfg.Expiration)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tcfg := p.cfg.Load()\n\t\t\tif cfg == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp.Lock()\n\t\t\tp.expireMetrics()\n\t\t\tprometheusNumberOfMetrics.WithLabelValues(cfg.Name).Set(float64(len(p.entries)))\n\t\t\tp.Unlock()\n\t\t}\n\t}\n}\n\nfunc (p *prometheusOutput) setDefaultsFor(c *config) error {\n\tif c.Listen == \"\" {\n\t\tc.Listen = defaultListen\n\t}\n\tif c.Path == \"\" {\n\t\tc.Path = defaultPath\n\t}\n\tif c.Expiration == 0 {\n\t\tc.Expiration = defaultExpiration\n\t}\n\tif c.CacheConfig != nil && c.AddTarget == \"\" {\n\t\tc.AddTarget = \"if-not-present\"\n\t}\n\tif c.Timeout <= 0 {\n\t\tc.Timeout = defaultTimeout\n\t}\n\tif c.NumWorkers <= 0 {\n\t\tc.NumWorkers = defaultNumWorkers\n\t}\n\tif c.ServiceRegistration == nil {\n\t\treturn nil\n\t}\n\n\tp.setServiceRegistrationDefaults(c)\n\tvar err error\n\tvar port string\n\tswitch {\n\tcase c.ServiceRegistration.ServiceAddress != \"\":\n\t\tc.address, port, err = net.SplitHostPort(c.ServiceRegistration.ServiceAddress)\n\t\tif err != nil {\n\t\t\t// if service-address does not include a port number, use the port number from the listen field\n\t\t\tif strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\t\tc.address = c.ServiceRegistration.ServiceAddress\n\t\t\t\t_, port, err = net.SplitHostPort(c.Listen)\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.logger.Printf(\"invalid 'listen' field format: %v\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tc.port, err = strconv.Atoi(port)\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.logger.Printf(\"invalid 'listen' field format: %v\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// if the error is not related to a missing port, 
fail\n\t\t\tp.logger.Printf(\"invalid 'service-registration.service-address' field format: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\t// the service-address contains both an address and a port number\n\t\tc.port, err = strconv.Atoi(port)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"invalid 'service-registration.service-address' field format: %v\", err)\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\tc.address, port, err = net.SplitHostPort(c.Listen)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"invalid 'listen' field format: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tc.port, err = strconv.Atoi(port)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"invalid 'listen' field format: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (p *prometheusOutput) setName(name string, cfg *config) {\n\tif cfg.Name == \"\" {\n\t\tcfg.Name = name\n\t}\n\tif cfg.ServiceRegistration != nil {\n\t\tif cfg.ServiceRegistration.Name == \"\" {\n\t\t\tcfg.ServiceRegistration.Name = fmt.Sprintf(\"prometheus-%s\", cfg.Name)\n\t\t}\n\t\tif name == \"\" {\n\t\t\tname = uuid.New().String()\n\t\t}\n\t\tcfg.ServiceRegistration.id = fmt.Sprintf(\"%s-%s\", cfg.ServiceRegistration.Name, name)\n\t\tcfg.ServiceRegistration.Tags = append(cfg.ServiceRegistration.Tags, fmt.Sprintf(\"gnmic-instance=%s\", name))\n\t}\n}\n\nfunc (p *prometheusOutput) setClusterName(name string, cfg *config) {\n\tcfg.clusterName = name\n\tif cfg.ServiceRegistration != nil {\n\t\tcfg.ServiceRegistration.Tags = append(cfg.ServiceRegistration.Tags, fmt.Sprintf(\"gnmic-cluster=%s\", name))\n\t}\n}\n"
  },
  {
    "path": "pkg/outputs/prometheus_output/prometheus_output/prometheus_service_registration.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage prometheus_output\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/hashicorp/consul/api\"\n\n\t\"github.com/openconfig/gnmic/pkg/lockers\"\n)\n\nconst (\n\tdefaultServiceRegistrationAddress = \"localhost:8500\"\n\tdefaultRegistrationCheckInterval  = 5 * time.Second\n\tdefaultMaxServiceFail             = 3\n)\n\ntype serviceRegistration struct {\n\tAddress    string `mapstructure:\"address,omitempty\" json:\"address,omitempty\"`\n\tDatacenter string `mapstructure:\"datacenter,omitempty\" json:\"datacenter,omitempty\"`\n\tUsername   string `mapstructure:\"username,omitempty\" json:\"username,omitempty\"`\n\tPassword   string `mapstructure:\"password,omitempty\" json:\"password,omitempty\"`\n\tToken      string `mapstructure:\"token,omitempty\" json:\"token,omitempty\"`\n\n\tName             string        `mapstructure:\"name,omitempty\" json:\"name,omitempty\"`\n\tCheckInterval    time.Duration `mapstructure:\"check-interval,omitempty\" json:\"check-interval,omitempty\"`\n\tMaxFail          int           `mapstructure:\"max-fail,omitempty\" json:\"max-fail,omitempty\"`\n\tTags             []string      `mapstructure:\"tags,omitempty\" json:\"tags,omitempty\"`\n\tEnableHTTPCheck  bool          `mapstructure:\"enable-http-check,omitempty\" json:\"enable-http-check,omitempty\"`\n\tHTTPCheckAddress string        `mapstructure:\"http-check-address,omitempty\" json:\"http-check-address,omitempty\"`\n\tUseLock        
  bool          `mapstructure:\"use-lock,omitempty\" json:\"use-lock,omitempty\"`\n\tServiceAddress   string        `mapstructure:\"service-address,omitempty\" json:\"service-address,omitempty\"`\n\n\tderegisterAfter  string\n\tid               string\n\thttpCheckAddress string\n}\n\nfunc (p *prometheusOutput) registerService(ctx context.Context) {\n\tcfg := p.cfg.Load()\n\tif cfg == nil {\n\t\treturn\n\t}\n\tif cfg.ServiceRegistration == nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tp.logger.Printf(\"deregistering service: %s\", cfg.ServiceRegistration.Name)\n\t}()\n\tp.logger.Printf(\"registering service: %s\", cfg.ServiceRegistration.Name)\n\tvar err error\n\tclientConfig := &api.Config{\n\t\tAddress:    cfg.ServiceRegistration.Address,\n\t\tScheme:     \"http\",\n\t\tDatacenter: cfg.ServiceRegistration.Datacenter,\n\t\tToken:      cfg.ServiceRegistration.Token,\n\t}\n\tif cfg.ServiceRegistration.Username != \"\" && cfg.ServiceRegistration.Password != \"\" {\n\t\tclientConfig.HttpAuth = &api.HttpBasicAuth{\n\t\t\tUsername: cfg.ServiceRegistration.Username,\n\t\t\tPassword: cfg.ServiceRegistration.Password,\n\t\t}\n\t}\n\tdoneCh := make(chan struct{})\nINITCONSUL:\n\tif ctx.Err() != nil {\n\t\tif errors.Is(ctx.Err(), context.Canceled) {\n\t\t\tp.logger.Printf(\"context canceled: %v\", ctx.Err())\n\t\t\tclose(doneCh)\n\t\t\tif p.consulClient != nil {\n\t\t\t\terr = p.consulClient.Agent().ServiceDeregister(cfg.ServiceRegistration.id)\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.logger.Printf(\"failed to deregister service in consul: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\tp.consulClient, err = api.NewClient(clientConfig)\n\tif err != nil {\n\t\tp.logger.Printf(\"failed to connect to consul: %v\", err)\n\t\ttime.Sleep(1 * time.Second)\n\t\tgoto INITCONSUL\n\t}\n\tself, err := p.consulClient.Agent().Self()\n\tif err != nil {\n\t\tp.logger.Printf(\"failed to connect to consul: %v\", err)\n\t\ttime.Sleep(1 * time.Second)\n\t\tgoto INITCONSUL\n\t}\n\tif 
cfg, ok := self[\"Config\"]; ok {\n\t\tb, _ := json.Marshal(cfg)\n\t\tp.logger.Printf(\"consul agent config: %s\", string(b))\n\t}\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tif cfg.ServiceRegistration.UseLock {\n\t\tdoneCh, err = p.acquireAndKeepLock(ctx, \"gnmic/\"+cfg.clusterName+\"/prometheus-output\", []byte(cfg.ServiceRegistration.id))\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"failed to acquire lock: %v\", err)\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tgoto INITCONSUL\n\t\t}\n\t}\n\n\tttlCheckID := \"ttl:\" + cfg.ServiceRegistration.id\n\tservice := &api.AgentServiceRegistration{\n\t\tID:      cfg.ServiceRegistration.id,\n\t\tName:    cfg.ServiceRegistration.Name,\n\t\tAddress: cfg.address,\n\t\tPort:    cfg.port,\n\t\tTags:    cfg.ServiceRegistration.Tags,\n\t\tChecks: api.AgentServiceChecks{\n\t\t\t{\n\t\t\t\tCheckID:                        ttlCheckID,\n\t\t\t\tTTL:                            cfg.ServiceRegistration.CheckInterval.String(),\n\t\t\t\tDeregisterCriticalServiceAfter: cfg.ServiceRegistration.deregisterAfter,\n\t\t\t},\n\t\t},\n\t}\n\tif cfg.ServiceRegistration.EnableHTTPCheck {\n\t\tservice.Checks = append(service.Checks, &api.AgentServiceCheck{\n\t\t\tCheckID:                        \"http:\" + cfg.ServiceRegistration.id,\n\t\t\tHTTP:                           cfg.ServiceRegistration.httpCheckAddress,\n\t\t\tMethod:                         \"GET\",\n\t\t\tInterval:                       cfg.ServiceRegistration.CheckInterval.String(),\n\t\t\tTLSSkipVerify:                  true,\n\t\t\tDeregisterCriticalServiceAfter: cfg.ServiceRegistration.deregisterAfter,\n\t\t})\n\t}\n\tb, _ := json.Marshal(service)\n\tp.logger.Printf(\"registering service: %s\", string(b))\n\terr = p.consulClient.Agent().ServiceRegister(service)\n\tif err != nil {\n\t\tp.logger.Printf(\"failed to register service in consul: %v\", err)\n\t\treturn\n\t}\n\n\terr = p.consulClient.Agent().UpdateTTL(ttlCheckID, \"\", api.HealthPassing)\n\tif err != nil 
{\n\t\tp.logger.Printf(\"failed to pass TTL check: %v\", err)\n\t}\n\tticker := time.NewTicker(cfg.ServiceRegistration.CheckInterval / 2)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\terr = p.consulClient.Agent().UpdateTTL(ttlCheckID, \"\", api.HealthPassing)\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Printf(\"failed to update TTL check to Passing: %v\", err)\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\terr = p.consulClient.Agent().UpdateTTL(ttlCheckID, ctx.Err().Error(), api.HealthCritical)\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Printf(\"failed to update TTL check to Critical: %v\", err)\n\t\t\t}\n\t\t\tticker.Stop()\n\t\t\tgoto INITCONSUL\n\t\tcase <-doneCh:\n\t\t\tticker.Stop()\n\t\t\tgoto INITCONSUL\n\t\t}\n\t}\n}\n\nfunc (p *prometheusOutput) setServiceRegistrationDefaults(c *config) {\n\tif c.ServiceRegistration.Address == \"\" {\n\t\tc.ServiceRegistration.Address = defaultServiceRegistrationAddress\n\t}\n\tif c.ServiceRegistration.CheckInterval <= 5*time.Second {\n\t\tc.ServiceRegistration.CheckInterval = defaultRegistrationCheckInterval\n\t}\n\tif c.ServiceRegistration.MaxFail <= 0 {\n\t\tc.ServiceRegistration.MaxFail = defaultMaxServiceFail\n\t}\n\tderegisterTimer := c.ServiceRegistration.CheckInterval * time.Duration(c.ServiceRegistration.MaxFail)\n\tc.ServiceRegistration.deregisterAfter = deregisterTimer.String()\n\n\tif !c.ServiceRegistration.EnableHTTPCheck {\n\t\treturn\n\t}\n\tc.ServiceRegistration.httpCheckAddress = c.ServiceRegistration.HTTPCheckAddress\n\tif c.ServiceRegistration.httpCheckAddress != \"\" {\n\t\tc.ServiceRegistration.httpCheckAddress = filepath.Join(c.ServiceRegistration.httpCheckAddress, c.Path)\n\t\tif !strings.HasPrefix(c.ServiceRegistration.httpCheckAddress, \"http\") {\n\t\t\tc.ServiceRegistration.httpCheckAddress = \"http://\" + c.ServiceRegistration.httpCheckAddress\n\t\t}\n\t\treturn\n\t}\n\tc.ServiceRegistration.httpCheckAddress = filepath.Join(c.Listen, c.Path)\n\tif 
!strings.HasPrefix(c.ServiceRegistration.httpCheckAddress, \"http\") {\n\t\tc.ServiceRegistration.httpCheckAddress = \"http://\" + c.ServiceRegistration.httpCheckAddress\n\t}\n}\n\nfunc (p *prometheusOutput) acquireLock(ctx context.Context, key string, val []byte) (string, error) {\n\tcfg := p.cfg.Load()\n\tif cfg == nil {\n\t\treturn \"\", fmt.Errorf(\"config not found\")\n\t}\n\tvar err error\n\tvar acquired = false\n\twriteOpts := new(api.WriteOptions)\n\twriteOpts = writeOpts.WithContext(ctx)\n\tkvPair := &api.KVPair{Key: key, Value: val}\n\tdoneChan := make(chan struct{})\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn \"\", ctx.Err()\n\t\tcase <-doneChan:\n\t\t\treturn \"\", lockers.ErrCanceled\n\t\tdefault:\n\t\t\tacquired = false\n\t\t\tkvPair.Session, _, err = p.consulClient.Session().Create(\n\t\t\t\t&api.SessionEntry{\n\t\t\t\t\tBehavior:  \"delete\",\n\t\t\t\t\tTTL:       time.Duration(cfg.ServiceRegistration.CheckInterval * 2).String(),\n\t\t\t\t\tLockDelay: 0,\n\t\t\t\t},\n\t\t\t\twriteOpts,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Printf(\"failed creating session: %v\", err)\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tacquired, _, err = p.consulClient.KV().Acquire(kvPair, writeOpts)\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Printf(\"failed acquiring lock to %q: %v\", kvPair.Key, err)\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif acquired {\n\t\t\t\treturn kvPair.Session, nil\n\t\t\t}\n\t\t\tif cfg.Debug {\n\t\t\t\tp.logger.Printf(\"failed acquiring lock to %q: already locked\", kvPair.Key)\n\t\t\t}\n\t\t\ttime.Sleep(10 * time.Second)\n\t\t}\n\t}\n}\n\nfunc (p *prometheusOutput) keepLock(ctx context.Context, sessionID string) (chan struct{}, chan error) {\n\n\twriteOpts := new(api.WriteOptions)\n\twriteOpts = writeOpts.WithContext(ctx)\n\tdoneChan := make(chan struct{})\n\terrChan := make(chan error)\n\tgo func() {\n\t\tif sessionID == \"\" {\n\t\t\terrChan <- fmt.Errorf(\"unknown 
key\")\n\t\t\tclose(doneChan)\n\t\t\treturn\n\t\t}\n\t\tcfg := p.cfg.Load()\n\t\tif cfg == nil {\n\t\t\terrChan <- fmt.Errorf(\"config not found\")\n\t\t\tclose(doneChan)\n\t\t\treturn\n\t\t}\n\t\terr := p.consulClient.Session().RenewPeriodic(\n\t\t\ttime.Duration(cfg.ServiceRegistration.CheckInterval/2).String(),\n\t\t\tsessionID,\n\t\t\twriteOpts,\n\t\t\tdoneChan,\n\t\t)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t}\n\t}()\n\n\treturn doneChan, errChan\n}\n\nfunc (p *prometheusOutput) acquireAndKeepLock(ctx context.Context, key string, val []byte) (chan struct{}, error) {\n\tsessionID, err := p.acquireLock(ctx, key, val)\n\tif err != nil {\n\t\tp.logger.Printf(\"failed to acquire lock: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tdoneCh, errCh := p.keepLock(ctx, sessionID)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tclose(doneCh)\n\t\t\t\treturn\n\t\t\tcase <-doneCh:\n\t\t\t\t_, err := p.consulClient.KV().Delete(key, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.logger.Printf(\"failed to delete lock from consul: %v\", err)\n\t\t\t\t}\n\t\t\t\t_, err = p.consulClient.Session().Destroy(sessionID, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.logger.Printf(\"failed to destroy session in consul: %v\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\tcase err := <-errCh:\n\t\t\t\tp.logger.Printf(\"failed maintaining the lock: %v\", err)\n\t\t\t\tclose(doneCh)\n\t\t\t}\n\t\t}\n\t}()\n\treturn doneCh, nil\n}\n"
  },
  {
    "path": "pkg/outputs/prometheus_output/prometheus_write_output/prometheus_write_client.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage prometheus_write_output\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"sort\"\n\t\"time\"\n\n\tgogoproto \"github.com/gogo/protobuf/proto\"\n\t\"github.com/golang/snappy\"\n\t\"github.com/prometheus/prometheus/prompb\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n)\n\nvar (\n\tErrMarshal = errors.New(\"marshal error\")\n)\n\nconst backoff = 100 * time.Millisecond\n\nfunc (p *promWriteOutput) createHTTPClientFor(c *config) (*http.Client, error) {\n\tcl := &http.Client{\n\t\tTimeout: c.Timeout,\n\t}\n\tif c.TLS != nil {\n\t\ttlsCfg, err := utils.NewTLSConfig(\n\t\t\tc.TLS.CaFile,\n\t\t\tc.TLS.CertFile,\n\t\t\tc.TLS.KeyFile,\n\t\t\t\"\",\n\t\t\tc.TLS.SkipVerify,\n\t\t\tfalse,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcl.Transport = &http.Transport{\n\t\t\tTLSClientConfig: tlsCfg,\n\t\t}\n\t}\n\treturn cl, nil\n}\n\nfunc (p *promWriteOutput) writer(ctx context.Context) {\n\tdefer p.wg.Done()\n\tdefer p.logger.Printf(\"writer stopped\")\n\tcfg := p.cfg.Load()\n\tp.logger.Printf(\"starting writer\")\n\tticker := time.NewTicker(cfg.Interval)\n\tdefer ticker.Stop()\n\tfor {\n\t\ttimeSeriesCh := *p.timeSeriesCh.Load()\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tif cfg.Debug {\n\t\t\t\tp.logger.Printf(\"write interval reached, writing to remote\")\n\t\t\t}\n\t\t\tp.write(ctx, timeSeriesCh)\n\t\tcase <-p.buffDrainCh:\n\t\t\tif cfg.Debug {\n\t\t\t\tp.logger.Printf(\"buffer full, writing to 
remote\")\n\t\t\t}\n\t\t\tp.write(ctx, timeSeriesCh)\n\t\t}\n\t}\n}\n\nfunc (p *promWriteOutput) write(ctx context.Context, timeSeriesCh <-chan *prompb.TimeSeries) {\n\tcfg := p.cfg.Load()\n\tbuffSize := len(timeSeriesCh)\n\tif cfg.Debug {\n\t\tp.logger.Printf(\"write triggered, buffer size: %d\", buffSize)\n\t}\n\tif buffSize == 0 {\n\t\treturn\n\t}\n\tpts := make([]prompb.TimeSeries, 0, buffSize)\n\t// read from buff channel for 1 second or\n\t// until we read a number of timeSeries equal to the buffer size\n\tfor {\n\t\tselect {\n\t\tcase ts := <-timeSeriesCh:\n\t\t\tpts = append(pts, *ts)\n\t\t\tif len(pts) == buffSize {\n\t\t\t\tgoto WRITE\n\t\t\t}\n\t\tcase <-time.After(time.Second):\n\t\t\tgoto WRITE\n\t\t}\n\t}\nWRITE:\n\tnumTS := len(pts)\n\tif numTS == 0 {\n\t\treturn\n\t}\n\t// sort timeSeries by timestamp\n\tsort.Slice(pts, func(i, j int) bool {\n\t\treturn pts[i].Samples[0].Timestamp < pts[j].Samples[0].Timestamp\n\t})\n\tchunk := make([]prompb.TimeSeries, 0, cfg.MaxTimeSeriesPerWrite)\n\tfor i, pt := range pts {\n\t\t// append timeSeries to chunk\n\t\tchunk = append(chunk, pt)\n\t\t// if the chunk size reaches the configured max or\n\t\t// we reach the max number of time series gathered, send.\n\t\tchunkSize := len(chunk)\n\t\tif chunkSize == cfg.MaxTimeSeriesPerWrite || i+1 == numTS {\n\t\t\tif cfg.Debug {\n\t\t\t\tp.logger.Printf(\"writing a %d time series chunk\", chunkSize)\n\t\t\t}\n\t\t\tstart := time.Now()\n\t\t\terr := p.writeRequest(ctx, &prompb.WriteRequest{\n\t\t\t\tTimeseries: chunk,\n\t\t\t}, cfg)\n\t\t\tif err != nil {\n\t\t\t\tif cfg.Debug {\n\t\t\t\t\tp.logger.Print(err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprometheusWriteSendDuration.WithLabelValues(cfg.Name).Set(float64(time.Since(start).Nanoseconds()))\n\t\t\tprometheusWriteNumberOfSentMsgs.WithLabelValues(cfg.Name).Add(float64(chunkSize))\n\t\t\t// return if we are done with the gathered time series\n\t\t\tif i+1 == numTS {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// reset chunk if 
we are not done yet\n\t\t\tchunk = make([]prompb.TimeSeries, 0, cfg.MaxTimeSeriesPerWrite)\n\t\t}\n\t}\n}\n\n// writeRequest marshals the supplied prompb.WriteRequest,\n// creates an HTTP request with the proper configured options (Authentication, Headers,...),\n// sends the request and checks the returned response status code.\n// It returns an error if the status code is >=300.\nfunc (p *promWriteOutput) writeRequest(ctx context.Context, wr *prompb.WriteRequest, cfg *config) error {\n\thttpReq, err := p.makeHTTPRequest(ctx, wr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// send request with retries\n\tretries := 0\nRETRY:\n\thttpClient := p.httpClient.Load()\n\t//cfg := p.cfg.Load()\n\trsp, err := httpClient.Do(httpReq)\n\tif err != nil {\n\t\tretries++\n\t\terr = fmt.Errorf(\"failed to write to remote: %w\", err)\n\t\tp.logger.Print(err)\n\t\tif retries < cfg.MaxRetries {\n\t\t\ttime.Sleep(backoff)\n\t\t\tgoto RETRY\n\t\t}\n\t\tprometheusWriteNumberOfFailSendMsgs.WithLabelValues(cfg.Name, \"client_failure\").Inc()\n\t\treturn err\n\t}\n\tdefer rsp.Body.Close()\n\n\tif cfg.Debug {\n\t\tp.logger.Printf(\"got response from remote: status=%s\", rsp.Status)\n\t}\n\tif rsp.StatusCode >= 300 {\n\t\tprometheusWriteNumberOfFailSendMsgs.WithLabelValues(cfg.Name, fmt.Sprintf(\"status_code=%d\", rsp.StatusCode)).Inc()\n\t\tmsg, err := io.ReadAll(rsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn fmt.Errorf(\"write response failed, code=%d, body=%s\", rsp.StatusCode, string(msg))\n\t}\n\treturn nil\n}\n\n// metadataWriter writes the cached metadata entries to the remote address each `metadata.interval`\nfunc (p *promWriteOutput) metadataWriter(ctx context.Context) {\n\tdefer p.wg.Done()\n\tdefer p.logger.Printf(\"metadata writer stopped\")\n\tcfg := p.cfg.Load()\n\tif cfg.Metadata == nil || !cfg.Metadata.Include {\n\t\treturn\n\t}\n\tp.writeMetadata(ctx)\n\tticker := time.NewTicker(cfg.Metadata.Interval)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect 
{\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tp.writeMetadata(ctx)\n\t\t}\n\t}\n}\n\n// writeMetadata writes the currently cached metadata entries to the remote address,\n// it will multiple prompb.WriteRequest with at most `metadata.max-entries` each until all entries are sent.\nfunc (p *promWriteOutput) writeMetadata(ctx context.Context) {\n\tcfg := p.cfg.Load()\n\tp.m.Lock()\n\tdefer p.m.Unlock()\n\n\tif len(p.metadataCache) == 0 {\n\t\treturn\n\t}\n\n\tmds := make([]prompb.MetricMetadata, 0, cfg.Metadata.MaxEntriesPerWrite)\n\tcount := 0 // keep track of the number of entries in mds\n\n\tfor _, md := range p.metadataCache {\n\t\tif count < cfg.Metadata.MaxEntriesPerWrite {\n\t\t\tcount++\n\t\t\tmds = append(mds, md)\n\t\t\tcontinue\n\t\t}\n\t\t// max entries reached, write accumulated entries\n\t\tif cfg.Debug {\n\t\t\tp.logger.Printf(\"writing %d metadata points\", len(mds))\n\t\t}\n\t\tstart := time.Now()\n\t\terr := p.writeRequest(ctx, &prompb.WriteRequest{\n\t\t\tMetadata: mds,\n\t\t}, cfg)\n\t\tif err != nil {\n\t\t\tprometheusWriteNumberOfFailSendMetadataMsgs.WithLabelValues(cfg.Name).Add(1)\n\t\t\tif cfg.Debug {\n\t\t\t\tp.logger.Print(err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tprometheusWriteMetadataSendDuration.WithLabelValues(cfg.Name).Set(float64(time.Since(start).Nanoseconds()))\n\t\tprometheusWriteNumberOfSentMetadataMsgs.WithLabelValues(cfg.Name).Add(float64(len(mds)))\n\t\t// reset counter and array then continue with the loop\n\t\tcount = 0\n\t\tmds = make([]prompb.MetricMetadata, 0, cfg.Metadata.MaxEntriesPerWrite)\n\t}\n\n\t// no metadata entries to write, return\n\tif len(mds) == 0 {\n\t\treturn\n\t}\n\n\t// loop done with some metadata entries left to write\n\tif cfg.Debug {\n\t\tp.logger.Printf(\"writing %d metadata points\", len(mds))\n\t}\n\tstart := time.Now()\n\terr := p.writeRequest(ctx, &prompb.WriteRequest{\n\t\tMetadata: mds,\n\t}, cfg)\n\tif err != nil {\n\t\tif cfg.Debug 
{\n\t\t\tp.logger.Print(err)\n\t\t}\n\t\treturn\n\t}\n\tprometheusWriteMetadataSendDuration.WithLabelValues(cfg.Name).Set(float64(time.Since(start).Nanoseconds()))\n\tprometheusWriteNumberOfSentMetadataMsgs.WithLabelValues(cfg.Name).Add(float64(len(mds)))\n}\n\nfunc (p *promWriteOutput) makeHTTPRequest(ctx context.Context, wr *prompb.WriteRequest) (*http.Request, error) {\n\tcfg := p.cfg.Load()\n\tb, err := gogoproto.Marshal(wr)\n\tif err != nil {\n\t\tprometheusWriteNumberOfFailSendMsgs.WithLabelValues(cfg.Name, \"marshal_error\").Inc()\n\t\treturn nil, fmt.Errorf(\"marshal error: %w\", err)\n\t}\n\tcompBytes := snappy.Encode(nil, b)\n\thttpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, cfg.URL, bytes.NewBuffer(compBytes))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create HTTP request: %v\", err)\n\t}\n\thttpReq.Header.Set(\"X-Prometheus-Remote-Write-Version\", \"0.1.0\")\n\thttpReq.Header.Set(\"Content-Encoding\", \"snappy\")\n\thttpReq.Header.Set(\"User-Agent\", userAgent)\n\thttpReq.Header.Set(\"Content-Type\", \"application/x-protobuf\")\n\n\tif cfg.Authentication != nil {\n\t\thttpReq.SetBasicAuth(cfg.Authentication.Username, cfg.Authentication.Password)\n\t}\n\n\tif cfg.Authorization != nil && cfg.Authorization.Type != \"\" {\n\t\thttpReq.Header.Set(\"Authorization\", fmt.Sprintf(\"%s %s\", cfg.Authorization.Type, cfg.Authorization.Credentials))\n\t}\n\n\tfor k, v := range cfg.Headers {\n\t\thttpReq.Header.Add(k, v)\n\t}\n\n\treturn httpReq, nil\n}\n"
  },
  {
    "path": "pkg/outputs/prometheus_output/prometheus_write_output/prometheus_write_metrics.go",
    "content": "// © 2023 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage prometheus_write_output\n\nimport (\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nconst (\n\tnamespace = \"gnmic\"\n\tsubsystem = \"prometheus_write_output\"\n)\n\nvar registerMetricsOnce sync.Once\n\nvar prometheusWriteNumberOfSentMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: namespace,\n\tSubsystem: subsystem,\n\tName:      \"number_of_prometheus_write_msgs_sent_success_total\",\n\tHelp:      \"Number of msgs successfully sent by gnmic prometheus_write output\",\n}, []string{\"name\"})\n\nvar prometheusWriteNumberOfFailSendMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: namespace,\n\tSubsystem: subsystem,\n\tName:      \"number_of_prometheus_write_msgs_sent_fail_total\",\n\tHelp:      \"Number of failed msgs sent by gnmic prometheus_write output\",\n}, []string{\"name\", \"reason\"})\n\nvar prometheusWriteSendDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: namespace,\n\tSubsystem: subsystem,\n\tName:      \"msg_send_duration_ns\",\n\tHelp:      \"gnmic prometheus_write output send duration in ns\",\n}, []string{\"name\"})\n\nvar prometheusWriteNumberOfSentMetadataMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: namespace,\n\tSubsystem: subsystem,\n\tName:      \"number_of_prometheus_write_metadata_msgs_sent_success_total\",\n\tHelp:      \"Number of metadata msgs successfully sent by gnmic prometheus_write output\",\n}, []string{\"name\"})\n\nvar 
prometheusWriteNumberOfFailSendMetadataMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: namespace,\n\tSubsystem: subsystem,\n\tName:      \"number_of_prometheus_write_metadata_msgs_sent_fail_total\",\n\tHelp:      \"Number of failed metadata msgs sent by gnmic prometheus_write output\",\n}, []string{\"name\", \"reason\"})\n\nvar prometheusWriteMetadataSendDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: namespace,\n\tSubsystem: subsystem,\n\tName:      \"metadata_msg_send_duration_ns\",\n\tHelp:      \"gnmic prometheus_write output metadata send duration in ns\",\n}, []string{\"name\"})\n\nfunc initMetrics(name string) {\n\t// data msgs metrics\n\tprometheusWriteNumberOfSentMsgs.WithLabelValues(name).Add(0)\n\tprometheusWriteNumberOfFailSendMsgs.WithLabelValues(name, \"\").Add(0)\n\tprometheusWriteSendDuration.WithLabelValues(name).Set(0)\n\t// metadata msgs metrics\n\tprometheusWriteNumberOfSentMetadataMsgs.WithLabelValues(name).Add(0)\n\tprometheusWriteNumberOfFailSendMetadataMsgs.WithLabelValues(name, \"\").Add(0)\n\tprometheusWriteMetadataSendDuration.WithLabelValues(name).Set(0)\n}\n\nfunc (p *promWriteOutput) registerMetrics() error {\n\tcfg := p.cfg.Load()\n\tif cfg == nil {\n\t\treturn nil\n\t}\n\tif !cfg.EnableMetrics {\n\t\treturn nil\n\t}\n\tif p.reg == nil {\n\t\treturn nil\n\t}\n\tvar err error\n\tregisterMetricsOnce.Do(func() {\n\t\tif err = p.reg.Register(prometheusWriteNumberOfSentMsgs); err != nil {\n\t\t\tp.logger.Printf(\"failed to register metric: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tif err = p.reg.Register(prometheusWriteNumberOfFailSendMsgs); err != nil {\n\t\t\tp.logger.Printf(\"failed to register metric: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tif err = p.reg.Register(prometheusWriteSendDuration); err != nil {\n\t\t\tp.logger.Printf(\"failed to register metric: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tif err = p.reg.Register(prometheusWriteNumberOfSentMetadataMsgs); err != nil 
{\n\t\t\tp.logger.Printf(\"failed to register metric: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tif err = p.reg.Register(prometheusWriteNumberOfFailSendMetadataMsgs); err != nil {\n\t\t\tp.logger.Printf(\"failed to register metric: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tif err = p.reg.Register(prometheusWriteMetadataSendDuration); err != nil {\n\t\t\tp.logger.Printf(\"failed to register metric: %v\", err)\n\t\t\treturn\n\t\t}\n\t})\n\tinitMetrics(cfg.Name)\n\treturn err\n}\n"
  },
  {
    "path": "pkg/outputs/prometheus_output/prometheus_write_output/prometheus_write_output.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage prometheus_write_output\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"slices\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/prometheus/prompb\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\tpromcom \"github.com/openconfig/gnmic/pkg/outputs/prometheus_output\"\n\tgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\toutputType                        = \"prometheus_write\"\n\tloggingPrefix                     = \"[prometheus_write_output:%s] \"\n\tdefaultTimeout                    = 10 * time.Second\n\tdefaultWriteInterval              = 10 * time.Second\n\tdefaultMetadataWriteInterval      = time.Minute\n\tdefaultBufferSize                 = 1000\n\tdefaultMaxTSPerWrite              = 500\n\tdefaultMaxMetaDataEntriesPerWrite = 500\n\tdefaultMetricHelp                 = \"gNMIc generated metric\"\n\tuserAgent                         = \"gNMIc prometheus write\"\n\tdefaultNumWorkers                 = 1\n\tdefaultNumWriters                 = 1\n)\n\nfunc init() 
{\n\toutputs.Register(outputType,\n\t\tfunc() outputs.Output {\n\t\t\treturn &promWriteOutput{}\n\t\t})\n}\n\ntype promWriteOutput struct {\n\toutputs.BaseOutput\n\n\tcfg          *atomic.Pointer[config]\n\tdynCfg       *atomic.Pointer[dynConfig]\n\thttpClient   *atomic.Pointer[http.Client]\n\ttimeSeriesCh *atomic.Pointer[chan *prompb.TimeSeries]\n\n\tlogger      *log.Logger\n\teventChan   chan *formatters.EventMsg\n\tmsgChan     chan *outputs.ProtoMsg\n\tbuffDrainCh chan struct{}\n\n\tm             *sync.Mutex\n\tmetadataCache map[string]prompb.MetricMetadata\n\n\trootCtx  context.Context\n\tcancelFn context.CancelFunc\n\twg       *sync.WaitGroup\n\n\treg   *prometheus.Registry\n\tstore store.Store[any]\n}\n\ntype config struct {\n\tName                  string            `mapstructure:\"name,omitempty\" json:\"name,omitempty\"`\n\tURL                   string            `mapstructure:\"url,omitempty\" json:\"url,omitempty\"`\n\tTimeout               time.Duration     `mapstructure:\"timeout,omitempty\" json:\"timeout,omitempty\"`\n\tHeaders               map[string]string `mapstructure:\"headers,omitempty\" json:\"headers,omitempty\"`\n\tAuthentication        *auth             `mapstructure:\"authentication,omitempty\" json:\"authentication,omitempty\"`\n\tAuthorization         *authorization    `mapstructure:\"authorization,omitempty\" json:\"authorization,omitempty\"`\n\tTLS                   *types.TLSConfig  `mapstructure:\"tls,omitempty\" json:\"tls,omitempty\"`\n\tInterval              time.Duration     `mapstructure:\"interval,omitempty\" json:\"interval,omitempty\"`\n\tBufferSize            int               `mapstructure:\"buffer-size,omitempty\" json:\"buffer-size,omitempty\"`\n\tMaxTimeSeriesPerWrite int               `mapstructure:\"max-time-series-per-write,omitempty\" json:\"max-time-series-per-write,omitempty\"`\n\tMaxRetries            int               `mapstructure:\"max-retries,omitempty\" json:\"max-retries,omitempty\"`\n\tMetadata             
 *metadata         `mapstructure:\"metadata,omitempty\" json:\"metadata,omitempty\"`\n\tDebug                 bool              `mapstructure:\"debug,omitempty\" json:\"debug,omitempty\"`\n\t//\n\tMetricPrefix           string   `mapstructure:\"metric-prefix,omitempty\" json:\"metric-prefix,omitempty\"`\n\tAppendSubscriptionName bool     `mapstructure:\"append-subscription-name,omitempty\" json:\"append-subscription-name,omitempty\"`\n\tAddTarget              string   `mapstructure:\"add-target,omitempty\" json:\"add-target,omitempty\"`\n\tTargetTemplate         string   `mapstructure:\"target-template,omitempty\" json:\"target-template,omitempty\"`\n\tStringsAsLabels        bool     `mapstructure:\"strings-as-labels,omitempty\" json:\"strings-as-labels,omitempty\"`\n\tEventProcessors        []string `mapstructure:\"event-processors,omitempty\" json:\"event-processors,omitempty\"`\n\tNumWorkers             int      `mapstructure:\"num-workers,omitempty\" json:\"num-workers,omitempty\"`\n\tNumWriters             int      `mapstructure:\"num-writers,omitempty\" json:\"num-writers,omitempty\"`\n\tEnableMetrics          bool     `mapstructure:\"enable-metrics,omitempty\" json:\"enable-metrics,omitempty\"`\n}\n\ntype dynConfig struct {\n\ttargetTpl *template.Template\n\tevps      []formatters.EventProcessor\n\tmb        *promcom.MetricBuilder\n}\n\ntype auth struct {\n\tUsername string `mapstructure:\"username,omitempty\" json:\"username,omitempty\"`\n\tPassword string `mapstructure:\"password,omitempty\" json:\"password,omitempty\"`\n}\n\ntype authorization struct {\n\tType        string `mapstructure:\"type,omitempty\" json:\"type,omitempty\"`\n\tCredentials string `mapstructure:\"credentials,omitempty\" json:\"credentials,omitempty\"`\n}\n\ntype metadata struct {\n\tInclude            bool          `mapstructure:\"include,omitempty\" json:\"include,omitempty\"`\n\tInterval           time.Duration `mapstructure:\"interval,omitempty\" 
json:\"interval,omitempty\"`\n\tMaxEntriesPerWrite int           `mapstructure:\"max-entries-per-write,omitempty\" json:\"max-entries-per-write,omitempty\"`\n}\n\nfunc (p *promWriteOutput) init() {\n\tp.cfg = new(atomic.Pointer[config])\n\tp.dynCfg = new(atomic.Pointer[dynConfig])\n\tp.httpClient = new(atomic.Pointer[http.Client])\n\tp.timeSeriesCh = new(atomic.Pointer[chan *prompb.TimeSeries])\n\tp.wg = new(sync.WaitGroup)\n\tp.m = new(sync.Mutex)\n\tp.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags)\n\tp.eventChan = make(chan *formatters.EventMsg)\n\tp.msgChan = make(chan *outputs.ProtoMsg)\n\tp.buffDrainCh = make(chan struct{}, 1)\n\tp.metadataCache = make(map[string]prompb.MetricMetadata)\n}\n\nfunc (p *promWriteOutput) buildEventProcessors(cfg *config) ([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := gutils.GetConfigMaps(p.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn formatters.MakeEventProcessors(p.logger, cfg.EventProcessors, ps, tcs, acts)\n}\n\nfunc (p *promWriteOutput) setLogger(logger *log.Logger) {\n\tif logger != nil && p.logger != nil {\n\t\tp.logger.SetOutput(logger.Writer())\n\t\tp.logger.SetFlags(logger.Flags())\n\t}\n}\n\nfunc (p *promWriteOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error {\n\tp.init()\n\n\tncfg := new(config)\n\terr := outputs.DecodeConfig(cfg, ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ncfg.URL == \"\" {\n\t\treturn errors.New(\"missing url field\")\n\t}\n\t_, err = url.Parse(ncfg.URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ncfg.Name == \"\" {\n\t\tncfg.Name = name\n\t}\n\tp.logger.SetPrefix(fmt.Sprintf(loggingPrefix, ncfg.Name))\n\n\toptions := &outputs.OutputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tp.store = options.Store\n\n\t// apply logger\n\tp.setLogger(options.Logger)\n\n\t// set 
defaults\n\tp.setDefaultsFor(ncfg)\n\n\tp.cfg.Store(ncfg)\n\n\t// initialize registry\n\tp.reg = options.Registry\n\terr = p.registerMetrics()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// prep dynamic config\n\tdc := new(dynConfig)\n\n\t// initialize event processors\n\tdc.evps, err = p.buildEventProcessors(ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// initialize target template\n\tif ncfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if ncfg.AddTarget != \"\" {\n\t\tdc.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", ncfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\n\tdc.mb = &promcom.MetricBuilder{\n\t\tPrefix:                 ncfg.MetricPrefix,\n\t\tAppendSubscriptionName: ncfg.AppendSubscriptionName,\n\t\tStringsAsLabels:        ncfg.StringsAsLabels,\n\t}\n\n\tp.dynCfg.Store(dc)\n\n\t// initialize buffer chan\n\ttimeSeriesCh := make(chan *prompb.TimeSeries, ncfg.BufferSize)\n\tp.timeSeriesCh.Store(&timeSeriesCh)\n\n\tcl, err := p.createHTTPClientFor(ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.httpClient.Store(cl)\n\n\tp.rootCtx = ctx\n\tvar wctx context.Context\n\twctx, p.cancelFn = context.WithCancel(p.rootCtx)\n\n\tp.wg.Add(ncfg.NumWorkers)\n\tfor i := 0; i < ncfg.NumWorkers; i++ {\n\t\tgo p.worker(wctx)\n\t}\n\n\tp.wg.Add(ncfg.NumWriters)\n\tfor i := 0; i < ncfg.NumWriters; i++ {\n\t\tgo p.writer(wctx)\n\t}\n\n\tp.wg.Add(1)\n\tgo p.metadataWriter(wctx)\n\n\tp.logger.Printf(\"initialized prometheus write output %s: %s\", ncfg.Name, p.String())\n\treturn nil\n}\n\nfunc (p *promWriteOutput) Update(ctx context.Context, cfg map[string]any) error {\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif newCfg.URL == \"\" {\n\t\treturn errors.New(\"missing url field\")\n\t}\n\tif _, err := url.Parse(newCfg.URL); err != nil {\n\t\treturn 
fmt.Errorf(\"invalid url: %w\", err)\n\t}\n\n\tp.setDefaultsFor(newCfg)\n\n\tcurrCfg := p.cfg.Load()\n\n\tswapChannel := channelNeedsSwap(currCfg, newCfg)\n\trestartWorkers := needsWorkerRestart(currCfg, newCfg)\n\trebuildClient := needsClientRebuild(currCfg, newCfg)\n\trebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0\n\n\t// Rebuild dynamic config\n\tdc := new(dynConfig)\n\n\t// target template\n\tif newCfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tt, err := gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = t.Funcs(outputs.TemplateFuncs)\n\t} else {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t}\n\n\t// metric builder\n\tdc.mb = &promcom.MetricBuilder{\n\t\tPrefix:                 newCfg.MetricPrefix,\n\t\tAppendSubscriptionName: newCfg.AppendSubscriptionName,\n\t\tStringsAsLabels:        newCfg.StringsAsLabels,\n\t}\n\n\t// rebuild processors ?\n\tprevDC := p.dynCfg.Load()\n\tif rebuildProcessors {\n\t\tdc.evps, err = p.buildEventProcessors(newCfg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\n\t// store new dynamic config\n\tp.dynCfg.Store(dc)\n\n\t// rebuild HTTP client if needed\n\tif rebuildClient {\n\t\tnewClient, err := p.createHTTPClientFor(newCfg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\toldClient := p.httpClient.Swap(newClient)\n\t\tif oldClient != nil {\n\t\t\toldClient.CloseIdleConnections()\n\t\t}\n\t}\n\n\t// store new config\n\tp.cfg.Store(newCfg)\n\n\tif swapChannel || restartWorkers {\n\t\tvar newChan chan *prompb.TimeSeries\n\t\tif swapChannel {\n\t\t\tnewChan = make(chan *prompb.TimeSeries, newCfg.BufferSize)\n\t\t} else {\n\t\t\tnewChan = *p.timeSeriesCh.Load()\n\t\t}\n\n\t\trunCtx, cancel := context.WithCancel(p.rootCtx)\n\t\tnewWG := 
new(sync.WaitGroup)\n\n\t\t// save old pointers\n\t\toldCancel := p.cancelFn\n\t\toldWG := p.wg\n\t\toldTSCh := *p.timeSeriesCh.Load()\n\n\t\t// swap\n\t\tp.cancelFn = cancel\n\t\tp.wg = newWG\n\t\tp.timeSeriesCh.Store(&newChan)\n\n\t\t// restart workers\n\t\tp.wg.Add(newCfg.NumWorkers)\n\t\tfor i := 0; i < newCfg.NumWorkers; i++ {\n\t\t\tgo p.worker(runCtx)\n\t\t}\n\n\t\tp.wg.Add(newCfg.NumWriters)\n\t\tfor i := 0; i < newCfg.NumWriters; i++ {\n\t\t\tgo p.writer(runCtx)\n\t\t}\n\n\t\tp.wg.Add(1)\n\t\tgo p.metadataWriter(runCtx)\n\n\t\t// cancel old workers\n\t\tif oldCancel != nil {\n\t\t\toldCancel()\n\t\t}\n\t\tif oldWG != nil {\n\t\t\toldWG.Wait()\n\t\t}\n\n\t\tif swapChannel {\n\t\t\t// best effort drain old channel\n\t\tOUTER_LOOP:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase ts, ok := <-oldTSCh:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase newChan <- ts:\n\t\t\t\t\tdefault:\n\t\t\t\t\t\t// new channel full, drop message\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tbreak OUTER_LOOP\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tp.logger.Printf(\"updated prometheus write output: %s\", p.String())\n\treturn nil\n}\n\nfunc (p *promWriteOutput) Validate(cfg map[string]any) error {\n\tncfg := new(config)\n\terr := outputs.DecodeConfig(cfg, ncfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ncfg.URL == \"\" {\n\t\treturn errors.New(\"missing url field\")\n\t}\n\tif _, err := url.Parse(ncfg.URL); err != nil {\n\t\treturn fmt.Errorf(\"invalid url: %w\", err)\n\t}\n\t_, err = gtemplate.CreateTemplate(\"target-template\", ncfg.TargetTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (p *promWriteOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) {\n\tif rsp == nil {\n\t\treturn\n\t}\n\tcfg := p.cfg.Load()\n\n\twctx, cancel := context.WithTimeout(ctx, cfg.Timeout)\n\tdefer cancel()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tcase p.msgChan <- outputs.NewProtoMsg(rsp, meta):\n\tcase 
<-wctx.Done():\n\t\tif cfg.Debug {\n\t\t\tp.logger.Printf(\"writing expired after %s\", cfg.Timeout)\n\t\t}\n\t\treturn\n\t}\n}\n\nfunc (p *promWriteOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {\n\tdc := p.dynCfg.Load()\n\tif dc == nil {\n\t\treturn\n\t}\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tdefault:\n\t\tvar evs = []*formatters.EventMsg{ev}\n\t\tfor _, proc := range dc.evps {\n\t\t\tevs = proc.Apply(evs...)\n\t\t}\n\t\tfor _, pev := range evs {\n\t\t\tp.eventChan <- pev\n\t\t}\n\t}\n}\n\nfunc (p *promWriteOutput) Close() error {\n\tif p.cancelFn != nil {\n\t\tp.cancelFn()\n\t}\n\tp.wg.Wait()\n\n\tclient := p.httpClient.Load()\n\tif client != nil {\n\t\tclient.CloseIdleConnections()\n\t}\n\tp.logger.Printf(\"closed prometheus write output: %s\", p.String())\n\treturn nil\n}\n\nfunc (p *promWriteOutput) String() string {\n\tcfg := p.cfg.Load()\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (p *promWriteOutput) worker(ctx context.Context) {\n\tdefer p.wg.Done()\n\tdefer p.logger.Printf(\"worker stopped\")\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase ev := <-p.eventChan:\n\t\t\tp.workerHandleEvent(ev)\n\t\tcase m := <-p.msgChan:\n\t\t\tp.workerHandleProto(ctx, m)\n\t\t}\n\t}\n}\n\nfunc (p *promWriteOutput) workerHandleProto(_ context.Context, m *outputs.ProtoMsg) {\n\tcfg := p.cfg.Load()\n\tdc := p.dynCfg.Load()\n\n\tpmsg := m.GetMsg()\n\tswitch pmsg := pmsg.(type) {\n\tcase *gnmi.SubscribeResponse:\n\t\tmeta := m.GetMeta()\n\t\tmeasName := \"default\"\n\t\tif subName, ok := meta[\"subscription-name\"]; ok {\n\t\t\tmeasName = subName\n\t\t}\n\t\tvar err error\n\t\tpmsg, err = outputs.AddSubscriptionTarget(pmsg, m.GetMeta(), cfg.AddTarget, dc.targetTpl)\n\t\tif err != nil {\n\t\t\tp.logger.Printf(\"failed to add target to the response: %v\", err)\n\t\t}\n\t\tevents, err := formatters.ResponseToEventMsgs(measName, pmsg, meta, dc.evps...)\n\t\tif err != nil 
{\n\t\t\tp.logger.Printf(\"failed to convert message to event: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tfor _, ev := range events {\n\t\t\tp.workerHandleEvent(ev)\n\t\t}\n\t}\n}\n\nfunc (p *promWriteOutput) workerHandleEvent(ev *formatters.EventMsg) {\n\tcfg := p.cfg.Load()\n\tdc := p.dynCfg.Load()\n\ttsCh := p.timeSeriesCh.Load()\n\n\tif cfg.Debug {\n\t\tp.logger.Printf(\"got event to buffer: %+v\", ev)\n\t}\n\tfor _, pts := range dc.mb.TimeSeriesFromEvent(ev) {\n\t\tif len(*tsCh) >= cfg.BufferSize {\n\t\t\tif cfg.Debug {\n\t\t\t\tp.logger.Printf(\"buffer size reached, triggering write\")\n\t\t\t}\n\t\t\tp.buffDrainCh <- struct{}{}\n\t\t}\n\t\t// populate metadata cache\n\t\tp.m.Lock()\n\t\tif cfg.Debug {\n\t\t\tp.logger.Printf(\"saving metrics metadata\")\n\t\t}\n\t\tp.metadataCache[pts.Name] = prompb.MetricMetadata{\n\t\t\tType:             prompb.MetricMetadata_COUNTER,\n\t\t\tMetricFamilyName: pts.Name,\n\t\t\tHelp:             defaultMetricHelp,\n\t\t}\n\t\tp.m.Unlock()\n\t\t// write time series to buffer\n\t\tif cfg.Debug {\n\t\t\tp.logger.Printf(\"writing TimeSeries to buffer\")\n\t\t}\n\t\t*tsCh <- pts.TS\n\t}\n}\n\nfunc (p *promWriteOutput) setDefaultsFor(c *config) {\n\tif c.Timeout <= 0 {\n\t\tc.Timeout = defaultTimeout\n\t}\n\tif c.Interval <= 0 {\n\t\tc.Interval = defaultWriteInterval\n\t}\n\tif c.BufferSize <= 0 {\n\t\tc.BufferSize = defaultBufferSize\n\t}\n\tif c.NumWorkers <= 0 {\n\t\tc.NumWorkers = defaultNumWorkers\n\t}\n\tif c.NumWriters <= 0 {\n\t\tc.NumWriters = defaultNumWriters\n\t}\n\tif c.MaxTimeSeriesPerWrite <= 0 {\n\t\tc.MaxTimeSeriesPerWrite = defaultMaxTSPerWrite\n\t}\n\tif c.Metadata == nil {\n\t\tc.Metadata = &metadata{\n\t\t\tInclude:            true,\n\t\t\tInterval:           defaultMetadataWriteInterval,\n\t\t\tMaxEntriesPerWrite: defaultMaxMetaDataEntriesPerWrite,\n\t\t}\n\t\treturn\n\t}\n\tif c.Metadata.Include {\n\t\tif c.Metadata.Interval <= 0 {\n\t\t\tc.Metadata.Interval = defaultMetadataWriteInterval\n\t\t}\n\t\tif 
c.Metadata.MaxEntriesPerWrite <= 0 {\n\t\t\tc.Metadata.MaxEntriesPerWrite = defaultMaxMetaDataEntriesPerWrite\n\t\t}\n\t}\n}\n\n// Helper functions\n\nfunc channelNeedsSwap(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.BufferSize != nw.BufferSize\n}\n\nfunc needsWorkerRestart(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.NumWorkers != nw.NumWorkers ||\n\t\told.NumWriters != nw.NumWriters ||\n\t\told.Interval != nw.Interval ||\n\t\tmetadataChanged(old.Metadata, nw.Metadata)\n}\n\nfunc metadataChanged(old, nw *metadata) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.Include != nw.Include ||\n\t\told.Interval != nw.Interval ||\n\t\told.MaxEntriesPerWrite != nw.MaxEntriesPerWrite\n}\n\nfunc needsClientRebuild(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.URL != nw.URL ||\n\t\told.Timeout != nw.Timeout ||\n\t\t!old.TLS.Equal(nw.TLS) ||\n\t\t!authEq(old.Authentication, nw.Authentication) ||\n\t\t!authzEq(old.Authorization, nw.Authorization)\n}\n\nfunc authEq(a, b *auth) bool {\n\tif a == nil && b == nil {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn a.Username == b.Username && a.Password == b.Password\n}\n\nfunc authzEq(a, b *authorization) bool {\n\tif a == nil && b == nil {\n\t\treturn true\n\t}\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\treturn a.Type == b.Type && a.Credentials == b.Credentials\n}\n"
  },
  {
    "path": "pkg/outputs/protometa.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage outputs\n\nimport (\n\t\"google.golang.org/protobuf/proto\"\n)\n\ntype ProtoMsg struct {\n\tm    proto.Message\n\tmeta Meta\n}\n\nfunc NewProtoMsg(m proto.Message, meta Meta) *ProtoMsg {\n\treturn &ProtoMsg{\n\t\tm:    m,\n\t\tmeta: meta,\n\t}\n}\n\nfunc (m *ProtoMsg) GetMsg() proto.Message {\n\tif m == nil {\n\t\treturn nil\n\t}\n\treturn m.m\n}\n\nfunc (m *ProtoMsg) GetMeta() Meta {\n\tif m == nil {\n\t\treturn nil\n\t}\n\treturn m.meta\n}\n"
  },
  {
    "path": "pkg/outputs/snmp_output/snmp_metrics.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage snmpoutput\n\nimport (\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar registerMetricsOnce sync.Once\n\nvar snmpNumberOfSentTraps = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"snmp_output\",\n\tName:      \"number_of_snmp_traps_sent_total\",\n\tHelp:      \"Number of SNMP trap sent by gnmic SNMP output\",\n}, []string{\"name\", \"trap_index\"})\n\nvar snmpNumberOfTrapSendFailureTraps = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"snmp_output\",\n\tName:      \"number_of_snmp_trap_sent_fail_total\",\n\tHelp:      \"Number of SNMP trap sending failures\",\n}, []string{\"name\", \"trap_index\", \"reason\"})\n\nvar snmpNumberOfFailedTrapGeneration = prometheus.NewCounterVec(prometheus.CounterOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"snmp_output\",\n\tName:      \"number_of_snmp_trap_failed_generation\",\n\tHelp:      \"Number of failed trap generation\",\n}, []string{\"name\", \"trap_index\", \"reason\"})\n\nvar snmpTrapGenerationDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\tNamespace: \"gnmic\",\n\tSubsystem: \"snmp_output\",\n\tName:      \"snmp_trap_generation_duration_ns\",\n\tHelp:      \"SNMP trap generation duration in ns\",\n}, []string{\"name\", \"trap_index\"})\n\nfunc (s *snmpOutput) initMetrics() {\n\tsnmpNumberOfSentTraps.WithLabelValues(s.name, \"0\").Add(0)\n\tsnmpNumberOfTrapSendFailureTraps.WithLabelValues(s.name, \"0\", 
\"\").Add(0)\n\tsnmpNumberOfFailedTrapGeneration.WithLabelValues(s.name, \"0\", \"\").Add(0)\n\tsnmpTrapGenerationDuration.WithLabelValues(s.name, \"0\").Set(0)\n}\n\nfunc (s *snmpOutput) registerMetrics() error {\n\tcfg := s.cfg.Load()\n\tif cfg == nil {\n\t\treturn nil\n\t}\n\tif !cfg.EnableMetrics {\n\t\treturn nil\n\t}\n\tif s.reg == nil {\n\t\ts.logger.Printf(\"ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`\")\n\t\treturn nil\n\t}\n\n\tvar err error\n\tregisterMetricsOnce.Do(func() {\n\t\tif err = s.reg.Register(snmpNumberOfSentTraps); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = s.reg.Register(snmpNumberOfTrapSendFailureTraps); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = s.reg.Register(snmpNumberOfFailedTrapGeneration); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = s.reg.Register(snmpTrapGenerationDuration); err != nil {\n\t\t\treturn\n\t\t}\n\t})\n\ts.initMetrics()\n\treturn err\n}\n"
  },
  {
    "path": "pkg/outputs/snmp_output/snmp_output.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage snmpoutput\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"text/template\"\n\t\"time\"\n\n\tg \"github.com/gosnmp/gosnmp\"\n\t\"github.com/itchyny/gojq\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/path\"\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/cache\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\tgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tloggingPrefix           = \"[snmp_output:%s] \"\n\tdefaultPort             = 162\n\tdefaultCommunity        = \"public\"\n\tminStartDelay           = 5 * time.Second\n\tinitialEventsBufferSize = 1000\n\t//\n\tsysUpTimeInstanceOID = \"1.3.6.1.2.1.1.3.0\"\n)\n\nfunc init() {\n\toutputs.Register(\"snmp\", func() outputs.Output {\n\t\treturn &snmpOutput{}\n\t})\n}\n\ntype snmpOutput struct {\n\toutputs.BaseOutput\n\tname       string\n\tcfg        *atomic.Pointer[Config]\n\tdynCfg     *atomic.Pointer[dynConfig]\n\tsnmpClient *atomic.Pointer[g.Handler]\n\tlogger     *log.Logger\n\trootCtx    context.Context\n\tcancelFn   context.CancelFunc\n\teventChan  chan *formatters.EventMsg\n\twg         
*sync.WaitGroup\n\tcache      cache.Cache\n\tstartTime  time.Time\n\n\treg   *prometheus.Registry\n\tstore store.Store[any]\n}\n\ntype dynConfig struct {\n\ttargetTpl *template.Template\n\tevps      []formatters.EventProcessor\n}\n\ntype Config struct {\n\tAddress         string        `mapstructure:\"address,omitempty\" json:\"address,omitempty\"`\n\tPort            uint16        `mapstructure:\"port,omitempty\" json:\"port,omitempty\"`\n\tCommunity       string        `mapstructure:\"community,omitempty\" json:\"community,omitempty\"`\n\tStartDelay      time.Duration `mapstructure:\"start-delay,omitempty\" json:\"start-delay,omitempty\"`\n\tTraps           []*trap       `mapstructure:\"traps,omitempty\" json:\"traps,omitempty\"`\n\tAddTarget       string        `mapstructure:\"add-target,omitempty\" json:\"add-target,omitempty\"`\n\tTargetTemplate  string        `mapstructure:\"target-template,omitempty\" json:\"target-template,omitempty\"`\n\tEnableMetrics   bool          `mapstructure:\"enable-metrics,omitempty\" json:\"enable-metrics,omitempty\"`\n\tEventProcessors []string      `mapstructure:\"event-processors,omitempty\" json:\"event-processors,omitempty\"`\n}\n\ntype binding struct {\n\tPath  string `mapstructure:\"path,omitempty\" json:\"path,omitempty\"`\n\tOID   string `mapstructure:\"oid,omitempty\" json:\"oid,omitempty\"`\n\tType  string `mapstructure:\"type,omitempty\" json:\"type,omitempty\"`\n\tValue string `mapstructure:\"value,omitempty\" json:\"value,omitempty\"`\n\n\tpathTemplate *gojq.Code\n\toidTemplate  *gojq.Code\n\tvalTemplate  *gojq.Code\n}\n\ntype trap struct {\n\tInformPDU bool       `mapstructure:\"inform,omitempty\" json:\"inform,omitempty\"`\n\tTrigger   *binding   `mapstructure:\"trigger,omitempty\" json:\"trigger,omitempty\"`\n\tBindings  []*binding `mapstructure:\"bindings,omitempty\" json:\"bindings,omitempty\"`\n}\n\nfunc (s *snmpOutput) buildEventProcessors(logger *log.Logger, eventProcessors []string) 
([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := gutils.GetConfigMaps(s.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tevps, err := formatters.MakeEventProcessors(\n\t\tlogger,\n\t\teventProcessors,\n\t\tps,\n\t\ttcs,\n\t\tacts,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn evps, nil\n}\n\nfunc (s *snmpOutput) setLogger(logger *log.Logger) {\n\tif logger != nil && s.logger != nil {\n\t\ts.logger.SetOutput(logger.Writer())\n\t\ts.logger.SetFlags(logger.Flags())\n\t}\n}\n\nfunc (s *snmpOutput) init() {\n\ts.cfg = new(atomic.Pointer[Config])\n\ts.dynCfg = new(atomic.Pointer[dynConfig])\n\ts.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags)\n\ts.eventChan = make(chan *formatters.EventMsg, initialEventsBufferSize)\n\ts.snmpClient = new(atomic.Pointer[g.Handler])\n\ts.wg = new(sync.WaitGroup)\n}\n\nfunc (s *snmpOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error {\n\ts.init() // init struct fields\n\tnewCfg := new(Config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.name = name //TODO: atomic ?\n\ts.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name))\n\n\toptions := &outputs.OutputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.store = options.Store\n\n\t// apply logger\n\ts.setLogger(options.Logger)\n\ts.setDefaultsFor(newCfg)\n\n\ts.cfg.Store(newCfg)\n\n\tif len(newCfg.Traps) == 0 {\n\t\treturn errors.New(\"missing traps definition\")\n\t}\n\tdc := new(dynConfig)\n\tdc.evps, err = s.buildEventProcessors(options.Logger, newCfg.EventProcessors)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdc.targetTpl = outputs.DefaultTargetTemplate\n\tif newCfg.TargetTemplate != \"\" {\n\t\tdc.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = 
dc.targetTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\ts.dynCfg.Store(dc)\n\t// initialize registry\n\ts.reg = options.Registry\n\terr = s.registerMetrics()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// initialize traps\n\terr = s.initializeTrapsFor(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.cache, err = cache.New(&cache.Config{Expiration: -1}, cache.WithLogger(s.logger))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.rootCtx = ctx\n\tctx, s.cancelFn = context.WithCancel(s.rootCtx)\n\ts.startTime = time.Now()\n\ts.wg.Add(1)\n\tgo s.start(ctx)\n\ts.logger.Printf(\"initialized SNMP output: %s\", s.String())\n\treturn nil\n}\n\nfunc (s *snmpOutput) Validate(cfg map[string]any) error {\n\tnewCfg := new(Config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(newCfg.Traps) == 0 {\n\t\treturn errors.New(\"missing traps definition\")\n\t}\n\treturn s.initializeTrapsFor(newCfg)\n}\n\nfunc (s *snmpOutput) Update(_ context.Context, cfg map[string]any) error {\n\tnewCfg := new(Config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.setDefaultsFor(newCfg)\n\terr = s.initializeTrapsFor(newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcurrCfg := s.cfg.Load()\n\tprevDC := s.dynCfg.Load()\n\tdc := new(dynConfig)\n\n\tprocessorsChanged := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0\n\tif processorsChanged {\n\t\tdc.evps, err = s.buildEventProcessors(s.logger, newCfg.EventProcessors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\n\tif newCfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tdc.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs)\n\t} else {\n\t\tdc.targetTpl = 
outputs.DefaultTargetTemplate\n\t}\n\n\ts.dynCfg.Store(dc)\n\ts.cfg.Store(newCfg)\n\t// cancel old context if running\n\tif s.cancelFn != nil {\n\t\ts.cancelFn()\n\t\ts.wg.Wait()\n\t}\n\n\t// create new context and start new loop\n\tvar ctx context.Context\n\tctx, s.cancelFn = context.WithCancel(s.rootCtx)\n\ts.wg.Add(1)\n\tgo s.start(ctx)\n\ts.logger.Printf(\"updated SNMP output: %s\", s.String())\n\treturn nil\n}\n\nfunc (s *snmpOutput) UpdateProcessor(name string, pcfg map[string]any) error {\n\tcfg := s.cfg.Load()\n\tdc := s.dynCfg.Load()\n\n\tnewEvps, changed, err := outputs.UpdateProcessorInSlice(\n\t\ts.logger,\n\t\ts.store,\n\t\tcfg.EventProcessors,\n\t\tdc.evps,\n\t\tname,\n\t\tpcfg,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif changed {\n\t\tnewDC := *dc\n\t\tnewDC.evps = newEvps\n\t\ts.dynCfg.Store(&newDC)\n\t\ts.logger.Printf(\"updated event processor %s\", name)\n\t}\n\treturn nil\n}\n\nfunc (s *snmpOutput) initializeTrapsFor(cfg *Config) error {\n\tvar err error\n\tfor i, trap := range cfg.Traps {\n\t\tif trap.Trigger == nil {\n\t\t\treturn fmt.Errorf(\"trap index %d missing \\\"trigger\\\"\", i)\n\t\t}\n\t\tif trap.Trigger.Path == \"\" {\n\t\t\treturn fmt.Errorf(\"trap index %d missing \\\"path\\\"\", i)\n\t\t}\n\t\t// init trap and bindings\n\t\ttrap.Trigger.oidTemplate, err = parseJQ(trap.Trigger.OID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttrap.Trigger.valTemplate, err = parseJQ(trap.Trigger.Value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, bd := range trap.Bindings {\n\t\t\tbd.pathTemplate, err = parseJQ(bd.Path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbd.oidTemplate, err = parseJQ(bd.OID)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbd.valTemplate, err = parseJQ(bd.Value)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *snmpOutput) Write(ctx context.Context, m proto.Message, meta outputs.Meta) {\n\tif m == nil {\n\t\treturn\n\t}\n\n\tcfg 
:= s.cfg.Load()\n\tif cfg == nil {\n\t\treturn\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tdefault:\n\t\tdc := s.dynCfg.Load()\n\t\tif dc == nil {\n\t\t\treturn\n\t\t}\n\t\trsp, err := outputs.AddSubscriptionTarget(m, meta, \"if-not-present\", dc.targetTpl)\n\t\tif err != nil {\n\t\t\ts.logger.Printf(\"failed to add target to the response: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tmeasName := meta[\"subscription-name\"]\n\t\tif measName == \"\" {\n\t\t\tmeasName = \"default\"\n\t\t}\n\n\t\ts.cache.Write(ctx, measName, rsp)\n\n\t\tevents, err := formatters.ResponseToEventMsgs(measName, rsp, meta, dc.evps...)\n\t\tif err != nil {\n\t\t\ts.logger.Printf(\"failed to convert message to event: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tfor _, ev := range events {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase s.eventChan <- ev:\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *snmpOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {}\n\nfunc (s *snmpOutput) Close() error {\n\ts.cancelFn()\n\ts.wg.Wait()\n\tsnmpClient := s.snmpClient.Load()\n\tif snmpClient != nil {\n\t\treturn (*snmpClient).Close()\n\t}\n\treturn nil\n}\n\nfunc (s *snmpOutput) String() string {\n\tcfg := s.cfg.Load()\n\tif cfg == nil {\n\t\treturn \"\"\n\t}\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (s *snmpOutput) start(ctx context.Context) {\n\tdefer s.wg.Done()\n\ts.createSNMPHandler()\n\tvar init = true\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase ev := <-s.eventChan:\n\t\t\tif ev == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcfg := s.cfg.Load()\n\t\t\tif cfg == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif init {\n\t\t\t\t<-time.After(cfg.StartDelay)\n\t\t\t\tinit = false\n\t\t\t}\n\t\t\tfor idx := range cfg.Traps {\n\t\t\t\terr := s.handleEvent(cfg, ev, idx)\n\t\t\t\tif err != nil {\n\t\t\t\t\ts.logger.Printf(\"failed to handle event %+v : %v\", ev, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc 
(s *snmpOutput) setDefaultsFor(cfg *Config) {\n\tif cfg.Port <= 0 {\n\t\tcfg.Port = defaultPort\n\t}\n\tif cfg.Community == \"\" {\n\t\tcfg.Community = defaultCommunity\n\t}\n\tif cfg.StartDelay < minStartDelay {\n\t\tcfg.StartDelay = minStartDelay\n\t}\n}\n\nfunc (s *snmpOutput) createSNMPHandler() {\n\tcfg := s.cfg.Load()\n\tif cfg == nil {\n\t\treturn\n\t}\n\tsnmpClient := g.NewHandler()\n\tsnmpClient.SetTarget(cfg.Address)\n\tsnmpClient.SetCommunity(cfg.Community)\n\tsnmpClient.SetPort(cfg.Port)\n\tsnmpClient.SetVersion(g.Version2c)\nCONN:\n\terr := snmpClient.Connect()\n\tif err != nil {\n\t\ts.logger.Printf(\"failed to connect: %v\", err)\n\t\ttime.Sleep(time.Second)\n\t\tgoto CONN\n\t}\n\ts.logger.Print(\"SNMP connected\")\n\ts.snmpClient.Store(&snmpClient)\n}\n\nfunc pduType(typ string) g.Asn1BER {\n\tswitch typ {\n\tcase \"bool\":\n\t\treturn g.Boolean\n\tcase \"int\":\n\t\treturn g.Integer\n\tcase \"bitString\":\n\t\treturn g.BitString\n\tcase \"octetString\":\n\t\treturn g.OctetString\n\tcase \"null\":\n\t\treturn g.Null\n\tcase \"objectID\":\n\t\treturn g.ObjectIdentifier\n\tcase \"objectDescription\":\n\t\treturn g.ObjectDescription\n\tcase \"ipAddress\":\n\t\treturn g.IPAddress\n\tcase \"counter32\":\n\t\treturn g.Counter32\n\tcase \"gauge32\":\n\t\treturn g.Gauge32\n\tcase \"timeTicks\":\n\t\treturn g.TimeTicks\n\tcase \"opaque\":\n\t\treturn g.Opaque\n\tcase \"nsapAddress\":\n\t\treturn g.NsapAddress\n\tcase \"counter64\":\n\t\treturn g.Counter64\n\tcase \"uint32\":\n\t\treturn g.Uinteger32\n\tcase \"opaqueFloat\":\n\t\treturn g.OpaqueFloat\n\tcase \"opaqueDouble\":\n\t\treturn g.OpaqueDouble\n\t}\n\treturn g.UnknownType\n}\n\nfunc parseJQ(code string) (*gojq.Code, error) {\n\tq, err := gojq.Parse(strings.TrimSpace(code))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn gojq.Compile(q)\n}\n\nfunc (s *snmpOutput) runJQ(code *gojq.Code, ev map[string]interface{}) (interface{}, error) {\n\titer := code.Run(ev)\n\tfor {\n\t\tr, ok := 
iter.Next()\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tswitch r := r.(type) {\n\t\tcase error:\n\t\t\treturn nil, r\n\t\tdefault:\n\t\t\treturn r, nil\n\t\t}\n\t}\n\treturn nil, nil\n}\n\nfunc (s *snmpOutput) handleEvent(cfg *Config, ev *formatters.EventMsg, idx int) error {\n\ttrap := cfg.Traps[idx]\n\t// trigger ?\n\tif _, ok := ev.Values[trap.Trigger.Path]; !ok {\n\t\treturn nil\n\t}\n\tstart := time.Now()\n\tvar err error\n\tvar target string\n\n\tif tg, ok := ev.Tags[\"source\"]; ok {\n\t\ttarget = tg\n\t} else if tg, ok := ev.Tags[\"target\"]; ok {\n\t\ttarget = tg\n\t} else {\n\t\terr = errors.New(\"missing 'source' or 'target' field\")\n\t\tsnmpNumberOfFailedTrapGeneration.WithLabelValues(s.name, fmt.Sprintf(\"%d\", idx), err.Error()).Inc()\n\t\treturn err\n\t}\n\t//\n\tpdus := make([]g.SnmpPDU, 0, len(trap.Bindings)+2)\n\n\t// append systemUptime pdu\n\tpdus = append(pdus, g.SnmpPDU{\n\t\tName:  sysUpTimeInstanceOID,\n\t\tType:  g.TimeTicks,\n\t\tValue: uint32(time.Since(s.startTime).Seconds()),\n\t})\n\n\tpdu, err := s.buildTriggerPDU(trap.Trigger, target, ev)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to build PDU from trigger: %v\", err)\n\t\tsnmpNumberOfFailedTrapGeneration.WithLabelValues(s.name, fmt.Sprintf(\"%d\", idx), err.Error()).Inc()\n\t\treturn err\n\t}\n\n\tpdus = append(pdus, pdu)\n\n\tfor i, bd := range trap.Bindings {\n\t\tpdu, err := s.buildPDUFromCache(bd, target, ev)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"failed to build PDU from binding index %d: %v\", i, err)\n\t\t\ts.logger.Printf(\"%v\", err)\n\t\t\tsnmpNumberOfFailedTrapGeneration.WithLabelValues(s.name, fmt.Sprintf(\"%d\", idx), err.Error()).Inc()\n\t\t\tcontinue\n\t\t}\n\n\t\tpdus = append(pdus, pdu)\n\t}\n\t//\n\tsnmpNumberOfSentTraps.WithLabelValues(s.name, fmt.Sprintf(\"%d\", idx)).Add(1)\n\tsnmpClient := s.snmpClient.Load()\n\tif snmpClient == nil {\n\t\treturn fmt.Errorf(\"SNMP client not initialized\")\n\t}\n\t_, err = 
(*snmpClient).SendTrap(g.SnmpTrap{\n\t\tVariables: pdus,\n\t\tIsInform:  trap.InformPDU,\n\t})\n\tif err != nil {\n\t\tsnmpNumberOfTrapSendFailureTraps.WithLabelValues(s.name, fmt.Sprintf(\"%d\", idx), err.Error()).Inc()\n\t\treturn fmt.Errorf(\"failed to send trap: %v\", err)\n\t}\n\tsnmpTrapGenerationDuration.WithLabelValues(s.name, fmt.Sprintf(\"%d\", idx)).Set(float64(time.Since(start).Nanoseconds()))\n\treturn nil\n}\n\nfunc (s *snmpOutput) buildTriggerPDU(bd *binding, targetName string, ev *formatters.EventMsg) (g.SnmpPDU, error) {\n\tvar oid string\n\tvar val any\n\tinput := ev.ToMap()\n\toidResult, err := s.runJQ(bd.oidTemplate, input)\n\tif err != nil {\n\t\treturn g.SnmpPDU{}, fmt.Errorf(\"failed to run OID JQ: %v\", err)\n\t}\n\n\tvar ok bool\n\toid, ok = oidResult.(string)\n\tif !ok {\n\t\treturn g.SnmpPDU{}, fmt.Errorf(\"unexpected OID result type: %T\", oidResult)\n\t}\n\tval, err = s.runJQ(bd.valTemplate, input)\n\tif err != nil {\n\t\treturn g.SnmpPDU{}, fmt.Errorf(\"failed to run Value JQ: %v\", err)\n\t}\n\n\tpdu := g.SnmpPDU{\n\t\tName:  oid,\n\t\tType:  pduType(bd.Type),\n\t\tValue: val,\n\t}\n\treturn pdu, nil\n}\n\nfunc (s *snmpOutput) buildPDUFromCache(bd *binding, targetName string, ev *formatters.EventMsg) (g.SnmpPDU, error) {\n\tinput := ev.ToMap()\n\tpathResult, err := s.runJQ(bd.pathTemplate, input)\n\tif err != nil {\n\t\treturn g.SnmpPDU{}, fmt.Errorf(\"failed to run path JQ: %v\", err)\n\t}\n\txpath, ok := pathResult.(string)\n\tif !ok {\n\t\treturn g.SnmpPDU{}, fmt.Errorf(\"unexpected XPATH result type: %T\", pathResult)\n\t}\n\n\tgp, err := path.ParsePath(xpath)\n\tif err != nil {\n\t\treturn g.SnmpPDU{}, err\n\t}\n\trsps, err := s.cache.Read(\"*\", targetName, gp)\n\tif err != nil {\n\t\treturn g.SnmpPDU{}, err\n\t}\n\tevs := make([]*formatters.EventMsg, 0)\n\tfor subName, notifs := range rsps {\n\t\tfor _, notif := range notifs {\n\t\t\trevs, err := formatters.ResponseToEventMsgs(ev.Name, 
&gnmi.SubscribeResponse{\n\t\t\t\tResponse: &gnmi.SubscribeResponse_Update{\n\t\t\t\t\tUpdate: notif,\n\t\t\t\t},\n\t\t\t}, map[string]string{\"subscription-name\": subName})\n\t\t\tif err != nil {\n\t\t\t\treturn g.SnmpPDU{}, err\n\t\t\t}\n\t\t\tevs = append(evs, revs...)\n\t\t}\n\t}\n\n\tif len(evs) != 1 {\n\t\treturn g.SnmpPDU{}, errors.New(\"failed to build PDU, corresponding value not found or too many values found\")\n\t}\n\n\tpduInput := evs[0].ToMap()\n\toidResult, err := s.runJQ(bd.oidTemplate, pduInput)\n\tif err != nil {\n\t\treturn g.SnmpPDU{}, fmt.Errorf(\"failed to run OID JQ: %v\", err)\n\t}\n\n\toid, ok := oidResult.(string)\n\tif !ok {\n\t\treturn g.SnmpPDU{}, fmt.Errorf(\"unexpected OID result type: %T\", oidResult)\n\t}\n\tval, err := s.runJQ(bd.valTemplate, pduInput)\n\tif err != nil {\n\t\treturn g.SnmpPDU{}, fmt.Errorf(\"failed to run Value JQ: %v\", err)\n\t}\n\n\tpdu := g.SnmpPDU{\n\t\tName:  oid,\n\t\tType:  pduType(bd.Type),\n\t\tValue: val,\n\t}\n\treturn pdu, nil\n}\n"
  },
  {
    "path": "pkg/outputs/tcp_output/tcp_output.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage tcp_output\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"slices\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\tgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tdefaultRetryTimer = 2 * time.Second\n\tdefaultNumWorkers = 1\n\tloggingPrefix     = \"[tcp_output:%s] \"\n)\n\nfunc init() {\n\toutputs.Register(\"tcp\", func() outputs.Output {\n\t\treturn &tcpOutput{}\n\t})\n}\n\ntype tcpOutput struct {\n\toutputs.BaseOutput\n\n\tcfg      *atomic.Pointer[config]\n\tdynCfg   *atomic.Pointer[dynConfig]\n\trootCtx  context.Context\n\tcancelFn context.CancelFunc\n\twg       *sync.WaitGroup\n\tbuffer   *atomic.Pointer[chan []byte]\n\tlogger   *log.Logger\n\n\tstore store.Store[any]\n}\n\ntype dynConfig struct {\n\ttargetTpl *template.Template\n\tevps      []formatters.EventProcessor\n\tmo        *formatters.MarshalOptions\n\tdelimiter []byte\n\tlimiter   *time.Ticker\n}\n\ntype config struct {\n\tAddress            string        `mapstructure:\"address,omitempty\"` // ip:port\n\tRate               time.Duration `mapstructure:\"rate,omitempty\"`\n\tBufferSize         uint          `mapstructure:\"buffer-size,omitempty\"`\n\tFormat    
         string        `mapstructure:\"format,omitempty\"`\n\tAddTarget          string        `mapstructure:\"add-target,omitempty\"`\n\tTargetTemplate     string        `mapstructure:\"target-template,omitempty\"`\n\tOverrideTimestamps bool          `mapstructure:\"override-timestamps,omitempty\"`\n\tSplitEvents        bool          `mapstructure:\"split-events,omitempty\"`\n\tDelimiter          string        `mapstructure:\"delimiter,omitempty\"`\n\tKeepAlive          time.Duration `mapstructure:\"keep-alive,omitempty\"`\n\tRetryInterval      time.Duration `mapstructure:\"retry-interval,omitempty\"`\n\tNumWorkers         int           `mapstructure:\"num-workers,omitempty\"`\n\tEnableMetrics      bool          `mapstructure:\"enable-metrics,omitempty\"`\n\tEventProcessors    []string      `mapstructure:\"event-processors,omitempty\"`\n}\n\nfunc (t *tcpOutput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := gutils.GetConfigMaps(t.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tevps, err := formatters.MakeEventProcessors(\n\t\tlogger,\n\t\teventProcessors,\n\t\tps,\n\t\ttcs,\n\t\tacts,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn evps, nil\n}\n\nfunc (t *tcpOutput) init() {\n\tt.cfg = new(atomic.Pointer[config])\n\tt.dynCfg = new(atomic.Pointer[dynConfig])\n\tt.buffer = new(atomic.Pointer[chan []byte])\n\tt.wg = new(sync.WaitGroup)\n\tt.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags)\n}\n\nfunc (t *tcpOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error {\n\tt.init()\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsetDefaultsFor(newCfg)\n\tt.cfg.Store(newCfg)\n\tt.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name))\n\n\toptions := &outputs.OutputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil 
{\n\t\t\treturn err\n\t\t}\n\t}\n\n\tt.store = options.Store\n\n\t// apply logger\n\tif options.Logger != nil && t.logger != nil {\n\t\tt.logger.SetOutput(options.Logger.Writer())\n\t\tt.logger.SetFlags(options.Logger.Flags())\n\t}\n\n\tdc := new(dynConfig)\n\t// initialize event processors\n\tdc.evps, err = t.buildEventProcessors(options.Logger, newCfg.EventProcessors)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdc.mo = &formatters.MarshalOptions{\n\t\tFormat:     newCfg.Format,\n\t\tOverrideTS: newCfg.OverrideTimestamps,\n\t}\n\tif newCfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tdc.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\n\t_, _, err = net.SplitHostPort(newCfg.Address)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"wrong address format: %v\", err)\n\t}\n\tch := make(chan []byte, newCfg.BufferSize)\n\tt.buffer.Store(&ch)\n\tif newCfg.Rate > 0 {\n\t\tdc.limiter = time.NewTicker(newCfg.Rate)\n\t}\n\tif len(newCfg.Delimiter) > 0 {\n\t\tdc.delimiter = []byte(newCfg.Delimiter)\n\t}\n\n\tt.dynCfg.Store(dc)\n\tt.cfg.Store(newCfg)\n\tt.rootCtx = ctx\n\tctx, t.cancelFn = context.WithCancel(t.rootCtx)\n\tt.wg.Add(newCfg.NumWorkers)\n\tfor i := 0; i < newCfg.NumWorkers; i++ {\n\t\tgo t.start(ctx, i)\n\t}\n\treturn nil\n}\n\nfunc setDefaultsFor(cfg *config) {\n\tif cfg.RetryInterval == 0 {\n\t\tcfg.RetryInterval = defaultRetryTimer\n\t}\n\tif cfg.NumWorkers < 1 {\n\t\tcfg.NumWorkers = defaultNumWorkers\n\t}\n}\n\nfunc validate(cfg *config) error {\n\tif cfg.Address == \"\" {\n\t\treturn errors.New(\"address is required\")\n\t}\n\t_, _, err := net.SplitHostPort(cfg.Address)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"wrong address format: %v\", err)\n\t}\n\tif cfg.TargetTemplate == \"\" {\n\t\treturn errors.New(\"target-template is 
required\")\n\t}\n\treturn nil\n}\n\nfunc (t *tcpOutput) Validate(cfg map[string]any) error {\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsetDefaultsFor(newCfg)\n\treturn validate(newCfg)\n}\n\nfunc (t *tcpOutput) Update(_ context.Context, cfg map[string]any) error {\n\tnewCfg := new(config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsetDefaultsFor(newCfg)\n\tcurrCfg := t.cfg.Load()\n\n\tswapChannel := channelNeedsSwap(currCfg, newCfg)\n\trestartWorkers := needsWorkerRestart(currCfg, newCfg)\n\trebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0\n\n\tdc := new(dynConfig)\n\tprevDC := t.dynCfg.Load()\n\tif rebuildProcessors {\n\t\tdc.evps, err = t.buildEventProcessors(t.logger, newCfg.EventProcessors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\tdc.delimiter = []byte(newCfg.Delimiter)\n\tif newCfg.Rate > 0 {\n\t\t// if rate changed\n\t\tif currCfg.Rate != newCfg.Rate {\n\t\t\tif prevDC != nil && prevDC.limiter != nil {\n\t\t\t\tprevDC.limiter.Stop()\n\t\t\t}\n\t\t\tdc.limiter = time.NewTicker(newCfg.Rate)\n\t\t} else {\n\t\t\tdc.limiter = prevDC.limiter\n\t\t}\n\t} else if prevDC != nil && prevDC.limiter != nil { // stop old limiter if any\n\t\tprevDC.limiter.Stop()\n\t}\n\tdc.mo = &formatters.MarshalOptions{\n\t\tFormat:     newCfg.Format,\n\t\tOverrideTS: newCfg.OverrideTimestamps,\n\t}\n\n\tif newCfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tdc.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs)\n\t} else {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t}\n\tt.dynCfg.Store(dc)\n\tt.cfg.Store(newCfg)\n\toldChan := 
*t.buffer.Load()\n\toldWg := t.wg\n\tt.wg = new(sync.WaitGroup)\n\toldCancel := t.cancelFn\n\tif swapChannel || restartWorkers {\n\t\tvar newChan chan []byte\n\t\tif swapChannel {\n\t\t\tnewChan = make(chan []byte, newCfg.BufferSize)\n\t\t} else {\n\t\t\tnewChan = oldChan\n\t\t}\n\t\t// swap channel\n\t\tt.buffer.Store(&newChan)\n\n\t\tvar ctx context.Context\n\t\tctx, t.cancelFn = context.WithCancel(t.rootCtx)\n\t\tt.wg.Add(newCfg.NumWorkers)\n\t\tfor i := 0; i < newCfg.NumWorkers; i++ {\n\t\t\tgo t.start(ctx, i)\n\t\t}\n\t\tif oldCancel != nil {\n\t\t\toldCancel()\n\t\t}\n\t\tif oldWg != nil {\n\t\t\toldWg.Wait()\n\t\t}\n\t\tif swapChannel {\n\t\tDRAIN_LOOP:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase b, ok := <-oldChan:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase newChan <- b:\n\t\t\t\t\tdefault:\n\t\t\t\t\t\t// new channel full, drop message\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tbreak DRAIN_LOOP\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tt.logger.Printf(\"restarted TCP output workers\")\n\t} else {\n\t\tt.logger.Printf(\"no changes to TCP output\")\n\t}\n\tt.logger.Printf(\"updated TCP output: %s\", t.String())\n\treturn nil\n}\n\nfunc (t *tcpOutput) UpdateProcessor(name string, pcfg map[string]any) error {\n\tcfg := t.cfg.Load()\n\tdc := t.dynCfg.Load()\n\n\tnewEvps, changed, err := outputs.UpdateProcessorInSlice(\n\t\tt.logger,\n\t\tt.store,\n\t\tcfg.EventProcessors,\n\t\tdc.evps,\n\t\tname,\n\t\tpcfg,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif changed {\n\t\tnewDC := *dc\n\t\tnewDC.evps = newEvps\n\t\tt.dynCfg.Store(&newDC)\n\t\tt.logger.Printf(\"updated event processor %s\", name)\n\t}\n\treturn nil\n}\n\nfunc (t *tcpOutput) Write(ctx context.Context, m proto.Message, meta outputs.Meta) {\n\tif m == nil {\n\t\treturn\n\t}\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tdefault:\n\t\tcfg := t.cfg.Load()\n\t\tdc := t.dynCfg.Load()\n\t\trsp, err := outputs.AddSubscriptionTarget(m, meta, cfg.AddTarget, 
dc.targetTpl)\n\t\tif err != nil {\n\t\t\tt.logger.Printf(\"failed to add target to the response: %v\", err)\n\t\t}\n\t\tbb, err := outputs.Marshal(rsp, meta, dc.mo, cfg.SplitEvents, dc.evps...)\n\t\tif err != nil {\n\t\t\tt.logger.Printf(\"failed marshaling proto msg: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tbuffer := t.buffer.Load()\n\t\tfor _, b := range bb {\n\t\t\t(*buffer) <- b\n\t\t}\n\t}\n}\n\nfunc (t *tcpOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {}\n\nfunc (t *tcpOutput) Close() error {\n\tt.cancelFn()\n\tt.wg.Wait()\n\tdc := t.dynCfg.Load()\n\tif dc != nil && dc.limiter != nil {\n\t\tdc.limiter.Stop()\n\t}\n\treturn nil\n}\n\nfunc (t *tcpOutput) String() string {\n\tcfg := t.cfg.Load()\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (t *tcpOutput) start(ctx context.Context, idx int) {\n\tdefer t.wg.Done()\n\tworkerLogPrefix := fmt.Sprintf(\"worker-%d\", idx)\nSTART:\n\tif ctx.Err() != nil {\n\t\tt.logger.Printf(\"context error: %v\", ctx.Err())\n\t\treturn\n\t}\n\tcfg := t.cfg.Load()\n\tdc := t.dynCfg.Load()\n\ttcpAddr, err := net.ResolveTCPAddr(\"tcp\", cfg.Address)\n\tif err != nil {\n\t\tt.logger.Printf(\"%s failed to resolve address: %v\", workerLogPrefix, err)\n\t\ttime.Sleep(cfg.RetryInterval)\n\t\tgoto START\n\t}\n\tconn, err := net.DialTCP(\"tcp\", nil, tcpAddr)\n\tif err != nil {\n\t\tt.logger.Printf(\"%s failed to dial TCP: %v\", workerLogPrefix, err)\n\t\ttime.Sleep(cfg.RetryInterval)\n\t\tgoto START\n\t}\n\tdefer conn.Close()\n\tif cfg.KeepAlive > 0 {\n\t\tconn.SetKeepAlive(true)\n\t\tconn.SetKeepAlivePeriod(cfg.KeepAlive)\n\t}\n\tbuffer := *t.buffer.Load()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase b := <-buffer:\n\t\t\tdelimiter := dc.delimiter\n\t\t\tif dc.limiter != nil {\n\t\t\t\t<-dc.limiter.C\n\t\t\t}\n\t\t\t// append delimiter\n\t\t\tb = append(b, delimiter...)\n\t\t\t_, err = conn.Write(b)\n\t\t\tif err != nil 
{\n\t\t\t\tt.logger.Printf(\"%s failed sending tcp bytes: %v\", workerLogPrefix, err)\n\t\t\t\tconn.Close()\n\t\t\t\ttime.Sleep(cfg.RetryInterval)\n\t\t\t\tgoto START\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc channelNeedsSwap(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.BufferSize != nw.BufferSize\n}\n\nfunc needsWorkerRestart(old, nw *config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.NumWorkers != nw.NumWorkers\n}\n"
  },
  {
    "path": "pkg/outputs/udp_output/udp_output.go",
    "content": "// © 2022 Nokia.\n//\n// This code is a Contribution to the gNMIc project (\"Work\") made under the Google Software Grant and Corporate Contributor License Agreement (\"CLA\") and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose.\n// This code is provided on an \"as is\" basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage udp_output\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"slices\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"google.golang.org/protobuf/proto\"\n\n\t\"github.com/openconfig/gnmic/pkg/api/utils\"\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/gtemplate\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\tgutils \"github.com/openconfig/gnmic/pkg/utils\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nconst (\n\tdefaultRetryTimer = 2 * time.Second\n\tloggingPrefix     = \"[udp_output:%s] \"\n)\n\nfunc init() {\n\toutputs.Register(\"udp\", func() outputs.Output {\n\t\treturn &udpSock{}\n\t})\n}\n\ntype udpSock struct {\n\toutputs.BaseOutput\n\n\tcfg    *atomic.Pointer[Config]\n\tdynCfg *atomic.Pointer[dynConfig]\n\tbuffer *atomic.Pointer[chan []byte]\n\n\trootCtx  context.Context\n\tcancelFn context.CancelFunc\n\twg       *sync.WaitGroup\n\tlogger   *log.Logger\n\n\tstore store.Store[any]\n}\n\ntype dynConfig struct {\n\ttargetTpl *template.Template\n\tevps      []formatters.EventProcessor\n\tmo        *formatters.MarshalOptions\n\tlimiter   *time.Ticker\n}\n\ntype Config struct {\n\tAddress            string        `mapstructure:\"address,omitempty\"` // ip:port\n\tRate               time.Duration `mapstructure:\"rate,omitempty\"`\n\tBufferSize         uint          `mapstructure:\"buffer-size,omitempty\"`\n\tFormat             string        
`mapstructure:\"format,omitempty\"`\n\tAddTarget          string        `mapstructure:\"add-target,omitempty\"`\n\tTargetTemplate     string        `mapstructure:\"target-template,omitempty\"`\n\tOverrideTimestamps bool          `mapstructure:\"override-timestamps,omitempty\"`\n\tSplitEvents        bool          `mapstructure:\"split-events,omitempty\"`\n\tRetryInterval      time.Duration `mapstructure:\"retry-interval,omitempty\"`\n\tEnableMetrics      bool          `mapstructure:\"enable-metrics,omitempty\"`\n\tEventProcessors    []string      `mapstructure:\"event-processors,omitempty\"`\n}\n\nfunc (u *udpSock) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) {\n\ttcs, ps, acts, err := gutils.GetConfigMaps(u.store)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tevps, err := formatters.MakeEventProcessors(\n\t\tlogger,\n\t\teventProcessors,\n\t\tps,\n\t\ttcs,\n\t\tacts,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn evps, nil\n}\n\nfunc (u *udpSock) init() {\n\tu.cfg = new(atomic.Pointer[Config])\n\tu.dynCfg = new(atomic.Pointer[dynConfig])\n\tu.buffer = new(atomic.Pointer[chan []byte])\n\tu.wg = new(sync.WaitGroup)\n\tu.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags)\n}\n\nfunc (u *udpSock) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error {\n\tu.init()\n\n\tnewCfg := new(Config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsetDefaultsFor(newCfg)\n\n\tu.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name))\n\n\toptions := &outputs.OutputOptions{}\n\tfor _, opt := range opts {\n\t\tif err := opt(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tu.store = options.Store\n\n\t// apply logger\n\tif options.Logger != nil && u.logger != nil {\n\t\tu.logger.SetOutput(options.Logger.Writer())\n\t\tu.logger.SetFlags(options.Logger.Flags())\n\t}\n\n\tdc := new(dynConfig)\n\t// initialize 
event processors\n\tdc.evps, err = u.buildEventProcessors(options.Logger, newCfg.EventProcessors)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, _, err = net.SplitHostPort(newCfg.Address)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"wrong address format: %v\", err)\n\t}\n\n\tif newCfg.Rate > 0 {\n\t\tdc.limiter = time.NewTicker(newCfg.Rate)\n\t}\n\n\tdc.mo = &formatters.MarshalOptions{\n\t\tFormat:     newCfg.Format,\n\t\tOverrideTS: newCfg.OverrideTimestamps,\n\t}\n\n\tif newCfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tdc.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs)\n\t}\n\n\tch := make(chan []byte, newCfg.BufferSize)\n\tu.buffer.Store(&ch)\n\tu.dynCfg.Store(dc)\n\tu.cfg.Store(newCfg)\n\n\tu.rootCtx = ctx\n\tu.rootCtx, u.cancelFn = context.WithCancel(u.rootCtx)\n\n\tu.wg.Add(1)\n\tgo u.start(u.rootCtx)\n\n\tu.logger.Printf(\"initialized UDP output: %s\", u.String())\n\treturn nil\n}\n\nfunc setDefaultsFor(cfg *Config) {\n\tif cfg.RetryInterval == 0 {\n\t\tcfg.RetryInterval = defaultRetryTimer\n\t}\n}\n\nfunc validate(cfg *Config) error {\n\tif cfg.Address == \"\" {\n\t\treturn errors.New(\"address is required\")\n\t}\n\t_, _, err := net.SplitHostPort(cfg.Address)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"wrong address format: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc (u *udpSock) Validate(cfg map[string]any) error {\n\tnewCfg := new(Config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsetDefaultsFor(newCfg)\n\treturn validate(newCfg)\n}\n\nfunc (u *udpSock) Update(_ context.Context, cfg map[string]any) error {\n\tnewCfg := new(Config)\n\terr := outputs.DecodeConfig(cfg, newCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsetDefaultsFor(newCfg)\n\tcurrCfg := u.cfg.Load()\n\n\tswapChannel := 
channelNeedsSwap(currCfg, newCfg)\n\trestartWorker := needsWorkerRestart(currCfg, newCfg)\n\trebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0\n\n\tdc := new(dynConfig)\n\tprevDC := u.dynCfg.Load()\n\n\tif rebuildProcessors {\n\t\tdc.evps, err = u.buildEventProcessors(u.logger, newCfg.EventProcessors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if prevDC != nil {\n\t\tdc.evps = prevDC.evps\n\t}\n\n\t// handle rate limiter changes\n\tif newCfg.Rate > 0 {\n\t\tif currCfg.Rate != newCfg.Rate {\n\t\t\t// rate changed, stop old limiter and create new one\n\t\t\tif prevDC != nil && prevDC.limiter != nil {\n\t\t\t\tprevDC.limiter.Stop()\n\t\t\t}\n\t\t\tdc.limiter = time.NewTicker(newCfg.Rate)\n\t\t} else {\n\t\t\t// rate unchanged, copy old limiter\n\t\t\tif prevDC != nil {\n\t\t\t\tdc.limiter = prevDC.limiter\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// no rate limiting, stop old limiter if any\n\t\tif prevDC != nil && prevDC.limiter != nil {\n\t\t\tprevDC.limiter.Stop()\n\t\t}\n\t\tdc.limiter = nil\n\t}\n\n\tdc.mo = &formatters.MarshalOptions{\n\t\tFormat:     newCfg.Format,\n\t\tOverrideTS: newCfg.OverrideTimestamps,\n\t}\n\n\tif newCfg.TargetTemplate == \"\" {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t} else if newCfg.AddTarget != \"\" {\n\t\tdc.targetTpl, err = gtemplate.CreateTemplate(\"target-template\", newCfg.TargetTemplate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs)\n\t} else {\n\t\tdc.targetTpl = outputs.DefaultTargetTemplate\n\t}\n\n\t// store new configs\n\tu.dynCfg.Store(dc)\n\tu.cfg.Store(newCfg)\n\n\tif swapChannel || restartWorker {\n\t\tvar newChan chan []byte\n\t\toldChan := *u.buffer.Load()\n\n\t\tif swapChannel {\n\t\t\tnewChan = make(chan []byte, newCfg.BufferSize)\n\t\t} else {\n\t\t\tnewChan = oldChan\n\t\t}\n\n\t\t// create new context and WaitGroup\n\t\trunCtx, cancel := context.WithCancel(u.rootCtx)\n\t\tnewWG := 
new(sync.WaitGroup)\n\n\t\t// Save old pointers\n\t\toldCancel := u.cancelFn\n\t\toldWG := u.wg\n\n\t\t// swap\n\t\tu.cancelFn = cancel\n\t\tu.wg = newWG\n\t\tu.buffer.Store(&newChan)\n\n\t\t// start new worker\n\t\tu.wg.Add(1)\n\t\tgo u.start(runCtx)\n\n\t\t// cancel old worker and wait\n\t\tif oldCancel != nil {\n\t\t\toldCancel()\n\t\t}\n\t\tif oldWG != nil {\n\t\t\toldWG.Wait()\n\t\t}\n\n\t\t// drain old channel if we swapped\n\t\tif swapChannel {\n\t\tDRAIN_LOOP:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase b, ok := <-oldChan:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase newChan <- b:\n\t\t\t\t\tdefault:\n\t\t\t\t\t\t// new channel is full, drop message\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tbreak DRAIN_LOOP\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tu.logger.Printf(\"restarted UDP output worker\")\n\t}\n\n\tu.logger.Printf(\"updated UDP output: %s\", u.String())\n\treturn nil\n}\n\nfunc (u *udpSock) UpdateProcessor(name string, pcfg map[string]any) error {\n\tcfg := u.cfg.Load()\n\tdc := u.dynCfg.Load()\n\n\tnewEvps, changed, err := outputs.UpdateProcessorInSlice(\n\t\tu.logger,\n\t\tu.store,\n\t\tcfg.EventProcessors,\n\t\tdc.evps,\n\t\tname,\n\t\tpcfg,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif changed {\n\t\tnewDC := *dc\n\t\tnewDC.evps = newEvps\n\t\tu.dynCfg.Store(&newDC)\n\t\tu.logger.Printf(\"updated event processor %s\", name)\n\t}\n\treturn nil\n}\n\nfunc (u *udpSock) Write(ctx context.Context, m proto.Message, meta outputs.Meta) {\n\tif m == nil {\n\t\treturn\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn\n\tdefault:\n\t\tcfg := u.cfg.Load()\n\t\tdc := u.dynCfg.Load()\n\t\tif cfg == nil || dc == nil {\n\t\t\treturn\n\t\t}\n\n\t\trsp, err := outputs.AddSubscriptionTarget(m, meta, cfg.AddTarget, dc.targetTpl)\n\t\tif err != nil {\n\t\t\tu.logger.Printf(\"failed to add target to the response: %v\", err)\n\t\t}\n\t\tbb, err := outputs.Marshal(rsp, meta, dc.mo, cfg.SplitEvents, dc.evps...)\n\t\tif err != 
nil {\n\t\t\tu.logger.Printf(\"failed marshaling proto msg: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tbuffer := u.buffer.Load()\n\t\tif buffer == nil {\n\t\t\treturn\n\t\t}\n\t\tfor _, b := range bb {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase (*buffer) <- b:\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (u *udpSock) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {}\n\nfunc (u *udpSock) Close() error {\n\tif u.cancelFn != nil {\n\t\tu.cancelFn()\n\t}\n\tu.wg.Wait()\n\n\t// Stop limiter if exists\n\tdc := u.dynCfg.Load()\n\tif dc != nil && dc.limiter != nil {\n\t\tdc.limiter.Stop()\n\t}\n\treturn nil\n}\n\nfunc (u *udpSock) String() string {\n\tcfg := u.cfg.Load()\n\tif cfg == nil {\n\t\treturn \"\"\n\t}\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(b)\n}\n\nfunc (u *udpSock) start(ctx context.Context) {\n\tdefer u.wg.Done()\n\nDIAL:\n\tif ctx.Err() != nil {\n\t\tu.logger.Printf(\"context error: %v\", ctx.Err())\n\t\treturn\n\t}\n\n\tcfg := u.cfg.Load()\n\tdc := u.dynCfg.Load()\n\tif cfg == nil || dc == nil {\n\t\tu.logger.Printf(\"config not loaded\")\n\t\treturn\n\t}\n\n\tudpAddr, err := net.ResolveUDPAddr(\"udp\", cfg.Address)\n\tif err != nil {\n\t\tu.logger.Printf(\"failed to resolve UDP address: %v\", err)\n\t\ttime.Sleep(cfg.RetryInterval)\n\t\tgoto DIAL\n\t}\n\n\tconn, err := net.DialUDP(\"udp\", nil, udpAddr)\n\tif err != nil {\n\t\tu.logger.Printf(\"failed to dial UDP: %v\", err)\n\t\ttime.Sleep(cfg.RetryInterval)\n\t\tgoto DIAL\n\t}\n\tdefer conn.Close()\n\n\tu.logger.Printf(\"connected to %s\", cfg.Address)\n\n\t// Snapshot buffer and limiter at connection time\n\tbuffer := *u.buffer.Load()\n\tlimiter := dc.limiter\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tu.logger.Printf(\"UDP worker shutting down\")\n\t\t\treturn\n\t\tcase b, ok := <-buffer:\n\t\t\tif !ok {\n\t\t\t\tu.logger.Printf(\"buffer channel closed\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif limiter != nil {\n\t\t\t\tselect 
{\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase <-limiter.C:\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t_, err = conn.Write(b)\n\t\t\tif err != nil {\n\t\t\t\tu.logger.Printf(\"failed sending UDP bytes: %v\", err)\n\t\t\t\tconn.Close()\n\t\t\t\ttime.Sleep(cfg.RetryInterval)\n\t\t\t\tgoto DIAL\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc channelNeedsSwap(old, nw *Config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.BufferSize != nw.BufferSize\n}\n\nfunc needsWorkerRestart(old, nw *Config) bool {\n\tif old == nil || nw == nil {\n\t\treturn true\n\t}\n\treturn old.Address != nw.Address\n}\n"
  },
  {
    "path": "pkg/pipeline/pipeline.go",
    "content": "package pipeline\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/formatters\"\n\t\"github.com/openconfig/gnmic/pkg/outputs\"\n\t\"google.golang.org/protobuf/proto\"\n)\n\n// Msg contains the data to be passed from targets or inputs to outputs.\ntype Msg struct {\n\tMsg     proto.Message\n\tMeta    outputs.Meta\n\tEvents  []*formatters.EventMsg\n\tOutputs map[string]struct{}\n}\n\nfunc NewMsg(msg proto.Message, meta outputs.Meta,\n\tevents []*formatters.EventMsg,\n\toutputs map[string]struct{}) *Msg {\n\treturn &Msg{\n\t\tMsg:     msg,\n\t\tMeta:    meta,\n\t\tEvents:  events,\n\t\tOutputs: outputs,\n\t}\n}\n"
  },
  {
    "path": "pkg/utils/authbrearer.go",
    "content": "// originally from: https://github.com/damiannolan/sasl\n\n// © 2024 Nokia.\n//\n// This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0.\n// No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose.\n// This code is provided on an “as is” basis without any warranties of any kind.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage utils\n\nimport (\n\t\"context\"\n\n\t\"github.com/IBM/sarama\"\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/clientcredentials\"\n)\n\n// TokenProvider is a simple struct that implements sarama.AccessTokenProvider.\ntype TokenProvider struct {\n\ttokenSource oauth2.TokenSource\n}\n\nfunc NewTokenProvider(clientID, clientSecret, tokenURL string) sarama.AccessTokenProvider {\n\tcfg := clientcredentials.Config{\n\t\tClientID:     clientID,\n\t\tClientSecret: clientSecret,\n\t\tTokenURL:     tokenURL,\n\t}\n\n\treturn &TokenProvider{\n\t\ttokenSource: cfg.TokenSource(context.Background()),\n\t}\n}\n\nfunc (t *TokenProvider) Token() (*sarama.AccessToken, error) {\n\ttoken, err := t.tokenSource.Token()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &sarama.AccessToken{Token: token.AccessToken}, nil\n}\n"
  },
  {
    "path": "pkg/utils/gnmi.go",
    "content": "package utils\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/AlekSi/pointer\"\n\t\"github.com/openconfig/gnmi/proto/gnmi\"\n\t\"github.com/openconfig/gnmic/pkg/api\"\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n)\n\nconst (\n\tSubscriptionMode_STREAM               = \"STREAM\"\n\tSubscriptionMode_ONCE                 = \"ONCE\"\n\tSubscriptionMode_POLL                 = \"POLL\"\n\tSubscriptionStreamMode_TARGET_DEFINED = \"TARGET_DEFINED\"\n\tSubscriptionStreamMode_ON_CHANGE      = \"ON_CHANGE\"\n\tSubscriptionStreamMode_SAMPLE         = \"SAMPLE\"\n)\n\nconst (\n\tsubscriptionDefaultMode       = SubscriptionMode_STREAM\n\tsubscriptionDefaultStreamMode = SubscriptionStreamMode_TARGET_DEFINED\n\tsubscriptionDefaultEncoding   = \"JSON\"\n)\n\nvar ErrConfig = errors.New(\"config error\")\n\nfunc CreateSubscribeRequest(cfg *types.SubscriptionConfig, tc *types.TargetConfig, defaultEncoding string) (*gnmi.SubscribeRequest, error) {\n\tif err := validateAndSetDefaults(cfg); err != nil {\n\t\treturn nil, err\n\t}\n\tgnmiOpts, err := SubscriptionOpts(cfg, tc, defaultEncoding)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn api.NewSubscribeRequest(gnmiOpts...)\n}\n\nfunc validateAndSetDefaults(sc *types.SubscriptionConfig) error {\n\tnumPaths := len(sc.Paths)\n\tnumStreamSubs := len(sc.StreamSubscriptions)\n\tif sc.Prefix == \"\" && numPaths == 0 && numStreamSubs == 0 {\n\t\treturn fmt.Errorf(\"%w: missing path(s) in subscription %q\", ErrConfig, sc.Name)\n\t}\n\n\tif numPaths > 0 && numStreamSubs > 0 {\n\t\treturn fmt.Errorf(\"%w: subscription %q: cannot set 'paths' and 'stream-subscriptions' at the same time\", ErrConfig, sc.Name)\n\t}\n\n\t// validate subscription Mode\n\tswitch strings.ToUpper(sc.Mode) {\n\tcase \"\":\n\t\tsc.Mode = subscriptionDefaultMode\n\tcase \"ONCE\", \"POLL\":\n\t\tif numStreamSubs > 0 {\n\t\t\treturn fmt.Errorf(\"%w: subscription %q: cannot set 'stream-subscriptions' and 'mode'\", 
ErrConfig, sc.Name)\n\t\t}\n\tcase \"STREAM\":\n\tdefault:\n\t\treturn fmt.Errorf(\"%w: subscription %s: unknown subscription mode %q\", ErrConfig, sc.Name, sc.Mode)\n\t}\n\t// validate encoding\n\tif sc.Encoding != nil {\n\t\tswitch strings.ToUpper(strings.ReplaceAll(*sc.Encoding, \"-\", \"_\")) {\n\t\tcase \"\":\n\t\t\tsc.Encoding = pointer.ToString(subscriptionDefaultEncoding)\n\t\tcase \"JSON\":\n\t\tcase \"BYTES\":\n\t\tcase \"PROTO\":\n\t\tcase \"ASCII\":\n\t\tcase \"JSON_IETF\":\n\t\tdefault:\n\t\t\t// allow integer encoding values\n\t\t\t_, err := strconv.Atoi(*sc.Encoding)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%w: subscription %s: unknown encoding type %q\", ErrConfig, sc.Name, *sc.Encoding)\n\t\t\t}\n\t\t}\n\t}\n\n\t// validate subscription stream mode\n\tif strings.ToUpper(sc.Mode) == \"STREAM\" {\n\t\tif len(sc.StreamSubscriptions) == 0 {\n\t\t\tswitch strings.ToUpper(strings.ReplaceAll(sc.StreamMode, \"-\", \"_\")) {\n\t\t\tcase \"\":\n\t\t\t\tsc.StreamMode = subscriptionDefaultStreamMode\n\t\t\tcase \"TARGET_DEFINED\":\n\t\t\tcase \"SAMPLE\":\n\t\t\tcase \"ON_CHANGE\":\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"%w: subscription %s: unknown stream-mode type %q\", ErrConfig, sc.Name, sc.StreamMode)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\t// stream subscriptions\n\t\tfor i, scs := range sc.StreamSubscriptions {\n\t\t\tif scs.Mode != \"\" {\n\t\t\t\treturn fmt.Errorf(\"%w: subscription %s/%d: 'mode' attribute cannot be set\", ErrConfig, sc.Name, i)\n\t\t\t}\n\t\t\tif scs.Prefix != \"\" {\n\t\t\t\treturn fmt.Errorf(\"%w: subscription %s/%d: 'prefix' attribute cannot be set\", ErrConfig, sc.Name, i)\n\t\t\t}\n\t\t\tif scs.Target != \"\" {\n\t\t\t\treturn fmt.Errorf(\"%w: subscription %s/%d: 'target' attribute cannot be set\", ErrConfig, sc.Name, i)\n\t\t\t}\n\t\t\tif scs.SetTarget {\n\t\t\t\treturn fmt.Errorf(\"%w: subscription %s/%d: 'set-target' attribute cannot be set\", ErrConfig, sc.Name, i)\n\t\t\t}\n\t\t\tif scs.Encoding != nil 
{\n\t\t\t\treturn fmt.Errorf(\"%w: subscription %s/%d: 'encoding' attribute cannot be set\", ErrConfig, sc.Name, i)\n\t\t\t}\n\t\t\tif scs.History != nil {\n\t\t\t\treturn fmt.Errorf(\"%w: subscription %s/%d: 'history' attribute cannot be set\", ErrConfig, sc.Name, i)\n\t\t\t}\n\t\t\tif scs.Models != nil {\n\t\t\t\treturn fmt.Errorf(\"%w: subscription %s/%d: 'models' attribute cannot be set\", ErrConfig, sc.Name, i)\n\t\t\t}\n\t\t\tif scs.UpdatesOnly {\n\t\t\t\treturn fmt.Errorf(\"%w: subscription %s/%d: 'updates-only' attribute cannot be set\", ErrConfig, sc.Name, i)\n\t\t\t}\n\t\t\tif scs.StreamSubscriptions != nil {\n\t\t\t\treturn fmt.Errorf(\"%w: subscription %s/%d: 'subscriptions' attribute cannot be set\", ErrConfig, sc.Name, i)\n\t\t\t}\n\t\t\tif scs.Qos != nil {\n\t\t\t\treturn fmt.Errorf(\"%w: subscription %s/%d: 'qos' attribute cannot be set\", ErrConfig, sc.Name, i)\n\t\t\t}\n\n\t\t\tswitch strings.ReplaceAll(strings.ToUpper(scs.StreamMode), \"-\", \"_\") {\n\t\t\tcase \"\":\n\t\t\t\tscs.StreamMode = subscriptionDefaultStreamMode\n\t\t\tcase \"TARGET_DEFINED\":\n\t\t\tcase \"SAMPLE\":\n\t\t\tcase \"ON_CHANGE\":\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"%w: subscription %s/%d: unknown subscription stream mode %q\", ErrConfig, sc.Name, i, scs.StreamMode)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc SubscriptionOpts(sc *types.SubscriptionConfig, tc *types.TargetConfig, defaultEncoding string) ([]api.GNMIOption, error) {\n\tgnmiOpts := make([]api.GNMIOption, 0, 4)\n\n\tgnmiOpts = append(gnmiOpts,\n\t\tapi.Prefix(sc.Prefix),\n\t\tapi.SubscriptionListMode(sc.Mode),\n\t\tapi.UpdatesOnly(sc.UpdatesOnly),\n\t)\n\t// encoding\n\tswitch {\n\tcase sc.Encoding != nil:\n\t\tgnmiOpts = append(gnmiOpts, api.Encoding(*sc.Encoding))\n\tcase tc != nil && tc.Encoding != nil:\n\t\tgnmiOpts = append(gnmiOpts, api.Encoding(*tc.Encoding))\n\tdefault:\n\t\tgnmiOpts = append(gnmiOpts, api.Encoding(defaultEncoding))\n\t}\n\n\t// history extension\n\tif sc.History != nil 
{\n\t\tif !sc.History.Snapshot.IsZero() {\n\t\t\tgnmiOpts = append(gnmiOpts, api.Extension_HistorySnapshotTime(sc.History.Snapshot))\n\t\t}\n\t\tif !sc.History.Start.IsZero() && !sc.History.End.IsZero() {\n\t\t\tgnmiOpts = append(gnmiOpts, api.Extension_HistoryRange(sc.History.Start, sc.History.End))\n\t\t}\n\t}\n\t// QoS\n\tif sc.Qos != nil {\n\t\tgnmiOpts = append(gnmiOpts, api.Qos(*sc.Qos))\n\t}\n\n\t// add models\n\tfor _, m := range sc.Models {\n\t\tgnmiOpts = append(gnmiOpts, api.UseModel(m, \"\", \"\"))\n\t}\n\n\t// add target opt\n\tif sc.Target != \"\" {\n\t\tgnmiOpts = append(gnmiOpts, api.Target(sc.Target))\n\t} else if sc.SetTarget {\n\t\tgnmiOpts = append(gnmiOpts, api.Target(tc.Name))\n\t}\n\t// add gNMI subscriptions\n\t// multiple stream subscriptions\n\tif len(sc.StreamSubscriptions) > 0 {\n\t\tfor _, ssc := range sc.StreamSubscriptions {\n\t\t\tstreamGNMIOpts, err := streamSubscriptionOpts(ssc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tgnmiOpts = append(gnmiOpts, streamGNMIOpts...)\n\t\t}\n\t}\n\n\tfor _, p := range sc.Paths {\n\t\tsubGnmiOpts := make([]api.GNMIOption, 0, 2)\n\t\tswitch gnmi.SubscriptionList_Mode(gnmi.SubscriptionList_Mode_value[strings.ToUpper(sc.Mode)]) {\n\t\tcase gnmi.SubscriptionList_STREAM:\n\t\t\tswitch gnmi.SubscriptionMode(gnmi.SubscriptionMode_value[strings.Replace(strings.ToUpper(sc.StreamMode), \"-\", \"_\", -1)]) {\n\t\t\tcase gnmi.SubscriptionMode_ON_CHANGE:\n\t\t\t\tif sc.HeartbeatInterval != nil {\n\t\t\t\t\tsubGnmiOpts = append(subGnmiOpts, api.HeartbeatInterval(*sc.HeartbeatInterval))\n\t\t\t\t}\n\t\t\t\tsubGnmiOpts = append(subGnmiOpts, api.SubscriptionMode(sc.StreamMode))\n\t\t\tcase gnmi.SubscriptionMode_TARGET_DEFINED:\n\t\t\t\tif sc.HeartbeatInterval != nil {\n\t\t\t\t\tsubGnmiOpts = append(subGnmiOpts, api.HeartbeatInterval(*sc.HeartbeatInterval))\n\t\t\t\t}\n\t\t\t\tif sc.SampleInterval != nil {\n\t\t\t\t\tsubGnmiOpts = append(subGnmiOpts, 
api.SampleInterval(*sc.SampleInterval))\n\t\t\t\t}\n\t\t\t\tsubGnmiOpts = append(subGnmiOpts, api.SuppressRedundant(sc.SuppressRedundant))\n\t\t\t\tif sc.SuppressRedundant && sc.HeartbeatInterval != nil {\n\t\t\t\t\tsubGnmiOpts = append(subGnmiOpts, api.HeartbeatInterval(*sc.HeartbeatInterval))\n\t\t\t\t}\n\t\t\t\tsubGnmiOpts = append(subGnmiOpts, api.SubscriptionMode(sc.StreamMode))\n\t\t\tcase gnmi.SubscriptionMode_SAMPLE:\n\t\t\t\tif sc.SampleInterval != nil {\n\t\t\t\t\tsubGnmiOpts = append(subGnmiOpts, api.SampleInterval(*sc.SampleInterval))\n\t\t\t\t}\n\t\t\t\tsubGnmiOpts = append(subGnmiOpts, api.SuppressRedundant(sc.SuppressRedundant))\n\t\t\t\tif sc.SuppressRedundant && sc.HeartbeatInterval != nil {\n\t\t\t\t\tsubGnmiOpts = append(subGnmiOpts, api.HeartbeatInterval(*sc.HeartbeatInterval))\n\t\t\t\t}\n\t\t\t\tsubGnmiOpts = append(subGnmiOpts, api.SubscriptionMode(sc.StreamMode))\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"%w: subscription %s unknown stream subscription mode %s\", ErrConfig, sc.Name, sc.StreamMode)\n\t\t\t}\n\t\tdefault:\n\t\t\t// poll and once subscription modes\n\t\t}\n\t\t//\n\t\tsubGnmiOpts = append(subGnmiOpts, api.Path(p))\n\t\tgnmiOpts = append(gnmiOpts,\n\t\t\tapi.Subscription(subGnmiOpts...),\n\t\t)\n\t}\n\n\t// Depth extension\n\tif sc.Depth > 0 {\n\t\tgnmiOpts = append(gnmiOpts, api.Extension_Depth(sc.Depth))\n\t}\n\treturn gnmiOpts, nil\n}\n\nfunc streamSubscriptionOpts(sc *types.SubscriptionConfig) ([]api.GNMIOption, error) {\n\tgnmiOpts := make([]api.GNMIOption, 0)\n\tfor _, p := range sc.Paths {\n\t\tsubGnmiOpts := make([]api.GNMIOption, 0, 2)\n\t\tswitch gnmi.SubscriptionMode(gnmi.SubscriptionMode_value[strings.Replace(strings.ToUpper(sc.StreamMode), \"-\", \"_\", -1)]) {\n\t\tcase gnmi.SubscriptionMode_ON_CHANGE:\n\t\t\tif sc.HeartbeatInterval != nil {\n\t\t\t\tsubGnmiOpts = append(subGnmiOpts, api.HeartbeatInterval(*sc.HeartbeatInterval))\n\t\t\t}\n\t\t\tsubGnmiOpts = append(subGnmiOpts, 
api.SubscriptionMode(sc.StreamMode))\n\t\tcase gnmi.SubscriptionMode_SAMPLE, gnmi.SubscriptionMode_TARGET_DEFINED:\n\t\t\tif sc.SampleInterval != nil {\n\t\t\t\tsubGnmiOpts = append(subGnmiOpts, api.SampleInterval(*sc.SampleInterval))\n\t\t\t}\n\t\t\tsubGnmiOpts = append(subGnmiOpts, api.SuppressRedundant(sc.SuppressRedundant))\n\t\t\tif sc.SuppressRedundant && sc.HeartbeatInterval != nil {\n\t\t\t\tsubGnmiOpts = append(subGnmiOpts, api.HeartbeatInterval(*sc.HeartbeatInterval))\n\t\t\t}\n\t\t\tsubGnmiOpts = append(subGnmiOpts, api.SubscriptionMode(sc.StreamMode))\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%w: subscription %s unknown stream subscription mode %s\", ErrConfig, sc.Name, sc.StreamMode)\n\t\t}\n\n\t\tsubGnmiOpts = append(subGnmiOpts, api.Path(p))\n\t\tgnmiOpts = append(gnmiOpts,\n\t\t\tapi.Subscription(subGnmiOpts...),\n\t\t)\n\t}\n\treturn gnmiOpts, nil\n}\n"
  },
  {
    "path": "pkg/utils/gnmiext.go",
    "content": "package utils\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype RegisteredExtensions map[int32]string\n\nfunc ParseRegisteredExtensions(pairs []string) (RegisteredExtensions, error) {\n\tres := RegisteredExtensions{}\n\n\tfor _, p := range pairs {\n\t\tidMsg := strings.Split(p, \":\")\n\n\t\tif len(idMsg) < 2 {\n\t\t\treturn nil, fmt.Errorf(\"'%s' registered extension has invalid format, 123:package.Message format is expected\", p)\n\t\t}\n\n\t\tid, err := strconv.ParseInt(idMsg[0], 10, 32)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tres[int32(id)] = idMsg[1]\n\t}\n\n\treturn res, nil\n}\n"
  },
  {
    "path": "pkg/utils/store.go",
    "content": "package utils\n\nimport (\n\t\"github.com/openconfig/gnmic/pkg/api/types\"\n\t\"github.com/zestor-dev/zestor/store\"\n)\n\nfunc GetConfigMaps(s store.Store[any]) (map[string]*types.TargetConfig, map[string]map[string]any, map[string]map[string]any, error) {\n\ttgm, err := s.List(\"targets\")\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\ttcs := make(map[string]*types.TargetConfig)\n\tfor n, t := range tgm {\n\t\tif tc, ok := t.(*types.TargetConfig); ok {\n\t\t\ttcs[n] = tc\n\t\t}\n\t}\n\tegm, err := s.List(\"processors\")\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\teps := make(map[string]map[string]any)\n\tfor n, e := range egm {\n\t\tif ep, ok := e.(map[string]any); ok {\n\t\t\teps[n] = ep\n\t\t}\n\t}\n\tagm, err := s.List(\"actions\")\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tacts := make(map[string]map[string]any)\n\tfor n, a := range agm {\n\t\tif act, ok := a.(map[string]any); ok {\n\t\t\tacts[n] = act\n\t\t}\n\t}\n\treturn tcs, eps, acts, nil\n}\n"
  },
  {
    "path": "pkg/version/version.go",
    "content": "package version\n\nvar (\n\tVersion = \"dev\"\n\tCommit  = \"none\"\n\tDate    = \"unknown\"\n\tGitURL  = \"\"\n)\n"
  },
  {
    "path": "tests/api.sh",
    "content": "#!/bin/bash\n\ntrap 'failure ${LINENO} \"$BASH_COMMAND\"' ERR\n\ncurl -sS http://$1/api/v1/config | yq eval -P\ncurl -sS http://$1/api/v1/config/targets | yq eval -P\nfor i in $(curl -sS http://$1/api/v1/config/targets | jq -r 'keys[]');\n    do\n        curl -sS http://$1/api/v1/config/targets/$i |yq eval -P\n    done\n\ncurl -sS http://$1/api/v1/config/subscriptions | yq eval -P\ncurl -sS http://$1/api/v1/config/outputs | yq eval -P\ncurl -sS http://$1/api/v1/config/inputs | yq eval -P\ncurl -sS http://$1/api/v1/config/processors | yq eval -P\ncurl -sS http://$1/api/v1/config/clustering | yq eval -P\ncurl -sS http://$1/api/v1/config/api-server | yq eval -P\ncurl -sS http://$1/api/v1/config/gnmi-server | yq eval -P\ncurl -sS http://$1/api/v1/targets | yq eval -P\n\nfor i in $(curl -sS http://$1/api/v1/targets | jq -r 'keys[]');\n    do\n        curl -sS http://$1/api/v1/targets/$i | yq eval -P\n    done\n\ncurl -sS http://$1/api/v1/cluster | yq eval -P\ncurl -sS http://$1/api/v1/cluster/members | yq eval -P\ncurl -sS http://$1/api/v1/cluster/leader | yq eval -P\n"
  },
  {
    "path": "tests/capabilities_cmd.sh",
    "content": "#!/bin/bash\n\ngnmic_base_cmd=\"./gnmic-rc1 -u admin -p NokiaSrl1! --skip-verify -d\"\n\ntrap 'failure ${LINENO} \"$BASH_COMMAND\"' ERR\n\n# capabilities\n$gnmic_base_cmd -a clab-test1-srl1 capabilities\n$gnmic_base_cmd -a clab-test1-srl2 capabilities\n$gnmic_base_cmd -a clab-test1-srl3 capabilities\n# capabilities multi host\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 capabilities\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 capabilities --format json\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 capabilities\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 capabilities --format json\n\nprintf \"capabilities with config file\\n\"\n# capabilities using config file\n./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl1 capabilities\n./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl2 capabilities\n./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl3 capabilities\n./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl1 capabilities --no-prefix\n./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl2 capabilities --no-prefix\n./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl3 capabilities --no-prefix\n./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl1 capabilities --format json\n./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl2 capabilities --format json\n./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl3 capabilities --format json\n./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl1 capabilities --format json --no-prefix\n./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl2 capabilities --format json --no-prefix\n./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl3 capabilities --format json --no-prefix\n# multi host\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml capabilities\n./gnmic-rc1 -a 
clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml capabilities --no-prefix\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml capabilities --format json\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml capabilities --format json --no-prefix\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml capabilities\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml capabilities --no-prefix\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml capabilities --format json\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml capabilities --format json --no-prefix\n\n## hosts from file\n### target nodes in address field\n./gnmic-rc1 --config configs/gnmic2.yaml capabilities\n./gnmic-rc1 --config configs/gnmic2.yaml capabilities --format json\n./gnmic-rc1 --config configs/gnmic2.yaml capabilities --format json --no-prefix\n### target nodes in targets field\n./gnmic-rc1 --config configs/gnmic3.yaml capabilities\n./gnmic-rc1 --config configs/gnmic3.yaml capabilities --format json\n./gnmic-rc1 --config configs/gnmic3.yaml capabilities --format json --no-prefix\n\n\n# set skip-verify value to false in the config file\nsed -i 's/^skip-verify: true/skip-verify: false/g' configs/gnmic1.yaml\n./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml --skip-verify capabilities\n./gnmic-rc1 -a clab-test1-srl2 --config configs/gnmic1.yaml --skip-verify capabilities\n./gnmic-rc1 -a clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities --format 
json\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities --format json\n\n# comment out skip-verify value in the config file and change it to true\nsed -i 's/^skip-verify: false/#skip-verify: true/g' configs/gnmic1.yaml\n./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml --skip-verify capabilities\n./gnmic-rc1 -a clab-test1-srl2 --config configs/gnmic1.yaml --skip-verify capabilities\n./gnmic-rc1 -a clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities\n\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities --format json\n\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities --format json\n\n# use --tls-ca\n./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml \\\n                                                               --tls-ca clab/clab-test1/.tls/ca/ca.pem \\\n                                                               capabilities\n./gnmic-rc1 -a clab-test1-srl2 --config configs/gnmic1.yaml \\\n                                                               --tls-ca clab/clab-test1/.tls/ca/ca.pem \\\n                                                               capabilities\n./gnmic-rc1 -a clab-test1-srl3 --config configs/gnmic1.yaml \\\n                                                               --tls-ca clab/clab-test1/.tls/ca/ca.pem \\\n                                                               capabilities\n# use --tls-server-name 
s\n./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml --tls-server-name srl1 \\\n                                                               --tls-ca clab/clab-test1/.tls/ca/ca.pem \\\n                                                               capabilities\n./gnmic-rc1 -a clab-test1-srl2 --config configs/gnmic1.yaml --tls-server-name srl2 \\\n                                                               --tls-ca clab/clab-test1/.tls/ca/ca.pem \\\n                                                               capabilities\n./gnmic-rc1 -a clab-test1-srl3 --config configs/gnmic1.yaml --tls-server-name srl3 \\\n                                                               --tls-ca clab/clab-test1/.tls/ca/ca.pem \\\n                                                               capabilities\n\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml \\\n                                                               --tls-ca clab/clab-test1/.tls/ca/ca.pem \\\n                                                               capabilities\n\n# revert back skip-verify value to true\nsed -i 's/^#skip-verify: true/skip-verify: true/g' configs/gnmic1.yaml\n"
  },
  {
    "path": "tests/clab/labN.clab.yaml",
    "content": "name: lab{{ (ds \"data\").ID }}\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux:23.10.3\n      labels:\n        test: telemetry\n\n  nodes:\n    super-spine1:\n    super-spine2:\n    \n    spine1:\n    spine2:\n    spine3:\n    spine4:\n\n    leaf1:\n    leaf2:\n    leaf3:\n    leaf4:\n    leaf5:\n    leaf6:\n    leaf7:\n    leaf8:\n    \n  links:\n    # super-spine1 links\n    - endpoints: [\"super-spine1:e1-1\", \"spine1:e1-1\"]\n    - endpoints: [\"super-spine1:e1-2\", \"spine2:e1-1\"]\n    - endpoints: [\"super-spine1:e1-3\", \"spine3:e1-1\"]\n    - endpoints: [\"super-spine1:e1-4\", \"spine4:e1-1\"]\n    # super-spine2 links\n    - endpoints: [\"super-spine2:e1-1\", \"spine1:e1-2\"]\n    - endpoints: [\"super-spine2:e1-2\", \"spine2:e1-2\"]\n    - endpoints: [\"super-spine2:e1-3\", \"spine3:e1-2\"]\n    - endpoints: [\"super-spine2:e1-4\", \"spine4:e1-2\"]\n\n    # spine1 links\n    - endpoints: [\"spine1:e1-3\", \"leaf1:e1-1\"]\n    - endpoints: [\"spine1:e1-4\", \"leaf2:e1-1\"]\n    - endpoints: [\"spine1:e1-5\", \"leaf3:e1-1\"]\n    - endpoints: [\"spine1:e1-6\", \"leaf4:e1-1\"]\n    # spine2 links\n    - endpoints: [\"spine2:e1-3\", \"leaf1:e1-2\"]\n    - endpoints: [\"spine2:e1-4\", \"leaf2:e1-2\"]\n    - endpoints: [\"spine2:e1-5\", \"leaf3:e1-2\"]\n    - endpoints: [\"spine2:e1-6\", \"leaf4:e1-2\"]\n    # spine3 links\n    - endpoints: [\"spine3:e1-3\", \"leaf5:e1-1\"]\n    - endpoints: [\"spine3:e1-4\", \"leaf6:e1-1\"]\n    - endpoints: [\"spine3:e1-5\", \"leaf7:e1-1\"]\n    - endpoints: [\"spine3:e1-6\", \"leaf8:e1-1\"]\n    # spine4 links\n    - endpoints: [\"spine4:e1-3\", \"leaf5:e1-2\"]\n    - endpoints: [\"spine4:e1-4\", \"leaf6:e1-2\"]\n    - endpoints: [\"spine4:e1-5\", \"leaf7:e1-2\"]\n    - endpoints: [\"spine4:e1-6\", \"leaf8:e1-2\"]\n"
  },
  {
    "path": "tests/clab/loaders/gnmic-agg.yaml",
    "content": "log: true\ninsecure: true\n\nloader:\n  type: consul\n  address: clab-loaders-consul-agent:8500\n  debug: true\n  on-add:\n    - query\n  services:\n    - name: collectors-gnmi-server\n\nsubscriptions:\n  collectors:\n    paths:\n      - /\n    stream-mode: on-change\n\napi-server:\n  enable-metrics: true\n\n# clustering config\nclustering:\n  cluster-name: aggregators\n  targets-watch-timer: 10s\n  locker:\n    type: consul\n    address: clab-loaders-consul-agent:8500\n\noutputs:\n  aggregator:\n    type: prometheus\n    service-registration:\n      address: clab-loaders-consul-agent:8500\n\nactions:\n  query:\n    name: query\n    type: http\n    url: http://cht.sh\n    debug: true\n    "
  },
  {
    "path": "tests/clab/loaders/gnmic-docker-loader.yaml",
    "content": "username: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\nloader:\n  type: docker\n  debug: true\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n  on-add:\n    - interfaces\n    - enable_interfaces\n    \nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n  sub2:\n    paths:\n      - /interface/subinterface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\napi-server:\n  enable-metrics: true\n  \n# clustering config\nclustering:\n  cluster-name: collectors\n  targets-watch-timer: 10s\n  locker:\n    type: consul\n    address: clab-loaders-consul-agent:8500\n\ngnmi-server:\n  enable-metrics: true\n  service-registration:\n    address: clab-loaders-consul-agent:8500\n\noutputs:\n  collector:\n    type: prometheus\n    service-registration:\n      address: clab-loaders-consul-agent:8500\n    event-processors:\n      - trim-prefixes\n\nprocessors:\n  trim-prefixes:\n    event-strings:\n      value-names:\n        - \".*\"\n      transforms:\n        - path-base:\n            apply-on: \"name\"\n\nactions:\n  interfaces:\n    name: interfaces\n    type: template\n    debug: true\n    template: |\n      {{- if .Input | strings.Contains \"srl1\"}}ethernet-1/1,ethernet-1/2\n      {{- else -}}ethernet-1/1\n      {{- end -}}\n\n  enable_interfaces:\n    name: enable_interfaces\n    type: gnmi\n    target: '{{ .Input }}'\n    rpc: set\n    encoding: json_ietf\n    debug: true\n    paths: \n      - /\n    values:\n      -  | \n        {{- $ifaces := coll.Slice -}}\n        {{- range $iface := .Env.interfaces | strings.Split \",\" -}}\n        {{- $ifaces =  $ifaces | coll.Append (coll.Dict \"name\" $iface \"admin-state\" \"enable\") -}}\n        {{- end -}}\n        {{- ( coll.Dict \"interface\" $ifaces ) | data.ToJSON -}}\n"
  },
  {
    "path": "tests/clab/loaders/gnmic-file-loader.yaml",
    "content": "username: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\nloader:\n  type: file\n  debug: true\n  path: ./targets/targets.yaml\n  on-add:\n    - interfaces\n    - enable_interfaces\n    \nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n  sub2:\n    paths:\n      - /interface/subinterface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\napi-server:\n  enable-metrics: true\n  \n# clustering config\nclustering:\n  cluster-name: collectors\n  targets-watch-timer: 10s\n  locker:\n    type: consul\n    address: clab-loaders-consul-agent:8500\n\ngnmi-server:\n  enable-metrics: true\n  service-registration:\n    address: clab-loaders-consul-agent:8500\n\noutputs:\n  collector:\n    type: prometheus\n    service-registration:\n      address: clab-loaders-consul-agent:8500\n    event-processors:\n      - trim-prefixes\n\nprocessors:\n  trim-prefixes:\n    event-strings:\n      value-names:\n        - \".*\"\n      transforms:\n        - path-base:\n            apply-on: \"name\"\n\nactions:\n  interfaces:\n    name: interfaces\n    type: template\n    debug: true\n    template: |\n      {{- if .Input | strings.Contains \"srl1\"}}ethernet-1/1,ethernet-1/2\n      {{- else -}}ethernet-1/1\n      {{- end -}}\n\n  enable_interfaces:\n    name: enable_interfaces\n    type: gnmi\n    target: '{{ .Input }}'\n    rpc: set\n    encoding: json_ietf\n    debug: true\n    paths: \n      - /\n    values:\n      -  | \n        {{- $ifaces := coll.Slice -}}\n        {{- range $iface := .Env.interfaces | strings.Split \",\" -}}\n        {{- $ifaces =  $ifaces | coll.Append (coll.Dict \"name\" $iface \"admin-state\" \"enable\") -}}\n        {{- end -}}\n        {{- ( coll.Dict \"interface\" $ifaces ) | data.ToJSON -}}\n"
  },
  {
    "path": "tests/clab/loaders/loaders.clab.yaml",
    "content": "name: loaders\n\ntopology:\n  defaults:\n    kind: linux\n    image: gnmic:0.0.0-rc1\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux:23.10.3\n\n  nodes:\n    srl1:\n      kind: nokia_srlinux\n    srl2:\n      kind: nokia_srlinux\n    srl3:\n      kind: nokia_srlinux\n    \n    consul-agent:\n      image: consul:1.15.4\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    gnmic1:\n      binds:\n        - {{ .gnmic_config_file }}:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n        - targets/targets.yaml:/app/targets/targets.yaml\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7890:7890\n        - 9804:9804\n      env:\n        GNMIC_API: :7890\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-loaders-gnmic1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-loaders-gnmic1\n        GNMIC_GNMI_SERVER_ADDRESS: clab-loaders-gnmic1:57400\n        GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-loaders-gnmic1\n        GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-loaders-gnmic1:9804\n    \n    gnmic2:\n      binds:\n        - {{ .gnmic_config_file }}:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n        - targets/targets.yaml:/app/targets/targets.yaml\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7891:7891\n        - 9805:9805\n      env:\n        GNMIC_API: :7891\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-loaders-gnmic2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-loaders-gnmic2\n        GNMIC_GNMI_SERVER_ADDRESS: clab-loaders-gnmic2:57400\n        GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-loaders-gnmic2\n        GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-loaders-gnmic2:9805\n\n    gnmic3:\n      binds:\n        - {{ .gnmic_config_file }}:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n        - 
targets/targets.yaml:/app/targets/targets.yaml\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7892:7892\n        - 9806:9806\n      env:\n        GNMIC_API: :7892\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-loaders-gnmic3\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-loaders-gnmic3\n        GNMIC_GNMI_SERVER_ADDRESS: clab-loaders-gnmic3:57400\n        GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-loaders-gnmic3\n        GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-loaders-gnmic3:9806\n\n    agg-gnmic1:\n      binds:\n        - gnmic-agg.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7893:7893\n        - 9807:9807\n      env:\n        GNMIC_API: :7893\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-loaders-agg-gnmic1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-loaders-agg-gnmic1\n        GNMIC_OUTPUTS_AGGREGATOR_LISTEN: clab-loaders-agg-gnmic1:9807\n\n    agg-gnmic2:\n      binds:\n        - gnmic-agg.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7894:7894\n        - 9808:9808\n      env:\n        GNMIC_API: :7894\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-loaders-agg-gnmic2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-loaders-agg-gnmic2\n        GNMIC_OUTPUTS_AGGREGATOR_LISTEN: clab-loaders-agg-gnmic2:9808\n\n    agg-gnmic3:\n      binds:\n        - gnmic-agg.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7895:7895\n        - 9809:9809\n      env:\n        GNMIC_API: :7895\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-loaders-agg-gnmic3\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-loaders-agg-gnmic3\n        GNMIC_OUTPUTS_AGGREGATOR_LISTEN: clab-loaders-agg-gnmic3:9809\n\n  links:\n    - endpoints: [\"srl1:e1-1\", 
\"srl2:e1-1\"]\n    - endpoints: [\"srl1:e1-2\", \"srl3:e1-1\"]"
  },
  {
    "path": "tests/clab/loaders/loaders.clab_vars.yaml",
    "content": "gnmic_config_file: gnmic-docker-loader.yaml\n"
  },
  {
    "path": "tests/clab/loaders/targets/targets.yaml",
    "content": "clab-loaders-srl1:\nclab-loaders-srl3:\n"
  },
  {
    "path": "tests/clab/telemetry/gnmic-agg.yaml",
    "content": "log: true\ninsecure: true\n\nloader:\n  type: consul\n  address: clab-telemetry-consul-agent:8500\n  debug: true\n  services:\n    - name: collectors-gnmi-server\n\nsubscriptions:\n  collectors:\n    paths:\n      - /\n    stream-mode: on-change\n\napi-server:\n  enable-metrics: true\n\n# clustering config\nclustering:\n  cluster-name: aggregators\n  targets-watch-timer: 60s\n  locker:\n    type: consul\n    address: clab-telemetry-consul-agent:8500\n\noutputs:\n  aggregator:\n    type: prometheus\n    service-registration:\n      address: clab-telemetry-consul-agent:8500\n"
  },
  {
    "path": "tests/clab/telemetry/gnmic.yaml",
    "content": "username: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\nloader:\n  type: docker\n  debug: true\n  enable-metrics: true\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab1\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab2\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab3\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab4\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: lab5\n  on-add:\n    - interfaces\n    - enable_interfaces\n    \nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n  sub2:\n    paths:\n      - /interface/subinterface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\napi-server:\n  enable-metrics: true\n  \n# clustering config\nclustering:\n  cluster-name: collectors\n  targets-watch-timer: 60s\n  locker:\n    type: consul\n    address: clab-telemetry-consul-agent:8500\n\ngnmi-server:\n  enable-metrics: true\n  service-registration:\n    address: clab-telemetry-consul-agent:8500\n\noutputs:\n  collector:\n    type: prometheus\n    service-registration:\n      address: clab-telemetry-consul-agent:8500\n    event-processors:\n      - trim-prefixes\n\n  influxdb-output:\n    type: influxdb\n    url: http://clab-telemetry-influxdb:8086\n    bucket: telemetry # db name\n    token: gnmic:gnmic # username:password\n    batch-size: 1000\n    flush-timer: 10s\n    event-processors:\n      - trim-prefixes\n  \n  kafka-output:\n    type: kafka\n    address: clab-telemetry-kafka-server:9092\n    topic: telemetry\n    event-processors:\n      - trim-prefixes\n\n  nats-output:\n    type: nats\n    address: clab-telemetry-nats:4222\n    subject: telemetry\n    event-processors:\n      - 
trim-prefixes\n      \nprocessors:\n  trim-prefixes:\n    event-strings:\n      value-names:\n        - \".*\"\n      transforms:\n        - path-base:\n            apply-on: \"name\"\n\nactions:\n  interfaces:\n    name: interfaces\n    type: template\n    debug: true\n    template: |\n      {{- if .Input | strings.Contains \"super-spine\"}}ethernet-1/1,ethernet-1/2,ethernet-1/3,ethernet-1/4\n      {{- else  if .Input | strings.Contains \"spine\"}}ethernet-1/1,ethernet-1/2,ethernet-1/3,ethernet-1/4,ethernet-1/5,ethernet-1/6\n      {{- else if .Input | strings.Contains \"leaf\"}}ethernet-1/1,ethernet-1/2{{- end -}}\n\n  enable_interfaces:\n    name: enable_interfaces\n    type: gnmi\n    target: '{{ .Input }}'\n    rpc: set\n    encoding: json_ietf\n    debug: true\n    paths: \n      - /\n    values:\n      -  | \n        {{- $ifaces := coll.Slice -}}\n        {{- range $iface := .Env.interfaces | strings.Split \",\" -}}\n        {{- $ifaces =  $ifaces | coll.Append (coll.Dict \"name\" $iface \"admin-state\" \"enable\") -}}\n        {{- end -}}\n        {{- ( coll.Dict \"interface\" $ifaces ) | data.ToJSON -}}\n"
  },
  {
    "path": "tests/clab/telemetry/grafana/dashboards.yaml",
    "content": "apiVersion: 1\n\nproviders:\n- name: 'gNMIc Internal Metrics'\n  orgId: 1\n  folder: ''\n  type: file\n  disableDeletion: false\n  editable: true\n  options:\n    path: /var/lib/grafana/dashboards\n    foldersFromFilesStructure: true\n"
  },
  {
    "path": "tests/clab/telemetry/grafana/datasources/datasource.yaml",
    "content": "apiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-telemetry-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n"
  },
  {
    "path": "tests/clab/telemetry/prometheus/prometheus.yaml",
    "content": "global:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-telemetry-consul-agent:8500\n        services:\n          - collectors-gnmic-api\n          - aggregators-gnmic-api\n          - prometheus-cluster\n          - prometheus-collector\n"
  },
  {
    "path": "tests/clab/telemetry/telemetry.clab.yaml",
    "content": "name: telemetry\n\ntopology:\n  defaults:\n    kind: linux\n    image: gnmic:0.0.0-rc1\n    \n  nodes:\n    gnmic1:\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7890:7890\n        - 9804:9804\n      env:\n        GNMIC_API: :7890\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-telemetry-gnmic1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-telemetry-gnmic1\n        GNMIC_GNMI_SERVER_ADDRESS: clab-telemetry-gnmic1:57400\n        GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-telemetry-gnmic1\n        GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-telemetry-gnmic1:9804\n    \n    gnmic2:\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7891:7891\n        - 9805:9805\n      env:\n        GNMIC_API: :7891\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-telemetry-gnmic2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-telemetry-gnmic2\n        GNMIC_GNMI_SERVER_ADDRESS: clab-telemetry-gnmic2:57400\n        GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-telemetry-gnmic2\n        GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-telemetry-gnmic2:9805\n\n    gnmic3:\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7892:7892\n        - 9806:9806\n      env:\n        GNMIC_API: :7892\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-telemetry-gnmic3\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-telemetry-gnmic3\n        GNMIC_GNMI_SERVER_ADDRESS: clab-telemetry-gnmic3:57400\n        GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-telemetry-gnmic3\n        GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-telemetry-gnmic3:9806\n\n    agg-gnmic1:\n      binds:\n        - 
./gnmic-agg.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7893:7893\n        - 9807:9807\n      env:\n        GNMIC_API: :7893\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-telemetry-agg-gnmic1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-telemetry-agg-gnmic1\n        GNMIC_OUTPUTS_AGGREGATOR_LISTEN: clab-telemetry-agg-gnmic1:9807\n\n    agg-gnmic2:\n      binds:\n        - ./gnmic-agg.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7894:7894\n        - 9808:9808\n      env:\n        GNMIC_API: :7894\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-telemetry-agg-gnmic2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-telemetry-agg-gnmic2\n        GNMIC_OUTPUTS_AGGREGATOR_LISTEN: clab-telemetry-agg-gnmic2:9808\n\n    agg-gnmic3:\n      binds:\n        - ./gnmic-agg.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7895:7895\n        - 9809:9809\n      env:\n        GNMIC_API: :7895\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-telemetry-agg-gnmic3\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-telemetry-agg-gnmic3\n        GNMIC_OUTPUTS_AGGREGATOR_LISTEN: clab-telemetry-agg-gnmic3:9809\n\n    nats:\n      kind: linux\n      image: nats:latest\n      ports:\n         - 4222:4222        \n    \n    kafka-server:\n      kind: linux\n      image: bitnami/kafka:latest\n      ports:\n        - 9092:9092\n      env:\n        KAFKA_CFG_ZOOKEEPER_CONNECT: clab-telemetry-zookeeper-server:2181\n        KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://clab-telemetry-kafka-server:9092\n        ALLOW_PLAINTEXT_LISTENER: \"yes\"\n        JMX_PORT: 9000\n\n    zookeeper-server:\n      kind: linux\n      image: bitnami/zookeeper:latest\n      ports:\n        - 2181:2181\n      env:\n  
      ALLOW_ANONYMOUS_LOGIN: \"yes\"    \n\n    influxdb:\n      kind: linux\n      image: influxdb:1.8.10\n      ports:\n        - 8086:8086\n      env:\n        INFLUXDB_DATA_ENGINE: tsm1\n        INFLUXDB_REPORTING_DISABLED: \"false\"\n        INFLUXDB_USER: gnmic\n        INFLUXDB_USER_PASSWORD: gnmic\n        INFLUXDB_DB: telemetry\n\n    consul-agent:\n      image: consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n    \n    prometheus:\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    grafana:\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        - grafana/dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml:ro\n        - ../../dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000\n      env:\n        GF_AUTH_DISABLE_LOGIN_FORM: \"true\"\n        GF_AUTH_ANONYMOUS_ENABLED: \"true\"\n        GF_AUTH_ANONYMOUS_ORG_NAME: Main Org.\n        GF_AUTH_ANONYMOUS_ORG_ROLE: Admin\n        GF_USERS_ALLOW_SIGN_UP: \"false\"\n"
  },
  {
    "path": "tests/clab/test_lab1.clab.yaml",
    "content": "name: test1\n\ntopology:\n  defaults:\n    kind: nokia_srlinux\n  kinds:\n    nokia_srlinux:\n      image: ghcr.io/nokia/srlinux:23.10.3\n  nodes:\n    srl1:\n    srl2:\n    srl3:\n  links:\n    - endpoints: [\"srl1:e1-1\", \"srl2:e1-1\"]\n    - endpoints: [\"srl2:e1-2\", \"srl3:e1-1\"]\n    - endpoints: [\"srl3:e1-2\", \"srl1:e1-2\"]\n"
  },
  {
    "path": "tests/cleanup.sh",
    "content": "#!/bin/bash\n\n# cleanup\nrm -f gnmic-rc1\n# delete downloaded yang files\nsudo rm -rf srl-latest-yang-models\n# destroy lab\nsudo clab destroy -t clab/$1.clab.yaml --cleanup\n"
  },
  {
    "path": "tests/cluster_checks.sh",
    "content": "#!/bin/bash\n\nsource ./cluster_funcs.sh\n\nprint_clusters \n"
  },
  {
    "path": "tests/cluster_funcs.sh",
    "content": "#!/bin/bash\n\nfunction check_num_locked_targets() {\n  ## check number of locked targets\n  locked_count=$(consul kv get -recurse gnmic/collectors/targets | wc -l) \n  expected_node_count=$1\n  if [[ $locked_count -ne $expected_node_count ]]\n  then\n    printf \"Number of locked nodes is not %s, it's %s... time to panic\\n\" $expected_node_count $locked_count\n    exit 1\n  fi\n  printf \"Number of locked nodes          : %s\\n\" $locked_count\n  printf \"Expected number of locked nodes : %s\\n\" $expected_node_count\n  print_clusters\n}\n\nfunction print_clusters() {\n  printf \"Clusters:\\n\"\n  consul kv get -recurse gnmic | awk -F: '{print $1}' | awk -F/ '{print $2}' | uniq | nl -w1 -s') '\n  printf \"\\n\"\n  print_single_cluster aggregators\n  print_single_cluster collectors\n}\n\nfunction get_instance_api_endpoint() {\n  service_instance=$1\"-api\"\n  res=$(curl -s http://127.0.0.1:8500/v1/agent/services | jq --arg si \"$service_instance\" '.[$si]' | jq -r '(.Address+ \":\" + (.Port|tostring))')\n  protocol=\"http://\"\n  for t in $(curl -s http://127.0.0.1:8500/v1/agent/services | jq --arg si \"$service_instance\" '.[$si]' | jq -r .Tags[] )\n    do\n      if [[ \"$t\" = protocol=* ]]\n        then\n          protocol=$(echo $t | awk -F= '{print $2}')\n      fi\n    done\n  echo $protocol\"://\"$res\n}\n\nfunction print_single_cluster() {\n  cluster_name=$1\n  printf \"Cluster name                    : %s\\n\" $cluster_name\n  printf \"Number of locked nodes          : %s\\n\" $(consul kv get -recurse gnmic/$cluster_name/targets | wc -l) \n  printf \"gNMIc cluster leader            : %s\\n\" $(consul kv get -recurse gnmic/$cluster_name/leader | awk -F: '{print $2}')\n  for instance in $(consul kv get -recurse gnmic/$cluster_name/targets | awk -F: '{print $2}' | sort | uniq)\n    do\n      api_endpoint=$(get_instance_api_endpoint $instance)\n      printf \"%s:\\n\" $instance\n      printf \"\\t API endpoint           : %s\\n\" 
$api_endpoint\n      printf \"\\t locked nodes           : %s\\n\" $(get_number_of_locked_nodes $cluster_name $instance)\n      printf \"\\t nodes in config        : %s\\n\" $(get_number_of_configured_nodes $api_endpoint)\n      printf \"\\t handled nodes          : %s\\n\" $(get_number_of_handled_nodes $api_endpoint)\n    done\n  \n  printf \"Instance to target mapping      :\\n\"\n  consul kv get -recurse gnmic/$cluster_name/targets | awk -F/ '{print $4}' | awk -F: '{print \"\\t\"$2\":\\t\"$1}' | sort | nl -w2 -s')'\n  printf \"\\n\"\n}\n\nfunction get_number_of_locked_nodes() {\n  cluster_name=$1\n  instance=$2\n  echo $(consul kv get -recurse gnmic/$cluster_name/targets | grep $instance | wc -l)\n}\n\nfunction get_number_of_configured_nodes() {\n  api_endpoint=$1\n  echo $(curl -s $api_endpoint/api/v1/config/targets | jq -r 'keys[]' | wc -l)\n}\n\nfunction get_number_of_handled_nodes() {\n  api_endpoint=$1\n  echo $(curl -s $api_endpoint/api/v1/targets | jq -r 'keys[]' | wc -l)\n}\n"
  },
  {
    "path": "tests/configs/gnmic1.yaml",
    "content": "username: admin\npassword: NokiaSrl1!\nskip-verify: true\ndebug: true\n\nsubscriptions:\n  sub1:\n    paths:\n      - /system \n    mode: once\n"
  },
  {
    "path": "tests/configs/gnmic2.yaml",
    "content": "username: admin\npassword: NokiaSrl1!\nskip-verify: true\ndebug: true\n\naddress:\n  - clab-test1-srl1\n  - clab-test1-srl2\n  - clab-test1-srl3\n\nsubscriptions:\n  sub1:\n    paths:\n      - /system \n    mode: once\n"
  },
  {
    "path": "tests/configs/gnmic3.yaml",
    "content": "username: admin\npassword: NokiaSrl1!\nskip-verify: true\ndebug: true\ntargets:\n  clab-test1-srl1:\n  clab-test1-srl2:\n  clab-test1-srl3:\n\nsubscriptions:\n  sub1:\n    paths:\n      - /system \n    mode: once\n"
  },
  {
    "path": "tests/configs/gnmic4.yaml",
    "content": "username: admin\npassword: NokiaSrl1!\nskip-verify: true\ndebug: true\ntargets:\n  clab-test1-srl1:\n  clab-test1-srl2:\n  clab-test1-srl3:\n\nsubscriptions:\n  sub1:\n    paths:\n      - /system \n    mode: once\n  sub2:\n    paths:\n      - /acl\n    mode: once\n"
  },
  {
    "path": "tests/configs/gnmic_env.yaml",
    "content": "address: $CUSTOM_ADDR\nskip-verify: $SKIPVER"
  },
  {
    "path": "tests/configs/node/interface.json",
    "content": "{\n    \"admin-state\": \"enable\",\n    \"description\": \"dummy_description\"\n}"
  },
  {
    "path": "tests/configs/node/interface.yaml",
    "content": "admin-state: enable\ndescription: \"dummy description2\"\n"
  },
  {
    "path": "tests/configs/node/replace_request_file.yaml",
    "content": "replaces:\n  - path: /interface[name=ethernet-1/1]\n    value:\n      admin-state: enable\n      description: dummy_description1\n    encoding: json_ietf\n  - path: /interface[name=ethernet-1/2]\n    value:\n      admin-state: enable\n      description: dummy_description2\n    encoding: json_ietf\n"
  },
  {
    "path": "tests/configs/node/update_request_file.yaml",
    "content": "updates:\n  - path: /interface[name=ethernet-1/1]\n    value:\n      admin-state: enable\n      description: dummy_description1\n    encoding: json_ietf\n  - path: /interface[name=ethernet-1/2]\n    value:\n      admin-state: enable\n      description: dummy_description2\n    encoding: json_ietf\n"
  },
  {
    "path": "tests/consul_templates/all_services.tpl",
    "content": "{{ range services -}}\n{{ .Name }}:\n{{- range service .Name }}\n  {{ .Address }}\n{{- end }}\n\n{{ end -}}\n"
  },
  {
    "path": "tests/dashboards/gNMIc/gnmic_compute_metrics.json",
    "content": "{\n  \"annotations\": {\n    \"list\": [\n      {\n        \"builtIn\": 1,\n        \"datasource\": \"-- Grafana --\",\n        \"enable\": true,\n        \"hide\": true,\n        \"iconColor\": \"rgba(0, 211, 255, 1)\",\n        \"name\": \"Annotations & Alerts\",\n        \"target\": {\n          \"limit\": 100,\n          \"matchAny\": false,\n          \"tags\": [],\n          \"type\": \"dashboard\"\n        },\n        \"type\": \"dashboard\"\n      }\n    ]\n  },\n  \"editable\": true,\n  \"gnetId\": null,\n  \"graphTooltip\": 0,\n  \"links\": [],\n  \"panels\": [\n    {\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 10,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineStyle\": {\n              \"fill\": \"solid\"\n            },\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"always\",\n            \"spanNulls\": true,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 
80\n              }\n            ]\n          },\n          \"unit\": \"short\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 0\n      },\n      \"id\": 16,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [\n            \"mean\",\n            \"lastNotNull\",\n            \"max\",\n            \"min\"\n          ],\n          \"displayMode\": \"table\",\n          \"placement\": \"bottom\"\n        },\n        \"tooltip\": {\n          \"mode\": \"single\"\n        }\n      },\n      \"pluginVersion\": \"8.1.3\",\n      \"targets\": [\n        {\n          \"expr\": \"process_open_fds\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"timeFrom\": null,\n      \"timeShift\": null,\n      \"title\": \"Open File Descriptors (#)\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 10,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"always\",\n            \"spanNulls\": true,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": 
\"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"short\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 0\n      },\n      \"id\": 4,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [\n            \"mean\",\n            \"lastNotNull\",\n            \"max\",\n            \"min\"\n          ],\n          \"displayMode\": \"table\",\n          \"placement\": \"bottom\"\n        },\n        \"tooltip\": {\n          \"mode\": \"single\"\n        }\n      },\n      \"pluginVersion\": \"8.1.3\",\n      \"targets\": [\n        {\n          \"expr\": \"go_goroutines\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"timeFrom\": null,\n      \"timeShift\": null,\n      \"title\": \"Go Routines (#)\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 10,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n          
  \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"always\",\n            \"spanNulls\": true,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"short\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 10\n      },\n      \"id\": 14,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [\n            \"mean\",\n            \"lastNotNull\",\n            \"max\",\n            \"min\"\n          ],\n          \"displayMode\": \"table\",\n          \"placement\": \"bottom\"\n        },\n        \"tooltip\": {\n          \"mode\": \"single\"\n        }\n      },\n      \"pluginVersion\": \"8.1.3\",\n      \"targets\": [\n        {\n          \"expr\": \"go_memstats_stack_inuse_bytes/1000000\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"timeFrom\": null,\n      \"timeShift\": null,\n      \"title\": \"Memory - Stack In Use (MB)\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            
\"drawStyle\": \"line\",\n            \"fillOpacity\": 10,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"always\",\n            \"spanNulls\": true,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"short\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 10\n      },\n      \"id\": 6,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [\n            \"mean\",\n            \"lastNotNull\",\n            \"max\",\n            \"min\"\n          ],\n          \"displayMode\": \"table\",\n          \"placement\": \"bottom\"\n        },\n        \"tooltip\": {\n          \"mode\": \"single\"\n        }\n      },\n      \"pluginVersion\": \"8.1.3\",\n      \"targets\": [\n        {\n          \"expr\": \"go_memstats_alloc_bytes/1000000\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}} mem alloc\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"timeFrom\": null,\n      \"timeShift\": null,\n      \"title\": \"Memory Alloc 
(MB)\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": null,\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 20\n      },\n      \"hiddenSeries\": false,\n      \"id\": 10,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": true,\n        \"show\": true,\n        \"total\": false,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"8.1.3\",\n      \"pointradius\": 2,\n      \"points\": true,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"go_memstats_heap_inuse_bytes/1000000\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeFrom\": null,\n      \"timeRegions\": [],\n      \"timeShift\": null,\n      \"title\": \"Memory - Heap inUse (MB)\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"buckets\": null,\n        \"mode\": \"time\",\n        \"name\": null,\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        },\n        {\n          \"format\": 
\"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false,\n        \"alignLevel\": null\n      }\n    },\n    {\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 10,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"always\",\n            \"spanNulls\": true,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"short\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 20\n      },\n      \"id\": 2,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [\n            \"mean\",\n            \"lastNotNull\",\n     
       \"max\",\n            \"min\"\n          ],\n          \"displayMode\": \"table\",\n          \"placement\": \"bottom\"\n        },\n        \"tooltip\": {\n          \"mode\": \"single\"\n        }\n      },\n      \"pluginVersion\": \"8.1.3\",\n      \"targets\": [\n        {\n          \"expr\": \"go_gc_duration_seconds*1000\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}} quantile={{quantile}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"timeFrom\": null,\n      \"timeShift\": null,\n      \"title\": \"Go GC duration (ms)\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 10,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"always\",\n            \"spanNulls\": true,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              
}\n            ]\n          },\n          \"unit\": \"short\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 30\n      },\n      \"id\": 12,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [\n            \"mean\",\n            \"lastNotNull\",\n            \"max\",\n            \"min\"\n          ],\n          \"displayMode\": \"table\",\n          \"placement\": \"bottom\"\n        },\n        \"tooltip\": {\n          \"mode\": \"single\"\n        }\n      },\n      \"pluginVersion\": \"8.1.3\",\n      \"targets\": [\n        {\n          \"expr\": \"rate(go_memstats_mallocs_total[1m])/1000000\",\n          \"interval\": \"\",\n          \"legendFormat\": \"\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"timeFrom\": null,\n      \"timeShift\": null,\n      \"title\": \"Memory malloc MB/s\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 10,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"always\",\n            \"spanNulls\": true,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n       
     }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"short\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 30\n      },\n      \"id\": 8,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [\n            \"mean\",\n            \"lastNotNull\",\n            \"max\",\n            \"min\"\n          ],\n          \"displayMode\": \"table\",\n          \"placement\": \"bottom\"\n        },\n        \"tooltip\": {\n          \"mode\": \"single\"\n        }\n      },\n      \"pluginVersion\": \"8.1.3\",\n      \"targets\": [\n        {\n          \"expr\": \"rate(go_memstats_alloc_bytes_total[1m])/1000000\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"timeFrom\": null,\n      \"timeShift\": null,\n      \"title\": \"Memory - alloc MB/s \",\n      \"type\": \"timeseries\"\n    }\n  ],\n  \"refresh\": \"10s\",\n  \"schemaVersion\": 30,\n  \"style\": \"dark\",\n  \"tags\": [],\n  \"templating\": {\n    \"list\": []\n  },\n  \"time\": {\n    \"from\": \"now-30m\",\n    \"to\": \"now\"\n  },\n  \"timepicker\": {},\n  \"timezone\": \"\",\n  \"title\": \"gNMIc Compute metrics\",\n  \"uid\": \"EYxvhi77k\",\n  \"version\": 1\n}"
  },
  {
    "path": "tests/dashboards/gNMIc/gnmic_grpc_metrics.json",
    "content": "{\n  \"annotations\": {\n    \"list\": [\n      {\n        \"builtIn\": 1,\n        \"datasource\": \"-- Grafana --\",\n        \"enable\": true,\n        \"hide\": true,\n        \"iconColor\": \"rgba(0, 211, 255, 1)\",\n        \"name\": \"Annotations & Alerts\",\n        \"target\": {\n          \"limit\": 100,\n          \"matchAny\": false,\n          \"tags\": [],\n          \"type\": \"dashboard\"\n        },\n        \"type\": \"dashboard\"\n      }\n    ]\n  },\n  \"editable\": true,\n  \"gnetId\": null,\n  \"graphTooltip\": 0,\n  \"links\": [],\n  \"panels\": [\n    {\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n    
    },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 0\n      },\n      \"id\": 16,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [\n            \"lastNotNull\",\n            \"first\",\n            \"min\",\n            \"max\"\n          ],\n          \"displayMode\": \"table\",\n          \"placement\": \"bottom\"\n        },\n        \"tooltip\": {\n          \"mode\": \"single\"\n        }\n      },\n      \"targets\": [\n        {\n          \"exemplar\": true,\n          \"expr\": \"gnmic_docker_loader_number_of_loaded_targets\",\n          \"interval\": \"\",\n          \"legendFormat\": \"\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Docker loader - number of loaded targets\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": null,\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 0\n      },\n      \"hiddenSeries\": false,\n      \"id\": 2,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": true,\n        \"show\": true,\n        \"total\": false,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"8.1.3\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"rate(grpc_client_msg_received_total[1m])\",\n          
\"interval\": \"\",\n          \"legendFormat\": \"{{instance}} {{grpc_method}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeFrom\": null,\n      \"timeRegions\": [],\n      \"timeShift\": null,\n      \"title\": \"gRPC Client Msg Rcv/second\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"buckets\": null,\n        \"mode\": \"time\",\n        \"name\": null,\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false,\n        \"alignLevel\": null\n      }\n    },\n    {\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": 
\"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 10\n      },\n      \"id\": 14,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [\n            \"last\"\n          ],\n          \"displayMode\": \"table\",\n          \"placement\": \"bottom\"\n        },\n        \"tooltip\": {\n          \"mode\": \"single\"\n        }\n      },\n      \"targets\": [\n        {\n          \"exemplar\": true,\n          \"expr\": \"rate(gnmic_docker_loader_number_of_docker_list_total[30s])\",\n          \"interval\": \"\",\n          \"legendFormat\": \"\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Number of docker loader runs per second\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": null,\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 10\n      },\n      \"hiddenSeries\": false,\n      \"id\": 4,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": true,\n        \"show\": true,\n        \"total\": false,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": 
\"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"8.1.3\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"grpc_client_started_total\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}} {{grpc_method}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeFrom\": null,\n      \"timeRegions\": [],\n      \"timeShift\": null,\n      \"title\": \"gRPC Client started\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"buckets\": null,\n        \"mode\": \"time\",\n        \"name\": null,\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false,\n        \"alignLevel\": null\n      }\n    },\n    {\n      \"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              
\"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 20\n      },\n      \"id\": 12,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"table\",\n          \"placement\": \"bottom\"\n        },\n        \"tooltip\": {\n          \"mode\": \"single\"\n        }\n      },\n      \"targets\": [\n        {\n          \"exemplar\": true,\n          \"expr\": \"rate(gnmic_subscribe_number_of_received_subscribe_response_messages_total[1m])\",\n          \"interval\": \"\",\n          \"legendFormat\": \"\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"number of received subscribe response (msg/s)\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": null,\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 
12,\n        \"x\": 12,\n        \"y\": 20\n      },\n      \"hiddenSeries\": false,\n      \"id\": 6,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": true,\n        \"show\": true,\n        \"total\": false,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"8.1.3\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"rate(grpc_client_msg_sent_total[1m])\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}} {{grpc_method}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeFrom\": null,\n      \"timeRegions\": [],\n      \"timeShift\": null,\n      \"title\": \"gRPC Client Msg Sent/s\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"buckets\": null,\n        \"mode\": \"time\",\n        \"name\": null,\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false,\n        \"alignLevel\": null\n      }\n    },\n    {\n      
\"datasource\": null,\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"always\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 30\n      },\n      \"id\": 10,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [\n            \"lastNotNull\",\n            \"first\",\n            \"min\",\n            \"max\"\n          ],\n          \"displayMode\": \"table\",\n          \"placement\": \"bottom\"\n        },\n        \"tooltip\": {\n          \"mode\": \"single\"\n        }\n      },\n      \"targets\": [\n        {\n          \"exemplar\": true,\n          \"expr\": 
\"gnmic_cluster_number_of_locked_targets\",\n          \"interval\": \"\",\n          \"legendFormat\": \"\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Cluster - number of locked targets (#)\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": null,\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 10,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 30\n      },\n      \"hiddenSeries\": false,\n      \"id\": 8,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": true,\n        \"show\": true,\n        \"total\": false,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"8.1.3\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"grpc_server_started_total\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}} {{grpc_method}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeFrom\": null,\n      \"timeRegions\": [],\n      \"timeShift\": null,\n      \"title\": \"gRPC Server Started\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"buckets\": null,\n        \"mode\": \"time\",\n        \"name\": null,\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n      
    \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"label\": null,\n          \"logBase\": 1,\n          \"max\": null,\n          \"min\": null,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false,\n        \"alignLevel\": null\n      }\n    }\n  ],\n  \"schemaVersion\": 30,\n  \"style\": \"dark\",\n  \"tags\": [],\n  \"templating\": {\n    \"list\": []\n  },\n  \"time\": {\n    \"from\": \"now-30m\",\n    \"to\": \"now\"\n  },\n  \"timepicker\": {},\n  \"timezone\": \"\",\n  \"title\": \"gNMIc gRPC metrics\",\n  \"uid\": \"9W_Qzi7nz\",\n  \"version\": 1\n}"
  },
  {
    "path": "tests/deploy.sh",
    "content": "#!/bin/bash\n\ntrap 'failure ${LINENO} \"$BASH_COMMAND\"' ERR\n\n# printf \"Installing containerlab...\\n\"\n# bash -c \"$(curl -sL https://get-clab.srlinux.dev)\"\nsudo clab version\nprintf \"\\n\"\nprintf \"Deploying lab $1\\n\"\nsudo clab deploy -t clab/$1.clab.yaml --reconfigure\n"
  },
  {
    "path": "tests/env_vars.sh",
    "content": "#!/bin/bash\n\ntrap 'failure ${LINENO} \"$BASH_COMMAND\"' ERR\n\ntargets=clab-test1-srl1,clab-test1-srl2,clab-test1-srl3\n# create read only role\n./gnmic-rc1 -u admin -p NokiaSrl1! --skip-verify --debug -a $targets -e json_ietf \\\n        set \\\n        --update-path /system/aaa/authorization \\\n        --update-value '{\"role\": {\"rolename\":\"readonly\"}}'\n\n# craete readonly role\n./gnmic-rc1 -u admin -p NokiaSrl1! --skip-verify --debug -a $targets -e json_ietf \\\n        set \\\n        --update-path /system/configuration/role[name=readonly]/rule[path-reference=\"/\"]/action \\\n        --update-value \"read\" \\\n        --update-path /system/aaa/authorization/role[rolename=readonly] \\\n        --update-value '{\"services\": [\"gnmi\"]}'\n\n# create a new user\n./gnmic-rc1 -u admin -p NokiaSrl1! --skip-verify --debug -a $targets -e json_ietf \\\n        set \\\n        --update-path /system/aaa/authentication/user[username=user1]/password \\\n        --update-value \"|Bo|Z%TYe*&\\$P33~\"\n\n# assign readonly role to the new user\n./gnmic-rc1 -u admin -p NokiaSrl1! 
--skip-verify --debug -a $targets -e json_ietf \\\n        set \\\n        --update-path /system/aaa/authentication/user[username=user1] \\\n        --update-value '{\"role\": [\"readonly\"]}'\n\n# check user1 has access\n./gnmic-rc1 -u user1 -p '|Bo|Z%TYe*&$P33~' --skip-verify --debug -a $targets -e json_ietf \\\n       get \\\n       --path /system/name\n\n# password from ENV\nGNMIC_PASSWORD=\"|Bo|Z%TYe*&\\$P33~\" ./gnmic-rc1 -u user1 --skip-verify --debug -a $targets -e json_ietf \\\n       get \\\n       --path /system/name\n\n# Username from ENV\nGNMIC_USERNAME=user1 ./gnmic-rc1 -p '|Bo|Z%TYe*&$P33~' --skip-verify --debug -a $targets -e json_ietf \\\n       get \\\n       --path /system/name\n\n# both username and password from env\nGNMIC_USERNAME=user1 GNMIC_PASSWORD='|Bo|Z%TYe*&$P33~' ./gnmic-rc1 --skip-verify --debug -a $targets -e json_ietf \\\n       get \\\n       --path /system/name\n\n# username, password and debug from env\nGNMIC_USERNAME=user1 GNMIC_PASSWORD='|Bo|Z%TYe*&$P33~' GNMIC_DEBUG=true ./gnmic-rc1 --skip-verify -a $targets -e json_ietf \\\n       get \\\n       --path /system/name\n\n# all global flags from env\nGNMIC_USERNAME=user1 GNMIC_PASSWORD='|Bo|Z%TYe*&$P33~' GNMIC_DEBUG=true GNMIC_SKIP_VERIFY=true GNMIC_ENCODING=json_ietf GNMIC_ADDRESS=$targets ./gnmic-rc1 \\\n       get \\\n       --path /system/name\n\n## config file expansion\nCUSTOM_ADDR=$targets GNMIC_USERNAME=user1 GNMIC_PASSWORD='|Bo|Z%TYe*&$P33~' GNMIC_SKIP_VERIFY=true GNMIC_ENCODING=json_ietf ./gnmic-rc1 --config configs/gnmic_env.yaml --debug \\\n       get \\\n       --path /system/name\nCUSTOM_ADDR=$targets GNMIC_USERNAME=user1 GNMIC_PASSWORD='|Bo|Z%TYe*&$P33~' GNMIC_SKIP_VERIFY=true SKIPVER=false GNMIC_ENCODING=json_ietf ./gnmic-rc1 --config configs/gnmic_env.yaml --debug \\\n       get \\\n       --path /system/dns"
  },
  {
    "path": "tests/generate_cmd.sh",
    "content": "#!/bin/bash\n\ntrap 'failure ${LINENO} \"$BASH_COMMAND\"' ERR\n\nDIR_NAME=\"$(pwd)/srl-latest-yang-models\"\n\ndocker pull ghcr.io/nokia/srlinux\nid=$(docker create ghcr.io/nokia/srlinux)\nmkdir -p $DIR_NAME\nsudo docker cp $id:/opt/srlinux/models/. $DIR_NAME\nsudo docker rm $id\nls -l srl-latest-yang-models\n\nsudo sed -i 's|modifier \"invert-match\";|//modifier \"invert-match\";|g' srl-latest-yang-models/srl_nokia/models/common/srl_nokia-common.yang\n\n./gnmic-rc1 generate --path /interface/subinterface --file  srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude \".tools.\"\n./gnmic-rc1 generate --path /interface/subinterface --file  srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude \".tools.\" --camel-case\n./gnmic-rc1 generate --path /interface/subinterface --file  srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude \".tools.\" --snake-case\n\n./gnmic-rc1 generate --path /network-instance/protocols/bgp --file  srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude \".tools.\"\n#./gnmic-rc1 generate --path /network-instance/protocols/bgp --file  srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models/ietf --exclude \".tools.\" --camel-case\n./gnmic-rc1 generate --path /network-instance/protocols/bgp --file  srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude \".tools.\" --snake-case\n\n./gnmic-rc1 generate --path /interface/subinterface --file  srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude \".tools.\" --config-only\n./gnmic-rc1 generate --path /interface/subinterface --file  srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude \".tools.\" --config-only --camel-case\n./gnmic-rc1 generate --path /interface/subinterface --file  srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude \".tools.\" --config-only --snake-case\n"
  },
  {
    "path": "tests/generate_path_cmd.sh",
    "content": "#!/bin/bash\n\ntrap 'failure ${LINENO} \"$BASH_COMMAND\"' ERR\n\n./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models\n./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --path-type gnmi\n./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --config-only\n./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --with-prefix\n./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --types\n./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --json\n./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --json --config-only\n./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --json --path-type gnmi\n"
  },
  {
    "path": "tests/get_cmd.sh",
    "content": "#!/bin/bash \n\ntrap 'failure ${LINENO} \"$BASH_COMMAND\"' ERR\n\n# get\n$gnmic_base_cmd -a clab-test1-srl1 -e json_ietf get \\\n                              --path /system/name/host-name\n$gnmic_base_cmd -a clab-test1-srl2 -e json_ietf get \\\n                              --path /system/name/host-name\n$gnmic_base_cmd -a clab-test1-srl3 -e json_ietf get \\\n                              --path /system/name/host-name\n\n# get multi paths\n$gnmic_base_cmd -a clab-test1-srl1 -e json_ietf get \\\n                              --path /system/name/host-name \\\n                              --path /system/gnmi-server\n$gnmic_base_cmd -a clab-test1-srl2 -e json_ietf get \\\n                              --path /system/name/host-name \\\n                              --path /system/gnmi-server\n$gnmic_base_cmd -a clab-test1-srl3 -e json_ietf get \\\n                              --path /system/name/host-name \\\n                              --path /system/gnmi-server\n\n# comma separated paths\n$gnmic_base_cmd -a clab-test1-srl1 -e json_ietf get \\\n                              --path /system/name/host-name \\\n                              --path /system/gnmi-server\n$gnmic_base_cmd -a clab-test1-srl2 -e json_ietf get \\\n                              --path /system/name/host-name \\\n                              --path /system/gnmi-server\n$gnmic_base_cmd -a clab-test1-srl3 -e json_ietf get \\\n                              --path /system/name/host-name \\\n                              --path /system/gnmi-server\n\n# get multi hosts\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf get \\\n                              --path /system/name/host-name\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf get \\\n                               --path /system/name/host-name\n# get multi hosts and paths\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf get \\\n 
                             --path /system/name/host-name \\\n                              --path /system/gnmi-server\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf get \\\n                              --path /system/name/host-name \\\n                              --path /system/gnmi-server\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf get \\\n                              --path /system/name/host-name \\\n                              --path /system/gnmi-server\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf get \\\n                              --path /system/name/host-name \\\n                              --path /system/gnmi-server\n"
  },
  {
    "path": "tests/loaders.sh",
    "content": "#!/bin/bash\n\nexport SHELLOPTS\nset -eET\n\nfailure() {\n  local lineno=$1\n  local msg=$2\n  echo \"Failed at line $lineno: $msg\"\n}\n\nexport -f failure\n\nfunction cleanup() {\n  echo \"gnmic_config_file: gnmic-docker-loader.yaml\" > clab/loaders/loaders.clab_vars.yaml\n  sudo clab des --cleanup -t clab/loaders/loaders.clab.yaml\n  docker image prune -f\n}\n\ntrap 'failure ${LINENO} \"$BASH_COMMAND\"' ERR\ntrap cleanup EXIT\ntrap cleanup SIGINT\n\n# build docker image\ndocker build -t gnmic:0.0.0-rc1 ../\n\nstart=`date +%s`\n\n# docker loader\necho \"gnmic_config_file: gnmic-docker-loader.yaml\" > clab/loaders/loaders.clab_vars.yaml\nsudo clab dep -t clab/loaders/loaders.clab.yaml --reconfigure\nsleep 45\nsudo clab des -t clab/loaders/loaders.clab.yaml --cleanup\n\n# file loader\n# change gnmic config file\necho \"gnmic_config_file: gnmic-file-loader.yaml\" > clab/loaders/loaders.clab_vars.yaml\n\n# deploy lab with file loader\necho \"clab-loaders-srl1:\" > ./clab/loaders/targets/targets.yaml\necho \"clab-loaders-srl2:\" >> ./clab/loaders/targets/targets.yaml\necho \"clab-loaders-srl3:\" >> ./clab/loaders/targets/targets.yaml\n\nsudo clab dep -t clab/loaders/loaders.clab.yaml --reconfigure\nsleep 45\n./api.sh clab-loaders-gnmic1:7890\n./api.sh clab-loaders-gnmic2:7891\n./api.sh clab-loaders-gnmic3:7892\n./api.sh clab-loaders-agg-gnmic1:7893\n./api.sh clab-loaders-agg-gnmic2:7894\n./api.sh clab-loaders-agg-gnmic3:7895\n\necho \"clab-loaders-srl1:\" > ./clab/loaders/targets/targets.yaml\necho \"clab-loaders-srl2:\" >> ./clab/loaders/targets/targets.yaml\nsleep 45\n./api.sh clab-loaders-gnmic1:7890\n./api.sh clab-loaders-gnmic2:7891\n./api.sh clab-loaders-gnmic3:7892\n./api.sh clab-loaders-agg-gnmic1:7893\n./api.sh clab-loaders-agg-gnmic2:7894\n./api.sh clab-loaders-agg-gnmic3:7895\n\necho \"clab-loaders-srl1:\" > ./clab/loaders/targets/targets.yaml\necho \"clab-loaders-srl3:\" >> ./clab/loaders/targets/targets.yaml\nsleep 45\n./api.sh 
clab-loaders-gnmic1:7890\n./api.sh clab-loaders-gnmic2:7891\n./api.sh clab-loaders-gnmic3:7892\n./api.sh clab-loaders-agg-gnmic1:7893\n./api.sh clab-loaders-agg-gnmic2:7894\n./api.sh clab-loaders-agg-gnmic3:7895\n\nsudo clab des -t clab/loaders/loaders.clab.yaml --cleanup\n"
  },
  {
    "path": "tests/metrics/gnmic.yaml",
    "content": "username: admin\npassword: NokiaSrl1!\nskip-verify: true\nencoding: json_ietf\nlog: true\n\nloader:\n  type: docker\n  debug: true\n  enable-metrics: true\n  filters:\n    - containers:\n      - label=clab-node-kind: srl\n        label=containerlab: metrics\n    \nsubscriptions:\n  # Add subscriptions configuration here\n  # e.g:\n  sub1:\n    paths:\n      - /interface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n  sub2:\n    paths:\n      - /interface/subinterface/statistics\n    stream-mode: sample\n    sample-interval: 10s\n\napi-server:\n  enable-metrics: true\n  \n# clustering config\nclustering:\n  cluster-name: collectors\n  targets-watch-timer: 60s\n  locker:\n    type: consul\n    address: clab-metrics-consul-agent:8500\n\n\noutputs:\n  collector:\n    type: prometheus\n    service-registration:\n      address: clab-metrics-consul-agent:8500\n    event-processors:\n      - trim-prefixes\n      \nprocessors:\n  trim-prefixes:\n    event-strings:\n      value-names:\n        - \".*\"\n      transforms:\n        - path-base:\n            apply-on: \"name\"\n"
  },
  {
    "path": "tests/metrics/grafana/dashboards.yaml",
    "content": "apiVersion: 1\n\nproviders:\n- name: 'gNMIc Internal Metrics'\n  orgId: 1\n  folder: ''\n  type: file\n  disableDeletion: false\n  editable: true\n  options:\n    path: /var/lib/grafana/dashboards\n    foldersFromFilesStructure: true\n"
  },
  {
    "path": "tests/metrics/grafana/datasources/datasource.yaml",
    "content": "apiVersion: 1\n\ndeleteDatasources:\n  - name: Prometheus\n    orgId: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    orgId: 1\n    url: http://clab-metrics-prometheus:9090\n    password:\n    user:\n    database:\n    basicAuth: false\n    basicAuthUser:\n    basicAuthPassword:\n    withCredentials:\n    isDefault: true\n    version: 1\n    editable: true\n"
  },
  {
    "path": "tests/metrics/metrics.clab.yaml",
    "content": "name: metrics\n\ntopology:\n  defaults:\n    kind: linux\n    image: gnmic:0.0.0-rc1\n  kinds:\n    srl:\n      image: ghcr.io/nokia/srlinux\n  nodes:\n  {{- range $id := seq 1 9}}\n    srl{{ $id }}:\n      kind: srl\n  {{- end }}\n    gnmic1:\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7890:7890\n        - 9804:9804\n      env:\n        GNMIC_API: :7890\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-metrics-gnmic1\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-metrics-gnmic1\n        GNMIC_GNMI_SERVER_ADDRESS: clab-metrics-gnmic1:57400\n        GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-metrics-gnmic1\n        GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-metrics-gnmic1:9804\n    \n    gnmic2:\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7891:7891\n        - 9805:9805\n      env:\n        GNMIC_API: :7891\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-metrics-gnmic2\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-metrics-gnmic2\n        GNMIC_GNMI_SERVER_ADDRESS: clab-metrics-gnmic2:57400\n        GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-metrics-gnmic2\n        GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-metrics-gnmic2:9805\n\n    gnmic3:\n      binds:\n        - ./gnmic.yaml:/app/gnmic.yaml:ro\n        - /var/run/docker.sock:/var/run/docker.sock\n      cmd: '--config /app/gnmic.yaml subscribe'\n      ports:\n        - 7892:7892\n        - 9806:9806\n      env:\n        GNMIC_API: :7892\n        GNMIC_CLUSTERING_INSTANCE_NAME: clab-metrics-gnmic3\n        GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-metrics-gnmic3\n        GNMIC_GNMI_SERVER_ADDRESS: clab-metrics-gnmic3:57400\n        GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-metrics-gnmic3\n        
GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-metrics-gnmic3:9806\n        \n    consul-agent:\n      image: consul:latest\n      ports:\n        - 8500:8500\n        - 8600:8600/udp\n      cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0'\n\n    prometheus:\n      image: prom/prometheus:latest\n      user: 65534:65534\n      ports:\n        - 9090:9090\n      binds:\n        - ./prometheus/:/etc/prometheus/\n      cmd: |\n        --config.file=/etc/prometheus/prometheus.yaml\n        --web.console.libraries=/usr/share/prometheus/console_libraries\n        --web.console.templates=/usr/share/prometheus/consoles\n        --log.level=debug\n\n    grafana:\n      image: grafana/grafana:latest\n      binds:\n        - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro\n        - grafana/dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml:ro\n        - ../dashboards/:/var/lib/grafana/dashboards\n      ports:\n        - 3000:3000"
  },
  {
    "path": "tests/metrics/prometheus/prometheus.yaml",
    "content": "global:\n  scrape_interval:     10s \n  evaluation_interval: 10s \n\nscrape_configs:\n  - job_name: 'gnmic'\n    scrape_interval: 10s \n    consul_sd_configs:\n      - server: clab-metrics-consul-agent:8500\n        services:\n          - collectors-gnmic-api\n          - prometheus-collector\n"
  },
  {
    "path": "tests/metrics/run.sh",
    "content": "#!/bin/bash\n\ncase \"$1\" in\n  \"build\")\n     docker build -t gnmic:0.0.0-rc1 ../../\nesac\n\nsudo clab dep -t metrics.clab.yaml --reconfigure\n\nsleep 60\n\ncurl http://clab-metrics-gnmic1:7890/metrics\ncurl http://clab-metrics-gnmic2:7891/metrics\ncurl http://clab-metrics-gnmic3:7892/metrics"
  },
  {
    "path": "tests/run.sh",
    "content": "#!/bin/bash\n\nexport SHELLOPTS\nset -eET\n\nfailure() {\n  local lineno=$1\n  local msg=$2\n  echo \"Failed at line $lineno: $msg\"\n}\nexport -f failure\n\nfunction cleanup() {\n    printf \"cleaning up...\\n\"\n    ./cleanup.sh test_lab1\n}\nexport -f cleanup\n\nfunction contains() {\n  if [[ \"$1\" != *\"$2\"* ]]; then\n    exit 1\n  fi\n}\nexport -f contains\n\ntrap 'failure ${LINENO} \"$BASH_COMMAND\"' ERR\ntrap cleanup EXIT\ntrap cleanup SIGINT\n\nexport gnmic_base_cmd=\"./gnmic-rc1 -u admin -p NokiaSrl1! --skip-verify --debug\"\n\nfunction buildgNMIc() {\n  printf \"Building gnmic...\\n\"\n  CGO_ENABLED=0 go build -o gnmic-rc1 -ldflags=\"-s -w -X 'github.com/openconfig/gnmic/pkg/version.Commit=$(git rev-parse --short HEAD)' -X 'github.com/openconfig/gnmic/pkg/version.Date=$(date)'\" ../\n}\n\nstart=`date +%s`\n\ncase \"$1\" in\n  \"all\")\n    # build gnmic\n    buildgNMIc\n    # run version cmd\n    ./version_cmd.sh\n    # run generate cmd\n    ./generate_cmd.sh\n    # run generate path cmd\n    ./generate_path_cmd.sh\n    \n    # deploy basic 3 nodes lab\n    ./deploy.sh test_lab1\n    # run capabilities cmd tests\n    ./capabilities_cmd.sh\n    # run get cmd tests\n    ./get_cmd.sh\n\n    # redeploy to avoid getting error: rpc error: code = ResourceExhausted desc = Max number of operations per minute (rate-limit) reached (max: 60)\n    ./deploy.sh test_lab1\n    # run set md tests\n    ./set_cmd.sh\n\n    # redeploy to avoid getting error: rpc error: code = ResourceExhausted desc = Max number of operations per minute (rate-limit) reached (max: 60)\n    ./deploy.sh test_lab1\n    # run subscribe once cmd tests \n    ./subscribe_once_cmd.sh\n    # cleanup test_lab1\n    cleanup test_lab1\n    # run loaders tests\n    ./loaders.sh\n    ;;\n  \"version\")\n    # build gnmic\n    buildgNMIc\n\n    # run version cmd\n    ./version_cmd.sh\n    ;;\n  \"generate\")\n    # build gnmic\n    buildgNMIc\n    # run generate cmd\n    ./generate_cmd.sh\n 
   ./generate_path_cmd.sh\n    ;;\n  \"cap\")\n    # build gnmic\n    buildgNMIc\n\n    # deploy basic 3 nodes lab\n    ./deploy.sh test_lab1\n\n    # run capabilities cmd tests\n    ./capabilities_cmd.sh\n    ;;\n  \"get\")\n    # build gnmic\n    buildgNMIc\n    # deploy basic 3 nodes lab\n    ./deploy.sh test_lab1\n    # run get cmd tests\n    ./get_cmd.sh\n    ;;\n  \"set\")\n    # build gnmic\n    buildgNMIc\n    # deploy basic 3 nodes lab\n    ./deploy.sh test_lab1\n    # run set md tests\n    ./set_cmd.sh\n    ;;\n  \"sub\")\n    # build gnmic\n    buildgNMIc\n    # deploy basic 3 nodes lab\n    ./deploy.sh test_lab1\n    # run sub tests\n    ./subscribe_once_cmd.sh\n    ;;\n  \"env\")\n    # build gnmic\n    buildgNMIc\n    # deploy basic 3 nodes lab\n    ./deploy.sh test_lab1\n    # run sub tests\n    ./env_vars.sh\n    ;;\n  \"loaders\")\n    ./loaders.sh\n    ;;\n  *)\n    echo \"./run.sh [ all | version | generate | cap | get | set | sub | env | loaders ]\"\n    exit 1\n    ;;\nesac\n\n# calculate runtime\nend=`date +%s`\nruntime=$((end-start))\nprintf \"runtime=%ss\\n\" $runtime\n"
  },
  {
    "path": "tests/run_tests.sh",
    "content": "#!/bin/bash\n\nset -e\n\nfunction testmodule\n{\n    cd $1\n    go test -cover ./... -v -count=1\n    cd $SCRIPTPATH/..\n}\n\ndeclare -a modules=(\".\" \"pkg/api\" \"pkg/cache\")\n\nSCRIPTPATH=\"$( cd -- \"$(dirname \"$0\")\" >/dev/null 2>&1 ; pwd -P )\"\n\ncd $SCRIPTPATH/..\n\nfor i in \"${modules[@]}\"\ndo\n    echo \"Running tests for module $i\"\n    testmodule \"$i\"\ndone\n"
  },
  {
    "path": "tests/set_cmd.sh",
    "content": "#!/bin/bash\n\ntrap 'failure ${LINENO} \"$BASH_COMMAND\"' ERR\n\n#########\n## SET ##\n#########\n#################\n## SET REPLACE ##\n#################\n\n### set replace with value\n#### single host\n########################\ngnmic_base_cmd=\"./gnmic-rc1 -u admin -p NokiaSrl1! --skip-verify\"\n$gnmic_base_cmd -a clab-test1-srl1 set \\\n                              -e json_ietf \\\n                              --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1\nout=$($gnmic_base_cmd -a clab-test1-srl1 get -e json_ietf \\\n                        --path /interface[name=ethernet-1/1]/description | \n                        jq -r '.[].updates[].values.\"srl_nokia-interfaces:interface/description\"')\ncontains $out \"e1-1_dummy_desc1\"\n########################\n$gnmic_base_cmd -a clab-test1-srl1 set \\\n                              --delimiter \"CUSTOM_DELIMITER\" \\\n                              --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1\n                              \n$gnmic_base_cmd -a clab-test1-srl1 set -e json_ietf \\\n                              --replace-path /interface[name=ethernet-1/1]/description \\\n                              --replace-value e1-1_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl2 set \\\n                              --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl2 set \\\n                              --delimiter \"CUSTOM_DELIMITER\" \\\n                              --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl2 set -e json_ietf \\\n                              --replace-path /interface[name=ethernet-1/1]/description \\\n                              --replace-value e1-1_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl3 set \\\n                 
             --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl3 set \\\n                              --delimiter \"CUSTOM_DELIMITER\" \\\n                              --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl3 set -e json_ietf \\\n                              --replace-path /interface[name=ethernet-1/1]/description \\\n                              --replace-value e1-1_dummy_desc1\n\n#### multi hosts\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set \\\n                              --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf set \\\n                              --replace-path /interface[name=ethernet-1/1]/description \\\n                              --replace-value e1-1_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set \\\n                              --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set \\\n                              --delimiter \"CUSTOM_DELIMITER\" \\\n                              --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf set \\\n                              --replace-path /interface[name=ethernet-1/1]/description \\\n                              --replace-value e1-1_dummy_desc1\n\n### set replace with multiple values\n#### single host\n$gnmic_base_cmd -a clab-test1-srl1 set \\\n                          --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \\\n                          
--replace /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl1 set \\\n                          --delimiter \"CUSTOM_DELIMITER\" \\\n                          --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1 \\\n                          --replace /interface[name=ethernet-1/2]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-2_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl1 -e json_ietf set \\\n                          --replace-path /interface[name=ethernet-1/1]/description \\\n                          --replace-value e1-1_dummy_desc1 \\\n                          --replace-path /interface[name=ethernet-1/2]/description \\\n                          --replace-value e1-2_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl2 set \\\n                          --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \\\n                          --replace /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl2 set \\\n                          --delimiter \"CUSTOM_DELIMITER\" \\\n                          --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1 \\\n                          --replace /interface[name=ethernet-1/2]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-2_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl2 -e json_ietf set \\\n                          --replace-path /interface[name=ethernet-1/1]/description \\\n                          --replace-value e1-1_dummy_desc1 \\\n                          --replace-path /interface[name=ethernet-1/2]/description \\\n                          --replace-value e1-2_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl3 set \\\n                          --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \\\n              
            --replace /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl3 set \\\n                          --delimiter \"CUSTOM_DELIMITER\" \\\n                          --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1 \\\n                          --replace /interface[name=ethernet-1/2]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-2_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl3 -e json_ietf set \\\n                          --replace-path /interface[name=ethernet-1/1]/description \\\n                          --replace-value e1-1_dummy_desc1 \\\n                          --replace-path /interface[name=ethernet-1/2]/description \\\n                          --replace-value e1-2_dummy_desc1\n\n\n#### multi hosts\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set \\\n                          --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf set \\\n                          --replace-path /interface[name=ethernet-1/1]/description \\\n                          --replace-value e1-1_dummy_desc1\n\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set \\\n                          --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1\n                          \n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf set \\\n                          --replace-path /interface[name=ethernet-1/1]/description \\\n                          --replace-value e1-1_dummy_desc1\n\n### set replace with file\n#### JSON file\n##### single host\ncat configs/node/interface.json\n$gnmic_base_cmd -a clab-test1-srl1 -e json_ietf -d set \\\n                          --replace-path /interface[name=ethernet-1/1] \\\n                          
--replace-file configs/node/interface.json\n\n$gnmic_base_cmd -a clab-test1-srl2 -e json_ietf -d set \\\n                          --replace-path /interface[name=ethernet-1/1] \\\n                          --replace-file configs/node/interface.json \n\n$gnmic_base_cmd -a clab-test1-srl3 -e json_ietf -d set \\\n                          --replace-path /interface[name=ethernet-1/1] \\\n                          --replace-file configs/node/interface.json\n\n##### multi hosts\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf -d set \\\n                          --replace-path /interface[name=ethernet-1/1] \\\n                          --replace-file configs/node/interface.json\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf -d set \\\n                          --replace-path /interface[name=ethernet-1/1] \\\n                          --replace-file configs/node/interface.json\n\n#### YAML file\n##### single host\ncat configs/node/interface.yaml\n$gnmic_base_cmd -a clab-test1-srl1 -e json_ietf -d set \\\n                          --replace-path /interface[name=ethernet-1/1] \\\n                          --replace-file configs/node/interface.yaml\n$gnmic_base_cmd -a clab-test1-srl2 -e json_ietf -d set \\\n                          --replace-path /interface[name=ethernet-1/1] \\\n                          --replace-file configs/node/interface.yaml\n$gnmic_base_cmd -a clab-test1-srl3 -e json_ietf -d set \\\n                          --replace-path /interface[name=ethernet-1/1] \\\n                          --replace-file configs/node/interface.yaml\n##### multi hosts\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf -d set \\\n                          --replace-path /interface[name=ethernet-1/1] \\\n                          --replace-file configs/node/interface.yaml\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf -d set \\\n          
                --replace-path /interface[name=ethernet-1/1] \\\n                          --replace-file configs/node/interface.yaml \n\n\n### set replace with request file\n$gnmic_base_cmd -a clab-test1-srl1 set --request-file configs/node/replace_request_file.yaml\n$gnmic_base_cmd -a clab-test1-srl2 set --request-file configs/node/replace_request_file.yaml\n$gnmic_base_cmd -a clab-test1-srl3 set --request-file configs/node/replace_request_file.yaml\n\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --request-file configs/node/replace_request_file.yaml\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set --request-file configs/node/replace_request_file.yaml\n\n################\n## SET UPDATE ##\n################\n\n### set update with value\n#### single host\n$gnmic_base_cmd -a clab-test1-srl1 set --update /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1\n$gnmic_base_cmd -a clab-test1-srl1 set --update-path /interface[name=ethernet-1/1]/description --update-value e1-1_dummy_desc2 -e json_ietf\n$gnmic_base_cmd -a clab-test1-srl2 set --update /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1\n$gnmic_base_cmd -a clab-test1-srl2 set --update-path /interface[name=ethernet-1/1]/description --update-value e1-1_dummy_desc2 -e json_ietf\n$gnmic_base_cmd -a clab-test1-srl3 set --update /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1\n$gnmic_base_cmd -a clab-test1-srl3 set --update-path /interface[name=ethernet-1/1]/description --update-value e1-1_dummy_desc2 -e json_ietf\n#### multi hosts\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --update /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --update-path /interface[name=ethernet-1/1]/description --update-value e1-1_dummy_desc1 -e json_ietf\n$gnmic_base_cmd -a clab-test1-srl1 -a 
clab-test1-srl2 -a clab-test1-srl3 set --update /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set --update-path /interface[name=ethernet-1/1]/description --update-value e1-1_dummy_desc1 -e json_ietf\n\n### set update with file\n#### JSON file\n##### single host\ncat configs/node/interface.json\n$gnmic_base_cmd -a clab-test1-srl1 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.json -e json_ietf\n$gnmic_base_cmd -a clab-test1-srl2 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.json -e json_ietf\n$gnmic_base_cmd -a clab-test1-srl3 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.json -e json_ietf\n##### multi hosts\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.json -e json_ietf\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.json -e json_ietf\n#### YAML file\n##### single host\ncat configs/node/interface.yaml\n$gnmic_base_cmd -a clab-test1-srl1 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.yaml -e json_ietf\n$gnmic_base_cmd -a clab-test1-srl2 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.yaml -e json_ietf\n$gnmic_base_cmd -a clab-test1-srl3 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.yaml -e json_ietf\n##### multi hosts\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.yaml -e json_ietf\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set --update-path /interface[name=ethernet-1/1] --update-file 
configs/node/interface.yaml -e json_ietf\n### set update with request file\n$gnmic_base_cmd -a clab-test1-srl1 set --request-file configs/node/update_request_file.yaml\n$gnmic_base_cmd -a clab-test1-srl2 set --request-file configs/node/update_request_file.yaml\n$gnmic_base_cmd -a clab-test1-srl3 set --request-file configs/node/update_request_file.yaml\n\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --request-file configs/node/update_request_file.yaml\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set --request-file configs/node/update_request_file.yaml\n\n## delete\n### single host\n$gnmic_base_cmd -a clab-test1-srl1 set --delete /interface[name=ethernet-1/1]/description\n$gnmic_base_cmd -a clab-test1-srl2 set --delete /interface[name=ethernet-1/1]/description\n$gnmic_base_cmd -a clab-test1-srl3 set --delete /interface[name=ethernet-1/1]/description\n### multi hosts\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --delete /interface[name=ethernet-1/1]/description\n$gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set --delete /interface[name=ethernet-1/1]/description\n\n## combined update, replace and delete\n### combined set with value\n\n$gnmic_base_cmd -a clab-test1-srl1 set \\\n                --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \\\n                --update /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 \\\n                --delete /interface[name=ethernet-1/1]/admin-state\n\n$gnmic_base_cmd -a clab-test1-srl2 set \\\n                --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \\\n                --update /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 \\\n                --delete /interface[name=ethernet-1/1]/admin-state\n\n$gnmic_base_cmd -a clab-test1-srl3 set \\\n                --replace 
/interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \\\n                --update /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 \\\n                --delete /interface[name=ethernet-1/1]/admin-state\n\n# reset\n$gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set \\\n                --delete /interface[name=ethernet-1/1]/description \\\n                --delete /interface[name=ethernet-1/2]/description\n\n$gnmic_base_cmd -a clab-test1-srl1 set \\\n                --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \\\n                --replace /interface[name=ethernet-1/1]/subinterface[index=0]/description:::json_ietf:::e1-1.0_dummy_desc1 \\\n                --update /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 \\\n                --update /interface[name=ethernet-1/2]/subinterface[index=0]/description:::json_ietf:::e1-2._dummy_desc1 \\\n                --delete /interface[name=ethernet-1/1]/admin-state \\\n                --delete /interface[name=ethernet-1/2]/admin-state\n\n$gnmic_base_cmd -a clab-test1-srl2 set \\\n                --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \\\n                --replace /interface[name=ethernet-1/1]/subinterface[index=0]/description:::json_ietf:::e1-1.0_dummy_desc1 \\\n                --update /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 \\\n                --update /interface[name=ethernet-1/2]/subinterface[index=0]/description:::json_ietf:::e1-2._dummy_desc1 \\\n                --delete /interface[name=ethernet-1/1]/admin-state \\\n                --delete /interface[name=ethernet-1/2]/admin-state\n\n$gnmic_base_cmd -a clab-test1-srl3 set \\\n                --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \\\n                --replace 
/interface[name=ethernet-1/1]/subinterface[index=0]/description:::json_ietf:::e1-1.0_dummy_desc1 \\\n                --update /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 \\\n                --update /interface[name=ethernet-1/2]/subinterface[index=0]/description:::json_ietf:::e1-2._dummy_desc1 \\\n                --delete /interface[name=ethernet-1/1]/admin-state \\\n                --delete /interface[name=ethernet-1/2]/admin-state\n\n$gnmic_base_cmd -a clab-test1-srl1 set -e json_ietf \\\n                --replace-path /interface[name=ethernet-1/1]/description \\\n                --replace-value e1-1_dummy_desc1 \\\n                --replace-path /interface[name=ethernet-1/1]/subinterface[index=0]/description \\\n                --replace-value e1-1.0_dummy_desc1 \\\n                --update-path /interface[name=ethernet-1/2]/description \\\n                --update-value e1-2_dummy_desc1 \\\n                --update-path /interface[name=ethernet-1/2]/subinterface[index=0]/description \\\n                --update-value e1-2.0_dummy_desc1 \\\n                --delete /interface[name=ethernet-1/1]/admin-state \\\n                --delete /interface[name=ethernet-1/2]/admin-state\n\n$gnmic_base_cmd -a clab-test1-srl2 set -e json_ietf \\\n                --replace-path /interface[name=ethernet-1/1]/description \\\n                --replace-value e1-1_dummy_desc1 \\\n                --replace-path /interface[name=ethernet-1/1]/subinterface[index=0]/description \\\n                --replace-value e1-1.0_dummy_desc1 \\\n                --update-path /interface[name=ethernet-1/2]/description \\\n                --update-value e1-2_dummy_desc1 \\\n                --update-path /interface[name=ethernet-1/2]/subinterface[index=0]/description \\\n                --update-value e1-2.0_dummy_desc1 \\\n                --delete /interface[name=ethernet-1/1]/admin-state \\\n                --delete 
/interface[name=ethernet-1/2]/admin-state\n\n$gnmic_base_cmd -a clab-test1-srl3 set -e json_ietf \\\n                --replace-path /interface[name=ethernet-1/1]/description \\\n                --replace-value e1-1_dummy_desc1 \\\n                --replace-path /interface[name=ethernet-1/1]/subinterface[index=0]/description \\\n                --replace-value e1-1.0_dummy_desc1 \\\n                --update-path /interface[name=ethernet-1/2]/description \\\n                --update-value e1-2_dummy_desc1 \\\n                --update-path /interface[name=ethernet-1/2]/subinterface[index=0]/description \\\n                --update-value e1-2.0_dummy_desc1 \\\n                --delete /interface[name=ethernet-1/1]/admin-state \\\n                --delete /interface[name=ethernet-1/2]/admin-state \\\n                --dry-run\n\n$gnmic_base_cmd -a clab-test1-srl3 set -e json_ietf \\\n                --replace-path /interface[name=ethernet-1/1]/description \\\n                --replace-value e1-1_dummy_desc1 \\\n                --replace-path /interface[name=ethernet-1/1]/subinterface[index=0]/description \\\n                --replace-value e1-1.0_dummy_desc1 \\\n                --update-path /interface[name=ethernet-1/2]/description \\\n                --update-value e1-2_dummy_desc1 \\\n                --update-path /interface[name=ethernet-1/2]/subinterface[index=0]/description \\\n                --update-value e1-2.0_dummy_desc1 \\\n                --delete /interface[name=ethernet-1/1]/admin-state \\\n                --delete /interface[name=ethernet-1/2]/admin-state\n"
  },
  {
    "path": "tests/subscribe_once_cmd.sh",
    "content": "#!/bin/bash\n\ntrap 'failure ${LINENO} \"$BASH_COMMAND\"' ERR\n\n# single host, single path\n./gnmic-rc1 -a clab-test1-srl1 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name\n./gnmic-rc1 -a clab-test1-srl2 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name\n./gnmic-rc1 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name\n\n# multiple hosts, single path\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name\n#\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -u admin -p NokiaSrl1! 
--skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name\n\n# multiple hosts, multiple paths\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name \\\n                              --path /interface[name=*]\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name \\\n                              --path /interface[name=*]\n./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name \\\n                              --path /interface[name=*]\n#\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name \\\n                              --path /interface[name=*]\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name \\\n                              --path /interface[name=*]\n./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -u admin -p NokiaSrl1! 
--skip-verify -d -e ascii subscribe \\\n                              --mode once \\\n                              --path /system/name \\\n                              --path /interface[name=*]\n\n# subscription config from file\n./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml subscribe\n./gnmic-rc1 -a clab-test1-srl2 --config configs/gnmic1.yaml subscribe\n./gnmic-rc1 -a clab-test1-srl3 --config configs/gnmic1.yaml subscribe\n./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml subscribe --format prototext\n./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml subscribe --format protojson\n./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml subscribe --format event\n\n# hosts and config from file\n./gnmic-rc1 --config configs/gnmic2.yaml subscribe\n./gnmic-rc1 --config configs/gnmic2.yaml subscribe --format prototext\n./gnmic-rc1 --config configs/gnmic2.yaml subscribe --format protojson\n./gnmic-rc1 --config configs/gnmic2.yaml subscribe --format event\n\n# nodes from targets field\n./gnmic-rc1 --config configs/gnmic3.yaml subscribe \n./gnmic-rc1 --config configs/gnmic3.yaml subscribe --format prototext \n./gnmic-rc1 --config configs/gnmic3.yaml subscribe --format protojson \n./gnmic-rc1 --config configs/gnmic3.yaml subscribe --format event\n\n# multiple once subscriptions\n./gnmic-rc1 --config configs/gnmic4.yaml subscribe\n./gnmic-rc1 --config configs/gnmic4.yaml subscribe --format prototext\n./gnmic-rc1 --config configs/gnmic4.yaml subscribe --format protojson\n./gnmic-rc1 --config configs/gnmic4.yaml subscribe --format event"
  },
  {
    "path": "tests/telemetry_labs.sh",
    "content": "#!/bin/bash\n\nexport SHELLOPTS\nset -eET\n\nfailure() {\n  local lineno=$1\n  local msg=$2\n  echo \"Failed at line $lineno: $msg\"\n}\n\nNUM_LABS=5\nNUM_NODES_PER_LAB=14\n\nexport -f failure\n\nfunction cleanup() {\n    printf \"cleaning up...\\n\"\n    cd clab/telemetry\n    sudo clab destroy -t telemetry.clab.yaml --cleanup\n    cd ../..\n    #\n    for i in `seq 1 $NUM_LABS`\n    do\n      printf \"destroying lab clab/lab%s.clab.yaml\\n\" $i\n      sudo clab destroy -t clab/lab$i.clab.yaml --cleanup\n      rm clab/lab$i.clab.yaml\n      rm -rf .lab$i.clab.yaml\n    done\n}\n\nsource ./cluster_funcs.sh\n\ntrap 'failure ${LINENO} \"$BASH_COMMAND\"' ERR\ntrap cleanup EXIT\ntrap cleanup SIGINT\n\nstart=`date +%s`\n# generate lab files\nfor i in `seq 1 $NUM_LABS`\n  do \n    echo 'ID: ' $i | gomplate -f clab/labN.clab.yaml -d data=stdin:///id.yaml -o clab/lab${i}.clab.yaml\n  done\n\n# destroy labs if they are still up.\nfor i in `seq 1 $NUM_LABS`\n  do\n    sudo clab destroy -t clab/lab${i}.clab.yaml --cleanup \n  done\n\n\n# build docker image\ndocker build -t gnmic:0.0.0-rc1 ../\n\n# deploy telemetry lab\necho \"\"\ncd clab/telemetry\nsudo clab deploy -t telemetry.clab.yaml --reconfigure\ncd ../..\n\necho \"\"\n# check all containers are running\ncontainer_count=$(docker ps -f label=containerlab=telemetry -q | wc -l)\nif [ $container_count -ne 13 ]\n  then\n    printf \"Number of telemetry containers is not 13, it's %s... 
time to panic\\n\" $container_count\n    exit 1\nfi\n\nprintf \"Found %s running containers\\n\" $container_count\n\necho \"\"\necho \"Waiting for services to register...\"\nsleep 30\nprintf \"Consul services:\\n\"\nconsul catalog services -tags\n\necho \"\"\nprintf \"Consul services to instances:\\n\"\nconsul-template -template=\"consul_templates/all_services.tpl:all_services.txt\" -once\ncat all_services.txt\nrm all_services.txt\n\n##################################\n#  Deploying labs with SRL nodes #\n##################################\nwhile true\ndo\nprintf \"Waiting a bit before deploying the nodes\\n\"\necho \"\"\nsleep 10\n\nfor i in `seq 1 $NUM_LABS`\n  do\n    echo \"Deploying lab\" $i\n    sudo clab deploy -t clab/lab${i}.clab.yaml --reconfigure\n  done\necho \"\"\n\nsleep 30\nnodes_count=$(docker ps -f label=clab-node-kind=srl -f label=test=telemetry -q | wc -l)\nprintf \"Found %s running SRL nodes\\n\" $nodes_count\n\nprintf \"Waiting for the next docker loader run before checking the number of locked targets...\\n\"\nsleep 30\n\ncheck_num_locked_targets $(($NUM_NODES_PER_LAB * $NUM_LABS))\nsleep 60\n\necho \"Running API calls...\"\n./api.sh clab-telemetry-gnmic1:7890\n./api.sh clab-telemetry-gnmic2:7891\n./api.sh clab-telemetry-gnmic3:7892\n./api.sh clab-telemetry-agg-gnmic1:7893\n./api.sh clab-telemetry-agg-gnmic2:7894\n./api.sh clab-telemetry-agg-gnmic3:7895\n\necho \"\"\n#start adding and removing labs\necho \"Waiting a bit before starting to add and remove labs...\"\nsleep 10\n## remove 2 labs\nsudo clab destroy -t clab/lab1.clab.yaml --cleanup\nsudo clab destroy -t clab/lab5.clab.yaml --cleanup\nsleep 60\n\ncheck_num_locked_targets $(($NUM_NODES_PER_LAB * ((${NUM_LABS} - 2))))\n\nsleep 60\n## add 1 lab\necho \"Re Deploying lab1\"\nsudo clab deploy -t clab/lab1.clab.yaml --reconfigure\nsleep 60\n\ncheck_num_locked_targets $(($NUM_NODES_PER_LAB * ((${NUM_LABS} - 1))))\nsleep 60\n\n## add 1 lab and remove 1\necho \"Destroying lab2, Adding back 
lab5\"\nsudo clab deploy -t clab/lab5.clab.yaml --reconfigure\nsudo clab destroy -t clab/lab2.clab.yaml --cleanup\nsleep 60\n\ncheck_num_locked_targets $(($NUM_NODES_PER_LAB * ((${NUM_LABS} - 1))))\nsleep 60\n\necho \"Running API calls...\"\n./api.sh clab-telemetry-gnmic1:7890\n./api.sh clab-telemetry-gnmic2:7891\n./api.sh clab-telemetry-gnmic3:7892\n./api.sh clab-telemetry-agg-gnmic1:7893\n./api.sh clab-telemetry-agg-gnmic2:7894\n./api.sh clab-telemetry-agg-gnmic3:7895\n\necho \"Re Deploying lab2\"\nsudo clab deploy -t clab/lab2.clab.yaml --reconfigure\nsleep 60\ncheck_num_locked_targets $(($NUM_NODES_PER_LAB * $NUM_LABS))\n\nfor i in `seq 1 $NUM_LABS`\n    do\n      printf \"destroying lab clab/lab%s.clab.yaml\\n\" $i\n      sudo clab destroy -t clab/lab$i.clab.yaml --cleanup\n      # rm clab/lab$i.clab.yaml\n      # rm -rf .lab$i.clab.yaml\n    done\nsleep 60\ndone\n#######\n# END #\n#######\n\n# calculate runtime\nend=`date +%s`\nruntime=$((end-start))\nprintf \"runtime=%ss\\n\" $runtime\n"
  },
  {
    "path": "tests/version_cmd.sh",
    "content": "#!/bin/bash\n\ntrap 'failure ${LINENO} \"$BASH_COMMAND\"' ERR\n\n# version\n./gnmic-rc1 version\n./gnmic-rc1 version --format json\n./gnmic-rc1 version upgrade\n./gnmic-rc1 version upgrade --use-pkg"
  }
]