[
  {
    "path": ".dockerignore",
    "content": "data/\n.build/\n.tarballs/\n\n!.build/linux-amd64/\n!.build/linux-arm64/\n!.build/linux-armv7/\n!.build/linux-ppc64le/\n!.build/linux-riscv64/\n!.build/linux-s390x/\n"
  },
  {
    "path": ".github/FUNDING.yml",
    "content": "# These are supported funding model platforms\n\n\npatreon: thomaspeitz\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug.yml",
    "content": "name: 🐞 Bug\ndescription: File a bug report\ntitle: \"[BUG] <title>\"\nlabels: [bug]\nbody:\n- type: checkboxes\n  attributes:\n    label: Is there an existing issue for this?\n    description: Please search to see if an issue already exists for the bug you encountered.\n    options:\n    - label: I have searched the existing issues\n      required: true\n- type: textarea\n  attributes:\n    label: YACE version\n    description: The output of running `yace version`.\n  validations:\n    required: false\n- type: textarea\n  attributes:\n    label: Config file\n    description: The config file passed to the `--config.file` option.\n  validations:\n    required: false\n- type: textarea\n  attributes:\n    label: Current Behavior\n    description: A concise description of what you're experiencing.\n  validations:\n    required: false\n- type: textarea\n  attributes:\n    label: Expected Behavior\n    description: A concise description of what you expected to happen.\n  validations:\n    required: false\n- type: textarea\n  attributes:\n    label: Steps To Reproduce\n    description: Steps to reproduce the behavior.\n    placeholder: |\n      1. In this environment...\n      2. With this config...\n      3. Run '...'\n      4. See error...\n  validations:\n    required: false\n- type: textarea\n  attributes:\n    label: Anything else?\n    description: |\n      Links? References? Anything that will give us more context about the issue you are encountering!\n\n      Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.\n  validations:\n    required: false\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature.yml",
    "content": "name: 🆕 Feature\ndescription: Request a new feature\ntitle: \"[FEATURE] <title>\"\nlabels: [enhancement]\nbody:\n- type: checkboxes\n  attributes:\n    label: Is there an existing issue for this?\n    description: Please search to see if an issue already exists for the feature you are requesting.\n    options:\n    - label: I have searched the existing issues\n      required: true\n- type: textarea\n  attributes:\n    label: Feature description\n    description: A concise description of what you're expecting.\n  validations:\n    required: true\n- type: textarea\n  attributes:\n    label: What might the configuration look like?\n    description: Example configuration (useful as a baseline during development).\n    placeholder: |\n      ```yml\n      discovery:\n        jobs:\n        - type: <name of service>\n          period: 30\n          length: 600\n          metrics:\n          - name: SomeExportedMetric\n            statistics: [Minimum, Maximum]\n      ```\n  validations:\n    required: false\n- type: textarea\n  attributes:\n    label: Anything else?\n    description: |\n      Links? References? Anything that will give us more context about the issue you are encountering!\n\n      Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.\n  validations:\n    required: false\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n  - package-ecosystem: gomod\n    directory: /\n    schedule:\n      interval: monthly\n    open-pull-requests-limit: 10\n    groups:\n      aws-sdk-v2:\n        patterns:\n          - \"github.com/aws/aws-sdk-go-v2*\"\n  - package-ecosystem: github-actions\n    directory: /\n    schedule:\n      interval: monthly\n    open-pull-requests-limit: 10\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: CI\n\non:\n  push:\n    tags:\n      - 'v*'\n    branches:\n      - master\n  pull_request:\n  workflow_call:\n\njobs:\n  test_go:\n    name: Go tests\n    runs-on: ubuntu-latest\n    container:\n      image: quay.io/prometheus/golang-builder:1.26-base\n    steps:\n      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n      - uses: prometheus/promci-setup@5af30ba8c199a91d6c04ebdc3c48e630e355f62d # v0.1.0\n      - run: make test\n\n  build:\n    name: Build for common architectures\n    runs-on: ubuntu-latest\n    if: |\n      !(github.event_name == 'push' && github.event.ref == 'refs/heads/master')\n      &&\n      !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v'))\n    strategy:\n      matrix:\n        thread: [ 0, 1, 2 ]\n    steps:\n      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n      - uses: prometheus/promci/build@769ee18070cd21cfc2a24fa912349fd3e48dee58 # v0.6.0\n        with:\n          promu_opts: \"-p linux/amd64 -p windows/amd64 -p darwin/amd64 -p linux/arm64 -p windows/arm64 -p darwin/arm64\"\n          parallelism: 3\n          thread: ${{ matrix.thread }}\n\n  build_all:\n    name: Build for all architectures\n    runs-on: ubuntu-latest\n    if: |\n      (github.event_name == 'push' && github.event.ref == 'refs/heads/master')\n      ||\n      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v'))\n    strategy:\n      matrix:\n        thread: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ]\n    steps:\n      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n      - uses: prometheus/promci/build@769ee18070cd21cfc2a24fa912349fd3e48dee58 # v0.6.0\n        with:\n          parallelism: 12\n          thread: ${{ matrix.thread }}\n\n  verify-example-configs:\n    name: Verify\n    runs-on: ubuntu-latest\n    container:\n      image: quay.io/prometheus/golang-builder:1.26-base\n    steps:\n      - uses: 
actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n      - run: make build\n      - name: Verify example configs\n        run: find ./examples -name \"*.yml\" -print0 | xargs -0 -I % ./yace verify-config -config.file %\n\n  publish_master:\n    name: Publish master branch artifacts\n    runs-on: ubuntu-latest\n    needs: [test_go, build_all, verify-example-configs]\n    if: |\n      (github.repository == 'prometheus-community/yet-another-cloudwatch-exporter')\n      &&\n      (github.event_name == 'push' && github.event.ref == 'refs/heads/master')\n    steps:\n      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n      - uses: prometheus/promci/publish_main@769ee18070cd21cfc2a24fa912349fd3e48dee58 # v0.6.0\n        with:\n          docker_hub_organization: prometheuscommunity\n          docker_hub_login: ${{ secrets.docker_hub_login }}\n          docker_hub_password: ${{ secrets.docker_hub_password }}\n          quay_io_organization: prometheuscommunity\n          quay_io_login: ${{ secrets.quay_io_login }}\n          quay_io_password: ${{ secrets.quay_io_password }}\n\n  publish_release:\n    name: Publish release artifacts\n    runs-on: ubuntu-latest\n    needs: [test_go, build_all, verify-example-configs]\n    if: |\n      (github.repository == 'prometheus-community/yet-another-cloudwatch-exporter')\n      &&\n      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v0.'))\n    steps:\n      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n      - uses: prometheus/promci/publish_release@769ee18070cd21cfc2a24fa912349fd3e48dee58 # v0.6.0\n        with:\n          docker_hub_organization: prometheuscommunity\n          docker_hub_login: ${{ secrets.docker_hub_login }}\n          docker_hub_password: ${{ secrets.docker_hub_password }}\n          quay_io_organization: prometheuscommunity\n          quay_io_login: ${{ secrets.quay_io_login }}\n          quay_io_password: ${{ 
secrets.quay_io_password }}\n          github_token: ${{ secrets.PROMBOT_GITHUB_TOKEN }}\n"
  },
  {
    "path": ".github/workflows/container_description.yml",
    "content": "---\nname: Push README to Docker Hub\non:\n  push:\n    paths:\n      - \"README.md\"\n      - \"README-containers.md\"\n      - \".github/workflows/container_description.yml\"\n    branches: [ main, master ]\n\npermissions:\n  contents: read\n\njobs:\n  PushDockerHubReadme:\n    runs-on: ubuntu-latest\n    name: Push README to Docker Hub\n    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.\n    steps:\n      - name: git checkout\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        with:\n          persist-credentials: false\n      - name: Set docker hub repo name\n        run: echo \"DOCKER_REPO_NAME=$(make docker-repo-name)\" >> $GITHUB_ENV\n      - name: Push README to Dockerhub\n        uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1\n        env:\n          DOCKER_USER: ${{ secrets.DOCKER_HUB_LOGIN }}\n          DOCKER_PASS: ${{ secrets.DOCKER_HUB_PASSWORD }}\n        with:\n          destination_container_repo: ${{ env.DOCKER_REPO_NAME }}\n          provider: dockerhub\n          short_description: ${{ env.DOCKER_REPO_NAME }}\n          # Empty string results in README-containers.md being pushed if it\n          # exists. 
Otherwise, README.md is pushed.\n          readme_file: ''\n\n  PushQuayIoReadme:\n    runs-on: ubuntu-latest\n    name: Push README to quay.io\n    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.\n    steps:\n      - name: git checkout\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        with:\n          persist-credentials: false\n      - name: Set quay.io org name\n        run: echo \"DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')\" >> $GITHUB_ENV\n      - name: Set quay.io repo name\n        run: echo \"DOCKER_REPO_NAME=$(make docker-repo-name)\" >> $GITHUB_ENV\n      - name: Push README to quay.io\n        uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1\n        env:\n          DOCKER_APIKEY: ${{ secrets.QUAY_IO_API_TOKEN }}\n        with:\n          destination_container_repo: ${{ env.DOCKER_REPO_NAME }}\n          provider: quay\n          # Empty string results in README-containers.md being pushed if it\n          # exists. Otherwise, README.md is pushed.\n          readme_file: ''\n"
  },
  {
    "path": ".github/workflows/golangci-lint.yml",
    "content": "---\n# This action is synced from https://github.com/prometheus/prometheus\nname: golangci-lint\non:\n  push:\n    branches: [main, master, 'release-*']\n    paths:\n      - \"go.sum\"\n      - \"go.mod\"\n      - \"**.go\"\n      - \"scripts/errcheck_excludes.txt\"\n      - \".github/workflows/golangci-lint.yml\"\n      - \".golangci.yml\"\n    tags: ['v*']\n  pull_request:\n\npermissions:  # added using https://github.com/step-security/secure-repo\n  contents: read\n\njobs:\n  golangci:\n    permissions:\n      contents: read  # for actions/checkout to fetch code\n      pull-requests: read  # for golangci/golangci-lint-action to fetch pull requests\n    name: lint\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        with:\n          persist-credentials: false\n      - name: Install Go\n        uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0\n        with:\n          go-version: 1.26.x\n      - name: Install snmp_exporter/generator dependencies\n        run: sudo apt-get update && sudo apt-get -y install libsnmp-dev\n        if: github.repository == 'prometheus/snmp_exporter'\n      - name: Get golangci-lint version\n        id: golangci-lint-version\n        run: echo \"version=$(make print-golangci-lint-version)\" >> $GITHUB_OUTPUT\n      - name: Lint\n        uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0\n        with:\n          args: --verbose\n          version: ${{ steps.golangci-lint-version.outputs.version }}\n"
  },
  {
    "path": ".gitignore",
    "content": ".build\nyet-another-cloudwatch-exporter\n!charts/yet-another-cloudwatch-exporter\nvendor\ndist\n/yace\n*.tar.gz\n"
  },
  {
    "path": ".golangci.yml",
    "content": "version: \"2\"\noutput:\n  formats:\n    text:\n      path: stderr\n      colors: false\nlinters:\n  default: none\n  enable:\n    - asasalint\n    - bodyclose\n    - copyloopvar\n    - errcheck\n    - errorlint\n    - exhaustive\n    - govet\n    - ineffassign\n    - misspell\n    - nilerr\n    - nolintlint\n    - nonamedreturns\n    - predeclared\n    - revive\n    - sloglint\n    - staticcheck\n    - unconvert\n    - unused\n  exclusions:\n    generated: lax\n    presets:\n      - comments\n      - common-false-positives\n      - legacy\n      - std-error-handling\n    paths:\n      - third_party$\n      - builtin$\n      - examples$\nformatters:\n  enable:\n    - gofmt\n    - gofumpt\n    - goimports\n  settings:\n    goimports:\n      local-prefixes:\n        - github.com/prometheus-community/yet-another-cloudwatch-exporter\n  exclusions:\n    generated: lax\n    paths:\n      - third_party$\n      - builtin$\n      - examples$\n"
  },
  {
    "path": ".promu.yml",
    "content": "go:\n    # This must match .circle/config.yml.\n    version: 1.26\nrepository:\n    path: github.com/prometheus-community/yet-another-cloudwatch-exporter\nbuild:\n    binaries:\n        - name: yace\n          path: ./cmd/yace\n    ldflags: |\n        -X github.com/prometheus/common/version.Version={{.Version}}\n        -X github.com/prometheus/common/version.Revision={{.Revision}}\n        -X github.com/prometheus/common/version.Branch={{.Branch}}\n        -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}}\n        -X github.com/prometheus/common/version.BuildDate={{date \"20060102-15:04:05\"}}\ntarball:\n    files:\n        - LICENSE\n        - NOTICE\n"
  },
  {
    "path": ".yamllint",
    "content": "---\nextends: default\nignore: |\n  **/node_modules\n  web/api/v1/testdata/openapi_*_golden.yaml\n\nrules:\n  braces:\n    max-spaces-inside: 1\n    level: error\n  brackets:\n    max-spaces-inside: 1\n    level: error\n  commas: disable\n  comments: disable\n  comments-indentation: disable\n  document-start: disable\n  indentation:\n    spaces: consistent\n    indent-sequences: consistent\n  key-duplicates:\n    ignore: |\n      config/testdata/section_key_dup.bad.yml\n  line-length: disable\n  truthy:\n    check-keys: false\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "## main / (unreleased)\n\n## 0.64.0 / 2026-03-27\n\n**Important news and breaking changes**\n\n- BREAKING CHANGE: AWS SDK v1 support has been removed. The `aws-sdk-v1` feature flag is now a no-op and will be silently ignored. AWS SDK v1 reached end-of-support on July 31, 2025. SDK v2 has been the default since v0.63.0 (September 2025). Users who were passing `--enable-feature aws-sdk-v1` should remove the flag, as it no longer has any effect. If you use YACE as a library, the `v1` and `v2` sub-packages under `pkg/clients/` have been removed. All client implementations now live directly in their parent packages (e.g. `pkg/clients/cloudwatch`, `pkg/clients/tagging`, `pkg/clients/account`). Import paths like `pkg/clients/v1`, `pkg/clients/cloudwatch/v2`, etc. must be updated accordingly.\n\n* [CHANGE] Remove AWS SDK v1 support and deprecate `aws-sdk-v1` feature flag by @tristanburgess. #1825\n* [CHANGE] Add Andrii Kushch and Tristan Burgess as maintainers by @cristiangreco. #1788\n* [FEATURE] Implement Enhanced Metrics framework and initial set of metrics by @andriikushch. #1795\n* [FEATURE] Add support for `AWS/EKS` namespace by @LS80. #1760\n* [FEATURE] Split out Bedrock metrics into all needed namespaces by @tristanburgess. #1766\n* [FEATURE] Separate aliases for Bedrock namespaces by @tristanburgess. #1767\n* [ENHANCEMENT] Update Go build to 1.26, replace `gopkg.in/yaml.v2` with supported fork, sync upstream Prometheus files and migrate PromCI tooling by @SuperQ. #1831\n* [ENHANCEMENT] Add AWS/Bedrock GuardrailArn dimension-based resource tagging by @tristanburgess. #1761\n* [ENHANCEMENT] Add DimensionRegexps support for AWS Backup service by @amitshl. #1775\n* [ENHANCEMENT] Add DimensionRegexps for AWS/Cassandra by @bdeore. #1693\n* [ENHANCEMENT] Add DimensionRegexp to ElasticBeanstalk by @benbridts. #1690\n* [ENHANCEMENT] Test exporter with mocked clients by @jeschkies. #1791\n* [ENHANCEMENT] Add privatelink examples to docs by @cuscal-brad. 
 #1765\n* [BUGFIX] Fix AWS SageMaker dimension name handling for case sensitivity by @andriikushch. #1793\n* [BUGFIX] Fix Docker configuration paths for AWS credentials by @andriikushch. #1804\n\n## 0.63.0 / 2025-09-25\n\n**Important news and breaking changes**\n\n- NOTE: As of Prometheus 3.0, UTF-8 strings are valid for metric names and label names. However, for backward compatibility, this release of YACE still uses the old, stricter legacy validation scheme. UTF-8 validation will be enabled in a future version of YACE, thus requiring that your remote destination is compatible with UTF-8 support.\n\n- BREAKING CHANGE: the AWS SDK v2 is now the default in YACE. Use the flag `aws-sdk-v1` to switch back to SDK v1. Flag `aws-sdk-v2` has been removed.\n\n- NEW FEATURE: `exportAllDataPoints`, enables the inclusion of past metric data points from the CloudWatch response if available.\n\n* [CHANGE] Make aws sdk v2 the default choice by @cristiangreco\n* [FEATURE] Support history data export by @woehrl01\n* [FEATURE] Add AWS/Transfer as available service by @thepalbi\n* [FEATURE] Add auto-discovery for Directory Services(MicrosoftAD) by @RuslanMustaev\n* [FEATURE] Add support for Redshift-Serverless by @nickbazinet\n* [FEATURE] Add db connections avg panel to RDS dashboard by @yduartep\n* [FEATURE] Add example for lambda_edge by @tyagian\n* [FEATURE] sagemaker: additional InferenceComponent support by @tristanburgess\n* [ENHANCEMENT] Update Go version by @SuperQ\n* [ENHANCEMENT] Use Prometheus common version library by @SuperQ\n* [ENHANCEMENT] Update container repositories by @SuperQ\n* [ENHANCEMENT] Speed up build metric name by @jeschkies\n* [ENHANCEMENT] Add guard to hot logging location in associator by @thepalbi\n* [ENHANCEMENT] Update resource association logic to try both with and without dimension fixes by @tristanburgess\n* [ENHANCEMENT] Change discovery runtime model field from Type -> Namespace by @kgeckhart\n* [BUGFIX] Fix `CachingFactory` concurrent usage 
issues by @andriikushch\n* [BUGFIX] Correctly run tests in CI and fix failing tests by @jeschkies\n* [BUGFIX] Fix doc about non-existing `debug` flag by @zipkid\n* [BUGFIX] Update URL to Helm Chart in docs by @koralowiec\n* [BUGFIX] Add missing license header to `associator_logging_test.go` by @cristiangreco\n* [BUGFIX] Dashboards: replace `scrape_job` label with `job` by @yduartep\n* [BUGFIX] RDS dashboard: use average for cpu utilization to align with AWS best practices by @yduartep\n\n## 0.62.1 / 2025-01-03\n\n**Important news and breaking changes**\n\nBugfix release to address artifacts build error. The most important news is the same as 0.62.0: as of November 2024, YACE is part of prometheus-community. Read more about it in these announcement posts:\n- https://prometheus.io/blog/2024/11/19/yace-joining-prometheus-community/\n- https://grafana.com/blog/2024/11/19/yace-moves-to-prometheus-community/\n\n* [ENHANCEMENT] Adopt log/slog, drop custom logging pkg by @tjhop\n* [ENHANCEMENT] Bump github.com/prometheus/common from 0.60.1 to 0.61.0\n* [ENHANCEMENT] Bump golang.org/x/sync from 0.9.0 to 0.10.0\n* [ENHANCEMENT] Bump the aws-sdk-v2 group\n* [ENHANCEMENT] Synchronize common files from prometheus/prometheus\n* [ENHANCEMENT] Update CHANGELOG format by @SuperQ\n* [BUGFIX] Fix artifact publishing by @SuperQ\n\n## 0.62.0 / 2024-12-19\n\n**Important news and breaking changes**\n\n* As of November 2024, YACE is part of prometheus-community. 
Read more about it in these announcement posts:\n- https://prometheus.io/blog/2024/11/19/yace-joining-prometheus-community/\n- https://grafana.com/blog/2024/11/19/yace-moves-to-prometheus-community/\n\n**Bugfixes and features**\n\nFeatures:\n* Add ContainerInsights service by @JetSquirrel\n* Add AWS/Scheduler and AWS/ECR services by @andriikushch\n* Add AWS/VpcLattice service by @greymd\n* Add AWS/QuickSight service by @choppedpork\n* Add AWS/Timestream service by @andriikushch\n* Add Network Manager / Cloud WAN support by @kylehodgetts\n* RDS: include RDS Proxy metrics within the RDS namespace by @vitaliyf\n* Mediapackage: include mediapackagev2 namespace by @henrylaiBrightcove\n\nBugs:\n* Add parentheses to sanitize list to prevent invalid metric name generation by @nixargh\n\nDocs:\n* Review and update supported services in README by @cristiangreco\n* Mention support for AWS/MediaPackage by @prathamesh-sonpatki\n* Update README and MAINTAINERS files to mention the move to prometheus-community by @cristiangreco\n\nRefactoring:\n* Start a unified scraper by @kgeckhart\n* Refactor prom metric creation by @kgeckhart\n* Update for Prometheus Community by @SuperQ\n* Update Docker build by @SuperQ\n* Fix linting issues detected by golangci-lint 1.60.3 by @cristiangreco\n* Update build tools and CI to Go 1.23 by @cristiangreco\n\n**Dependencies**\n\n* Bump actions/checkout from 4.2.0 to 4.2.2\n* Bump alpine from 3.20.1 to 3.20.3\n* Bump github.com/aws/aws-sdk-go from 1.54.7 to 1.55.5\n* Bump github.com/aws/smithy-go from 1.22.0 to 1.22.1\n* Bump github.com/prometheus/client_golang from 1.19.1 to 1.20.5\n* Bump github.com/prometheus/common from 0.54.0 to 0.60.1\n* Bump github.com/stretchr/testify from 1.9.0 to 1.10.0\n* Bump github.com/urfave/cli/v2 from 2.27.2 to 2.27.5\n* Bump golang from 1.22 to 1.23\n* Bump golang.org/x/sync from 0.7.0 to 0.9.0\n* Bump golangci/golangci-lint-action from 6.0.1 to 6.1.1\n* Bump grafana/regexp to `20240607082908-2cb410fa05da`\n* Bump 
the aws-sdk-v2 group\n\n**New contributors**\n\n* @prathamesh-sonpatki made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1465\n* @JetSquirrel made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1463\n* @greymd made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1506\n* @choppedpork made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1477\n* @SuperQ made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1568\n* @prombot made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1570\n* @nixargh made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1563\n* @kylehodgetts made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1580\n* @vitaliyf made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1501\n* @henrylaiBrightcove made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1544\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.61.2...v0.62.0\n\n## 0.61.2 / 2024-06-25\n\nBugfix release to update the `goreleaser` configuration (again!), please refer to the release notes for `0.61.0` for actual code changes.\n\nhttps://github.com/prometheus-community/yet-another-cloudwatch-exporter/releases/tag/v0.61.0\n\n## 0.61.1 / 2024-06-25\n\nBugfix release to update the `goreleaser` configuration, please refer to the release notes for `0.61.0` for actual code changes.\n\nhttps://github.com/prometheus-community/yet-another-cloudwatch-exporter/releases/tag/v0.61.0\n\n## 
0.61.0 / 2024-06-25\n\n**Important news and breaking changes**\n\n* This release adds support for AWS account aliases (by @thepalbi). If the role used by YACE has `\"iam:ListAccountAliases\"` permission, the account alias (if any) is added as a label to the `aws_account_info` metric.\n\n**Bugfixes and features**\n\nFeatures:\n* Add AWS/EC2CapacityReservations to the services list by @luismy\n* Add support for MediaPackage metrics by @theunissenne\n* Add AWS/AppRunner as supported service by @fabiiw05\n\nBugs:\n* Fix association with gwlb by @vainiusd\n\nRefactoring:\n* Add support for batching by time params by @kgeckhart\n\n**Dependencies**\n\n* Bump alpine from 3.19.1 to 3.20.1\n* Bump github.com/aws/aws-sdk-go from 1.53.1 to 1.54.7\n* Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.161.4 to 1.162.0 in the aws-sdk-v2 group\n* Bump github.com/prometheus/common from 0.53.0 to 0.54.0\n* Bump golangci/golangci-lint-action from 5.3.0 to 6.0.1\n* Bump goreleaser/goreleaser-action from 5 to 6\n* Bump the aws-sdk-v2 group\n\n**New contributors**\n\n* @luismy made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1341\n* @fabiiw05 made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1433\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.60.0...v0.61.0\n\n## 0.60.0 / 2024-05-14\n\n**Bugfixes and features**\n\nFeatures:\n* add cloudwatch log metric support by @vainiusd\n* feat: add AWS/RUM as supported service by @hexionas\n\nBugs:\n* Fix all value for function_name variable in lambda dashboard by @thepalbi\n* Fix rounding period deprecation notice by @cristiangreco\n\nDocs:\n* README: update config example by @cristiangreco\n* Fix ElastiCache metric namespace typo on README by @Roberdvs\n\nRefactoring:\n* getmetricdata: Move batching to an iterator by @kgeckhart\n\n**Dependencies**\n* Bump 
github.com/aws/aws-sdk-go from 1.51.21 to 1.53.1\n* Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.156.0 to 1.160.0\n* Bump github.com/prometheus/client_golang from 1.19.0 to 1.19.1\n* Bump github.com/prometheus/common from 0.52.3 to 0.53.0\n* Bump github.com/urfave/cli/v2 from 2.27.1 to 2.27.2\n* Bump golangci/golangci-lint-action from 4.0.0 to 5.3.0\n* Bump the aws-sdk-v2 group with 13 updates\n\n**New contributors**\n\n* @Roberdvs made their first contribution\n* @hexionas made their first contribution\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.59.0...v0.60.0\n\n## 0.59.0 / 2024-04-18\n\n**Important news and breaking changes**\n\nThis release brings a bunch of breaking changes:\n* Setting `roundingPeriod` for discovery jobs is deprecated, a warning will be logged at startup. This is being deprecated in favor of always using the metric period. The implementation for `roundingPeriod` can result in inconsistent Start and EndTime between batches. This negates its intent to ensure Start and EndTimes align with the metric period for [CloudWatch best practices](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html). This has the potential to produce data which will look inaccurate when compared against CloudWatch itself driving a lot of confusion. See https://github.com/prometheus-community/yet-another-cloudwatch-exporter/issues/1290 for further context.\n* Setting `delay` at the metric level is deprecated, a warning will be logged at startup. This `delay` configuration has existed for a long time but was never utilized. Deprecating it and eventually removing it was chosen to simplify the configuration. 
See https://github.com/prometheus-community/yet-another-cloudwatch-exporter/issues/1290#issuecomment-1948904375 for further context.\n* For discovery jobs, the `type` field and the keys of `exportedTagsOnMetrics` must be the AWS namespace rather than the alias (the README contains an up-to-date list of namespaces). Aliases are not allowed anymore. An error will be thrown at startup if an invalid namespace or an alias is used.\n* Some metric names have been changed to avoid duplicating the namespace. This includes:\n  - `aws_es_esreporting_failed_request_sys_err_count` is `aws_es_reporting_failed_request_sys_err_count`\n  - `aws_es_esreporting_failed_request_user_err_count` is `aws_es_reporting_failed_request_user_err_count`\n  - `aws_es_esreporting_request_count` is `aws_es_reporting_request_count`\n  - `aws_es_esreporting_success_count` is `aws_es_reporting_success_count`\n  - `aws_kafka_kafka_app_logs_disk_used` is `aws_kafka_app_logs_disk_used`\n  - `aws_kafka_kafka_data_logs_disk_used` is `aws_kafka_data_logs_disk_used`\n  - `aws_rds_rdsto_aurora_postgre_sqlreplica_lag` is `aws_rds_to_aurora_postgre_sqlreplica_lag`\n  - `aws_glue_glue_.*` is `aws_glue_.*`\n\nThese breaking changes will allow making the configuration easier to understand and less error prone, and also to build better documentation around supported services.\n\n**Bugfixes and features**\n\nFeatures:\n* Add AWS/SecretsManager to the services list by @taraspos\n* Support partner events buses by @HristoStoyanovYotpo\n* `discovery.exportedTagsOnMetrics`: validate that keys match one of the job types defined by @cristiangreco\n\nRefactoring:\n* Update comment in factory.go by @andriikushch\n* getmetricdata: move window calculator to processor by @kgeckhart\n* promutil: clean up prom metric names that duplicate parts of the namespace by @tristanburgess\n* promutil: rewrite sanitisation funcs for memory optimisation by @cristiangreco\n* Do not allow using aliases as job types in discovery jobs by 
@cristiangreco\n\n**Dependencies**\n\n* Bump github.com/aws/aws-sdk-go from 1.51.16 to 1.51.21\n* Bump github.com/aws/aws-sdk-go-v2 group\n* Bump github.com/prometheus/common from 0.52.2 to 0.52.3\n\n**New contributors**\n\n* @taraspos made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1330\n* @HristoStoyanovYotpo made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1359\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.58.0...v0.59.0\n\n## 0.58.0 / 2024-04-06\n\n**Bugfixes and features**\n\nFeatures:\n* Simplify CloudWatch API call counters by @kgeckhart\n\nBugs:\n* Fixed issue with generated Prometheus metric name when working with AWS namespaces which have a leading special character, like `/aws/sagemaker/TrainingJobs` by @tristanburgess\n\nRefactoring:\n* Add abstraction for `GetMetricsData` processing by @kgeckhart\n* `GetMetricData`: refactor QueryID generation and result mapping by @kgeckhart\n* Refactored out the name-building part of `promutil.BuildNamespaceInfoMetrics()` and `promutil.BuildMetrics()` into `promutil.BuildMetricName()` by @tristanburgess\n* Set initial maps size in promutil/migrate by @cristiangreco\n\n**Dependencies**\n\n* Bump github.com/aws/aws-sdk-go from 1.50.30 to 1.51.16\n* Bump github.com/prometheus/common from 0.49.0 to 0.52.2\n* Bump golang.org/x/sync from 0.6.0 to 0.7.0\n* Bump the aws-sdk-v2 group with 14 updates\n\n**New contributors**\n\n* @tristanburgess made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1351\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.57.1...v0.58.0\n\n\n## 0.57.1 / 2024-03-07\n\n**Important news and breaking changes**\n\n* Reverted a change from 0.57.0 to fix scraping of ApiGateway resources.\n\n**Bugfixes and 
features**\n\nBugs:\n* ApiGateway: bugfix to restore FilterFunc for correct mapping of resources by @cristiangreco\n\n**Dependencies**\n\n## What's Changed\n* Bump github.com/aws/aws-sdk-go from 1.50.26 to 1.50.30\n* Bump github.com/prometheus/client_golang from 1.18.0 to 1.19.0\n* Bump github.com/prometheus/common from 0.48.0 to 0.49.0\n* Bump github.com/stretchr/testify from 1.8.4 to 1.9.0\n* Bump the aws-sdk-v2 group\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.57.0...v0.57.1\n\n# v0.57.0\n\n**Important news and breaking changes**\n\n* New job setting `includeContextOnInfoMetrics` can be used to include contextual information (account_id, region, and customTags) on \"info\" metrics and cloudwatch metrics. This can be particularly useful when cloudwatch metrics might not be present or when using \"info\" metrics to understand where your resources exist.\n* No more need to add the `apigateway:GET` permissions for ApiGateway discovery jobs, as that API is not being used anymore.\n\n**Bugfixes and features**\n\nFeatures:\n* Add serverless ElastiCache support by @pkubicsek-sb\n* Add GWLB support by @vainiusd\n* Add support for KMS metrics by @daharon\n* Optionally include context labels (account, region, customTags) on info metrics with `includeContextOnInfoMetrics` by @kgeckhart\n* Improve usability and performance of searchTags by @kgeckhart\n* Add metric yace_cloudwatch_getmetricdata_metrics_total by @keyolk\n\nBugs:\n* Fix race condition in scraper registry usage by @cristiangreco\n* Restore default behaviour of returning nil/absent metrics as NaN by @nhinds\n* Remove filtering of ApiGateway namespace resources by @cristiangreco\n\nRefactoring:\n* Refactor dimensions regexp usage for discovery jobs by @cristiangreco\n* Simplify associator usage by @kgeckhart\n* Update build tools and CI to go 1.22 by @cristiangreco\n* Restructure fields on CloudwatchData by @kgeckhart\n\n**Dependencies**\n\n* Bump 
alpine from 3.19.0 to 3.19.1\n* Bump github.com/aws/aws-sdk-go from 1.49.19 to 1.50.26\n* Bump github.com/aws/smithy-go from 1.19.0 to 1.20.1\n* Bump github.com/prometheus/common from 0.45.0 to 0.48.0\n* Bump golang from 1.21 to 1.22\n* Bump golangci/golangci-lint-action from 3.7.0 to 4.0.0\n* Bump the aws-sdk-v2 group\n\n**New contributors**\n\n* @vainiusd made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1093\n* @daharon made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/1306\n* @keyolk made their first contribution in https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pull/939\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.56.0...v0.57.0\n\n# v0.56.0\n\n**Important news and breaking changes**\n\n* Release v0.55.0 didn't include binaries artifact due to an issue with the release pipeline.\n* The `list-metrics-callback` and `max-dimensions-associator` feature flags have been removed: their behaviour is now the new default.\n\n**Bugfixes and features**\n\nFeatures:\n* Add new CloudWatch API concurrency limiter by @thepalbi\n* Remove feature flag `list-metrics-callback` by @cristiangreco\n* Remove feature flag `max-dimensions-associator` by @cristiangreco\n* Add support for AWS/Bedrock metrics by @thepalbi\n* Add support for AWS/Events by @raanand-dig\n* Add support for AWS/DataSync by @wkneewalden\n* Add support for AWS/IPAM by @pkubicsek-sb\n\nBugs:\n* Remove unsupported MWAA resource filter by @matej-g\n* DDoSProtection: Include regionless protectedResources in us-east-1 by @kgeckhart\n* aws sdk v2: ensure region is respected for all aws clients by @kgeckhart\n* SageMaker: Associator buildLabelsMap to lower case EndpointName to match ARN by @GGonzalezGomez\n* Update goreleaser action by @cristiangreco\n\nRefactoring:\n* Decouple config models from internal models by 
@cristiangreco\n* Change config Validate() signature to include model conversion by @cristiangreco\n\n**Dependencies**\n\n* Bump actions/setup-go from 4 to 5\n* Bump alpine from 3.18.3 to 3.19.0\n* Bump docker/setup-buildx-action from 2 to 3\n* Bump docker/setup-qemu-action from 2 to 3\n* Bump github.com/aws/aws-sdk-go from 1.45.24 to 1.49.19\n* Bump github.com/aws/smithy-go from 1.17.0 to 1.19.0\n* Bump github.com/prometheus/client_golang from 1.16.0 to 1.18.0\n* Bump github.com/prometheus/common from 0.44.0 to 0.45.0\n* Bump github.com/urfave/cli/v2 from 2.25.7 to 2.27.1\n* Bump golang.org/x/sync from 0.3.0 to 0.6.0\n* Bump goreleaser/goreleaser-action from 4 to 5\n* Bump the aws-sdk-v2 group dependencies\n\n**New contributors**\n\n* @GGonzalezGomez\n* @wkneewalden\n* @pkubicsek-sb\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.55.0...v0.56.0\n\n# v0.55.0\n\n**Important news and breaking changes**\n\n* jobs of type `customNamespace`, which were deprecated in `v0.51.0`, are now **un-deprecated** due to customers' feedback\n* new feature flag `always-return-info-metrics`: return info metrics even if there are no CloudWatch metrics for the resource. 
This is useful if you want to get a complete picture of your estate, for example if you have some resources which have not yet been used.\n\n**Bugfixes and features**\n\nFeatures:\n* Un-deprecate custom namespace jobs by @cristiangreco\n* scrape: Return resources even if there are no metrics by @iainlane\n* kinesisanalytics application: add tags support by @raanand-dig\n* Add support for AWS/ClientVPN by @hc2p\n* Add support for QLDB by @alexandre-alvarengazh\n\nBugs:\n* main: Initialise logger when exiting if needed by @iainlane\n\nDocs:\n* Create sqs.yml example file by @dverzolla\n\nRefactoring:\n* Update code to go 1.21 by @cristiangreco\n* aws sdk v2 use EndpointResolverV2 by @kgeckhart\n* move duplicated fields from CloudwatchData to a new JobContext by @kgeckhart\n\n**Dependencies**\n\n* Bump github.com/aws/aws-sdk-go from 1.44.328 to 1.45.7\n* Bump the aws-sdk-v2 group with 2 updates\n* Bump actions/checkout from 3 to 4\n\n**New Contributors**\n\n* @raanand-dig\n* @dverzolla\n* @iainlane\n* @hc2p\n* @alexandre-alvarengazh\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.54.1...v0.55.0\n\n\n# v0.54.1\n\nBugs:\n* sdk v2: Set RetryMaxAttempts on root config instead client options by @kgeckhart\n* Match FIPS implementation between sdk v1 and sdk v2 by @kgeckhart\n* Fix regex for vpc-endpoint-service by @cristiangreco\n\n**Dependencies**\n\n* Bump golangci/golangci-lint-action from 3.6.0 to 3.7.0\n* Bump github.com/aws/aws-sdk-go from 1.44.327 to 1.44.328\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.54.0...v0.54.1\n\n# v0.54.0\n\n**Bugfixes and features**\n\nFeatures:\n* Log features enabled at startup by @cristiangreco\n* Use go-kit logger and add `log.format` flag by @cristiangreco\n\nBugs:\n* Remove tagged resource requirement from TrustedAdvisor by @kgeckhart\n* Fix: RDS dashboard filtering by job value by @andriikushch\n* Review 
dimensions regexps for APIGateway by @cristiangreco\n* Fix syntax in rds.libsonnet by @andriikushch\n* Fix the `FilterId` label value selection for s3 dashboard by @andriikushch\n* MaxDimAssociator: loop through all mappings by @cristiangreco\n* MaxDimAssociator: wrap some expensive debug logs by @cristiangreco\n* MaxDimAssociator: compile AmazonMQ broker suffix regex once by @cristiangreco\n* Limit number of goroutines for GetMetricData calls by @cristiangreco\n* Reduce unnecessary pointer usage in getmetricdata code path by @kgeckhart\n* Improve perf in discovery jobs metrics to data lookup by @thepalbi\n* Improve FIPS endpoints resolve logic for sdk v1 by @thepalbi\n\nDocs:\n* Add more config examples (ApiGW, SES, SNS, ECS) by @cristiangreco\n\nRefactoring:\n* Refactor clients.Cache -> clients.Factory by @kgeckhart\n* dependabot: use group updates for aws sdk v2 by @cristiangreco\n* Add debug logging to maxdimassociator by @cristiangreco\n\n**Dependencies**\n\nNew dependencies:\n* github.com/go-kit/log v0.2.1\n\nUpdates:\n* Docker image: bump alpine from 3.18.2 to 3.18.3\n* Docker image: bump golang from 1.20 to 1.21\n* Bump github.com/aws/smithy-go from 1.13.5 to 1.14.2\n* Bump github.com/aws/aws-sdk-go and aws-sdk-go-v2 to latest versions\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.53.0...v0.54.0\n\n# v0.53.0\n\n**Bugfixes and features**\n\nServices:\n* Add Auto Discovery Support For Sagemaker by @charleschangdp\n* Add support for AWS/TrustedAdvisor by @cristiangreco\n\nBugs:\n* fix(kafkaconnect): update resource filter by @cgowthaman\n* Validate should fail when no roles are configured by @thepalbi\n* Fix default value for nilToZero and addCloudwatchTimestamp in static job by @cristiangreco\n* ddos protection: Discover resources outside us-east-1\n\n**Dependencies**\n* Bump github.com/aws/aws-sdk-go from 1.44.284 to 1.44.290\n* Bump github.com/aws/aws-sdk-go-v2/service/amp from 1.16.12 to 1.16.13\n* 
Bump github.com/aws/aws-sdk-go-v2/service/apigatewayv2 from 1.13.12 to 1.13.13\n* Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.26.1 to 1.26.2\n* Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.100.0 to 1.102.0\n* Bump github.com/prometheus/client_golang from 1.15.1 to 1.16.0\n* Bump github.com/prometheus/common from 0.43.0 to 0.44.0\n* Bump github.com/urfave/cli/v2 from 2.25.6 to 2.25.7\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.52.0...v0.53.0\n\n# v0.52.0\n\n**Important news and breaking changes**\n\nThis releases introduces the feature flag `aws-sdk-v2` (by @kgeckhart), which changes YACE networking layer to use the AWS sdk v2 package. Read on for more details and considerations.\n\n  * The main benefit of sdk v2 is deserialization/serialization is done via code generation vs reflection which drastically lowers memory/cpu usage for large scrape jobs\n  * Considerations before enabling sdk v2:\n    1. FIPS is not supported in v2 as v2 delegates all URL resolution to the sdk and AWS does not have FIPS compliant endpoints for AutoScaling API and Tagging API. The v1 implementation worked around this by hard coding FIPS URLs where they existed and using non-FIPS URLs otherwise. This work around was not ported to v2 and is unlikely to be ported.\n    2. sdk v2 uses regional sts endpoints by default vs global sts which is [considered legacy by aws](https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html). The `sts-region` job configuration is still respected when setting the region for sts and will be used if provided. 
If you still require global sts instead of regional set the `sts-region` to `aws-global`.\n\n**Bugfixes and features**\n\nFeatures:\n* Discovery jobs support `recentlyActiveOnly` parameter to reduce number of old metrics returned by CloudWatch API by @PerGon\n* Feature flag `aws-sdk-v2`: use the more performant AWS sdk v2 (see above section) by @kgeckhart\n\nServices:\n* Add support for API Gateway V2 by @matej-g\n* Add support for MediaConvert by @theunissenne\n* Add support for CWAgent by @cristiangreco\n* Add support for memorydb by @glebpom\n\nDocs:\n* ALB example: use Average for ConsumedLCUs by @cristiangreco\n* Update configuration.md: deprecated custom namespace jobs by @wimsymons\n* Update permissions examples and docs in readme by @kgeckhart\n* Add example for ElastiCache by @cristiangreco\n* Update mixin readme by @cristiangreco\n\nBugs:\n* Fix AmazonMQ Broker name dimension match by @cristiangreco\n* Fix invalid GH action file and broken test case by @cristiangreco\n* Fix namespace case in metrics conversion by @cristiangreco\n* Make exporter options a non-global type by @kgeckhart\n* Fix debug logging in discovery jobs by @cristiangreco\n\nRefactoring:\n* Refactor AWS sdk client usage to hide behind new ClientCache by @kgeckhart\n* Introduce model types to replace sdk types in cloudwatch client by @kgeckhart\n\n**Dependencies**\n\nNew dependencies:\n* github.com/aws/aws-sdk-go-v2/config 1.18.27\n* github.com/aws/aws-sdk-go-v2/service/amp 1.16.11\n* github.com/aws/aws-sdk-go-v2/service/apigateway 1.13.13\n* github.com/aws/aws-sdk-go-v2/service/autoscaling 1.28.9\n* github.com/aws/aws-sdk-go-v2/service/cloudwatch 1.26.1\n* github.com/aws/aws-sdk-go-v2/service/databasemigrationservice 1.25.7\n* github.com/aws/aws-sdk-go-v2/service/ec2 1.100.0\n* github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi 1.14.14\n* github.com/aws/aws-sdk-go-v2/service/storagegateway 1.18.14\n\nUpdates:\n* Bump alpine from 3.17.3 to 3.18.2\n* Bump 
github.com/aws/aws-sdk-go from 1.44.249 to 1.44.284\n* Bump github.com/prometheus/common from 0.42.0 to 0.43.0\n* Bump github.com/sirupsen/logrus from 1.9.0 to 1.9.3\n* Bump github.com/stretchr/testify from 1.8.2 to 1.8.4\n* Bump github.com/urfave/cli/v2 from 2.25.1 to 2.25.6\n* Bump golang.org/x/sync from 0.1.0 to 0.3.0\n* Bump golangci/golangci-lint-action from 3.4.0 to 3.6.0\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.51.0...v0.52.0\n\n# v0.51.0\n\n**Important breaking changes**\n* Jobs of type `customNamespace` are **deprecated** and might be removed in a future release (please reach out if you're still using this feature)\n\n**Bugfixes and features**\n\nFeatures:\n* Add feature flags support by @thepalbi\n* Feature flag `max-dimensions-associator`: new resource-matching algorithm for discovery jobs. It fixes metrics attribution for ECS. Please test it out and report any issue!\n* Feature flag `list-metrics-callback`: reduce memory usage of ListMetrics API requests\n\nServices:\n* Add support for AWS/Usage namespace by @cristiangreco\n* Fix ECS regexes by @cristiangreco\n\nDocs:\n* Add docker compose support for easier development by @thepalbi\n* Add more config examples by @cristiangreco\n* Review docs about embedding yace by @cristiangreco\n\nBugs:\n* Fix for Dockerfile smell DL3007 by @grosa1\n\nRefactoring:\n* Refactor Tagging/CloudWatch clients by @cristiangreco\n* CloudWatch client: split out input builders into separate file by @cristiangreco\n* Refactor promutils migrate functions by @cristiangreco\n* Use grafana/regexp by @cristiangreco\n* Refactor implementation of getFilteredMetricDatas by @cristiangreco\n* Remove uneeded Describe implementation by @kgeckhart\n* Add counter to see if duplicate metrics are still a problem by @kgeckhart\n* Refactor label consistency and duplicates by @kgeckhart\n* Refactor GetMetricData calls in discovery jobs by @cristiangreco\n\n**Dependencies**\n* Bump 
github.com/aws/aws-sdk-go from 1.44.235 to 1.44.249\n* Bump github.com/prometheus/common from 0.41.0 to 0.42.0\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.50.0...v0.51.0\n\n# v0.50.0\n\n**Important breaking changes**\n* Change `UpdateMetrics` signature to accept options and return error by @cristiangreco -- if you embed YACE as a Go library this is a breaking change.\n\n**Bugfixes and features**\nFeatures:\n* Refactor API clients concurrency handling by @cristiangreco\n* Add feature flags support by @thepalbi\n* Allow discovery jobs to return result even if there are no resources by @kgeckhart\n* Add flag to enable pprof profiling endpoints by @cristiangreco\n\nServices:\n* Add a ResourceFilter to ElasticBeanstalk by @benbridts\n\nDocs:\n* Update config docs format by @cristiangreco\n\nRefactoring:\n* Linting: fix revive issues by @cristiangreco\n* Remove extra error log when no resources are found by @kgeckhart\n* Wrap debug logging in FilterMetricData by @cristiangreco\n* Minor internal refactorings by @cristiangreco\n\n**Dependencies**\n* Bump actions/setup-go from 3 to 4\n* Bump github.com/aws/aws-sdk-go from 1.44.215 to 1.44.235\n* Bump github.com/urfave/cli/v2 from 2.25.0 to 2.25.1\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.49.2...v0.50.0\n\n# v0.49.2\n\n## Bugfixes and features\n* Update release action to use goreleaser docker image v1.16.0\n\n# v0.49.1\n\n## Bugfixes and features\n* Update release action to use Go 1.20\n\n# v0.49.0\n\n## Important breaking changes\n* From now on we're dropping the `-alpha` suffix from the version number. 
YACE will be considered alpha quality until v1.0.0.\n* The helm chart is now hosted at https://github.com/nerdswords/helm-charts, please refer to the instructions in the new repo.\n\n## Bugfixes and features\nHelm chart:\n* Move helm chart out of this repo by @cristiangreco\n* Update helm repo link in README.md by @cristiangreco\n\nNew services:\n* Add support for Container, queue, and database metrics for MWAA by @millin\n* Add support for acm-pca service by @jutley\n\nDocs updates:\n* Docs review: move \"install\" and \"configuration\" in separate docs by @cristiangreco\n* Docs: Fix example config link by @matej-g\n* Add example config files by @cristiangreco\n\nInternal refactoring:\n* Code refactoring: split out job and api code by @cristiangreco\n* Minor refactoring of pkg/apicloudwatch and pkg/apitagging by @cristiangreco\n* Refactor CW metrics to resource association logic and add tests by @thepalbi\n* Wrap service filter errors by @kgeckhart\n\n## Dependencies\n* Bump github.com/aws/aws-sdk-go from 1.44.194 to 1.44.215\n* Bump github.com/prometheus/common from 0.37.0 to 0.41.0\n* Bump github.com/stretchr/testify from 1.8.1 to 1.8.2\n* Bump github.com/urfave/cli/v2 from 2.24.3 to 2.25.0\n* Bump golang.org/x/sync from 0.0.0-20220722155255-886fb9371eb4 to 0.1.0\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.48.0-alpha...v0.49.0\n\n# v0.48.0-alpha\n\n**Bugfixes and features**:\n* Revert \"Publish helm chart before releasing binaries\".\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.47.0-alpha...v0.48.0-alpha\n\n# v0.47.0-alpha\n\n**Bugfixes and features**:\n* Add Elemental MediaLive, MediaConnect to supported services by @davemt\n* Add support for OpenSearch Serverless by @Hussainoxious\n* Makefile: always add build version ldflags by @cristiangreco\n* Publish helm chart before releasing binaries by @cristiangreco\n* Build with Go 1.20 by 
@cristiangreco\n\n**Dependencies**:\n* Bump github.com/aws/aws-sdk-go from 1.44.192 to 1.44.194\n* Bump github.com/urfave/cli/v2 from 2.24.2 to 2.24.3\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.46.0-alpha...v0.47.0-alpha\n\n## 0.46.0-alpha / 2023-02-02\n\n**Breaking changes**:\n- If you use Yace as a library: this release changes the package\n  name `pkg/logger` to `pkg/logging`.\n\n**Bugfixes and features**:\n* Fix to set logging level correctly by @cristiangreco\n* ct: disable validate-maintainers by @cristiangreco\n\n**Dependencies**:\n* Bump github.com/aws/aws-sdk-go from 1.44.189 to 1.44.192\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/helm-chart-0.11.0...v0.46.0-alpha\n\n## 0.45.0-alpha / 2023-01-30\n\n**Breaking changes**:\n- Note if you use Yace as a library: this release changes the signature\n  of `config.Load` method.\n\n**Bugfixes and features**:\n* Helm chart update to customize port name by @nikosmeds\n* Clear up docs and re-organize sections by @thepalbi\n* Helm: add README file template by @cristiangreco\n* Config parsing: emit warning messages for invalid configs by @cristiangreco\n* Pre-compile dimensions regexps for supported services by @cristiangreco\n* AWS/DX: add more dimension regexps by @cristiangreco\n\n**Dependencies**:\n* Bump github.com/aws/aws-sdk-go from 1.44.182 to 1.44.189\n* Bump github.com/urfave/cli/v2 from 2.23.7 to 2.24.2\n* Bump golangci/golangci-lint-action from 3.3.1 to 3.4.0\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.44.0-alpha...v0.45.0-alpha\n\n## 0.44.0-alpha / 2023-01-23\n\n**Breaking changes**:\n- Note if you use Yace as a library: this release changes the packages\n  and funcs exported publicly, you will need to review the imports\n  (although signatures are mostly unchanged)\n\n**Bugfixes and features**:\n* Refactor code into separate 
packages by @cristiangreco\n* Refactor list of supported services and filter funcs by @cristiangreco\n* Wrap debug logging to avoid expensive operations by @cristiangreco\n* Fix to use length of metrics level on customNamespace by @masshash\n* feat: bump helm chart by @rasta-rocket\n* feat: release helm chart when Chart.yml is updated by @rasta-rocket\n* Add test for configuration of services list by @cristiangreco\n* GolangCI: review linters settings by @cristiangreco\n\n**Dependencies**:\n* Bump azure/setup-helm from 1 to 3\n* Bump docker/setup-buildx-action from 1 to 2\n* Bump docker/setup-qemu-action from 1 to 2\n* Bump github.com/aws/aws-sdk-go from 1.44.175 to 1.44.182\n* Bump github.com/prometheus/client_golang from 1.13.0 to 1.14.0\n* Bump helm/chart-releaser-action from 1.4.1 to 1.5.0\n* Bump helm/kind-action from 1.2.0 to 1.5.0\n\n**Full Changelog**: https://github.com/prometheus-community/yet-another-cloudwatch-exporter/compare/v0.43.0-alpha...v0.44.0-alpha\n\n## 0.43.0-alpha / 2023-01-02\n\n* add support to custom namespaces with their dimensions (by @arielly-parussulo)\n* Optimise support for custom namespaces to use GetMetricData API (by @code-haven)\n* GH workflows: run \"publish\" workflows only in this repo. (by @cristiangreco)\n* Bump Go version to 1.19 for CI and docker image. 
(by @cristiangreco)\n* Fix not to refer to loop variable in a goroutine (by @masshash)\n* Validate tags when converting to prometheus labels (by @cristiangreco)\n* Bump github.com/aws/aws-sdk-go from 1.44.127 to 1.44.167\n* Bump golangci/golangci-lint-action from 3.3.0 to 3.3.1\n* Bump github.com/urfave/cli/v2 from 2.23.0 to 2.23.7\n\n## 0.42.0-alpha / 2022-11-03\n\n* Resolve logging issue (@datsabk)\n* MediaTailor - Correct dimension regex for MT (@scott-mccracken)\n* Helm chart update for optional test-connection pod (@nikosmeds)\n* Helm chart update to set priorityClassName (@nikosmeds)\n* Bump github.com/aws/aws-sdk-go from 1.44.122 to 1.44.127\n* Bump github.com/urfave/cli/v2 from 2.20.3 to 2.23.0\n\n## 0.41.0-alpha / 2022-10-27\n\n* Clean up unused variables. (@cristiangreco)\n* Fix typo: sts-endpoint should be sts-region. (@cristiangreco)\n* Enabled Managed prometheus metrics (@datsabk)\n* Add support for AWS Kafka Connect (@cgowthaman)\n* Import CloudWatch mixin. (@jeschkies)\n* main.go refactoring: define cmd action as a separate func. (@cristiangreco)\n* Add support for EMR Serverless (@cgowthaman)\n\n## 0.40.0-alpha / 2022-09-15\n* Fix typo in Charts.yml (@yasharne)\n* Subcommand `verify-config` actually validates the config file. (@cristiangreco)\n* Add dimensions regex for AmazonMQ. (@cristiangreco)\n* Fix metrics with additional dimensions being not being scraped. (@cristiangreco)\n* Remove unused code, add test for RemoveDuplicateMetrics. 
(@cristiangreco)\n* Bump github.com/sirupsen/logrus\n* Bump github.com/urfave/cli/v2\n* Bump github.com/aws/aws-sdk-go\n* Bump actions/setup-python\n\n## 0.39.0-alpha / 2022-09-08\n* Improve code quality and unblock this release (cristiangreco)\n* Add helm chart (vkobets)\n* Fix DX metrics (paulojmdias)\n* Fix searchTags and bad dimension name (femiagbabiaka)\n* Handle empty list in filter metric tests (mtt88)\n* Add AWS Elemental MediaTailor support (scott-mccracken)\n* Support storagegateway metrics (sedan07)\n* Filter api gateway resources to skip \"stages\" (ch4rms)\n* Bump aws-sdk, urfave/cli, prometheus/client_golang\n\n## 0.38.0-alpha / 2022-07-13\n\n* Set max page size for tagging API requests (#617)\n* Build with Go 1.18\n\n## 0.37.0-alpha / 2022-07-05\n* New config `dimensionNameRequirements` allows autodiscovery jobs to only\n  fetch metrics that include specified dimensions (jutley)\n* Update deps\n\n## 0.36.2-alpha / 2022-06-29\n* Cost Reduction - Use less API requests if no tagged resources are found (cristiangreco)\n* Update deps\n\n## 0.36.1-alpha / 2022-06-22\n* Use structured logs for logging interface (kgeckhart)\n\n## 0.36.0-alpha / 2022-06-20\n\n* *BREAKING CHANGE FOR LIBRARY USERS* Major refactoring of usage of logging library (kgeckhart)\n* Minor update of deps and security patches (urfave/cli/v2, golangci/golangci-lint-action, github.com/prometheus/client_golang, github.com/stretchr/testify, github.com/aws/aws-sdk-go\n* Updates of Readme (markwallsgrove)\n\n## 0.35.0-alpha / 2022-04-26\n* Update dependencies\n* Improve / Document way how to use the exporter as external library (kgeckhart)\n* Refactor label consistency (kgeckhart)\n* Add suppot for vpc-endpoint (AWS/PrivateLinkEndpoints) (aleslash)\n* Add support for vpc-endpoint-service (AWS/PrivateLinkServices) (aleslash)\n\n## 0.34.0-alpha / 2022-03-26\n* Update dependencies\n* Add weekly dependabot updates (jylitalo)\n* Add support for regional sts endpoints (matt-mercer)\n* Add 
multi-arch docker build (charlie-haley)\n\nNew services\n* Add global accelerator support (charlie-haley)\n* Add AppStream support (jhuesemann)\n* Add Managed Apache Airflow support (sdenham)\n* Add KinesisAnalytics support (gumpt)\n\nBug Fixes\n* Fix targetgroup arn lookup (domcyrus)\n* Fix WorkGroup Dimension are not showing in Athena Metrics (sahajavidya)\n* Improve regex performance (kgeckhart)\n* Fix prometheus reload causing a goroutine leak (gumpt / cristiangreco)\n\nDocs\n* Added help for new contributors (aleslash)\n\n## 0.33.0-alpha / 2021-12-10\n* Add /healthz route which allows to deploy more secure with helm (aleslash)\n* Read DMS replication instance identifier from the DMS API (nhinds)\n\n## 0.32.0-alpha / 2021-11-19\n* [BREAKING] Fix the calculation of start and end times for GetMetricData (csquire)\n```\nfloating-time-window is now replaced with roundingPeriod\n\nSpecifies how the current time is rounded before calculating start/end times for CloudWatch GetMetricData requests. This rounding is optimize performance of the CloudWatch request. This setting only makes sense to use if, for example, you specify a very long period (such as 1 day) but want your times rounded to a shorter time (such as 5 minutes). to For example, a value of 300 will round the current time to the nearest 5 minutes. If not specified, the roundingPeriod defaults to the same value as shortest period in the job.\n```\n* Improve testing / linting (cristiangreco)\n* Verify cli parameters and improve cli parsing (a0s)\n* Allow to configure yace cli parameters via env variables (a0s)\n* Improve error handling of cloudwatch (matthewnolf)\n* Add support for directconnect and route53 health checks\n* Improve throttling handling to AWS APIs (anilkun)\n* Add issue templates to improve support (NickLarsenNZ)\n* Allow setting default values for statistics (surminus)\n* Fix apigateway method and resouce dimension bug (aleslash)\n\nThanks a lot to all contributors! 
- Lovely to see so much efforts especially in testing\nto get this project more and more stable. - I know we are far away from a nice tested\ncode base but we are improving in the right direction and I really love to see all\nof your efforts there. It is really appreciated from my side.\n\nI just contacted AWS to get some open source credits so we can build some kind of\nend to end tests. This shoud allow us to find tricky bugs earlier and not only when we ship\nthings.\n\nLove to all of you, Thomas!\n\n## 0.31.0-alpha / 2021-09-23\n* [BREAKING] Decoupled scraping is now default. Removed code which allowed to use scraper without it.\n```\n# Those flags are just ignored\n-decoupled-scraping=false\n-decoupled-scraping=true\n```\n* [BREAKING] Small timeframes of scraping can be used again now. In the past yace decided the scraping\n  interval based on config. This magic was removed for simplicity.\n```\n# In the past this would have in some cases still set --scraping-interval 600\n--scraping-interval 10\n# Now it really would scrape every 10 seconds which could introduce big API costs. So please watch\n# your API requests!\n--scraping-interval 10\n```\n* Fix problems with start/endtime of scrapes (klarrio-dlamb)\n* Add support for Database Migration Service metrics\n* Allow to hotreload config via /reload (antoniomerlin)\n\n## 0.30.1-alpha / 2021-09-13\n* *SECURITY* Fix issue with building binaries. 
Please update to mitigate (https://nvd.nist.gov/vuln/detail/CVE-2020-14039)\n* Thanks jeason81 for reporting this security incident!\n\n## 0.30.0-alpha / 2021-09-07\n* *BREAKING* Introduce new version field to config file (jylitalo)\n```\n# Before\ndiscovery:\n  jobs:\n# After\napiVersion: v1alpha1\ndiscovery:\n  jobs:\n```\n* [BUG] Fix issues with nilToZero (eminugurkenar)\n* [BUG] Fix race condition setting end time for discovery jobs (cristiangreco)\n* Simplify session creation code (jylitalo)\n* Major improvement of aws discovery code (jylitalo)\n* Major rewrite of the async scraping logic (rabunkosar-dd)\n* Add support for AWS/ElasticBeanstalk (andyzasl)\n* Upgrade golang to 1.17\n* Upgrade golang libraries to newest versions\n\n## 0.29.0-alpha / 2021-09-01\nOkay, private things settled. We have a new organisation for\nthe project. Lets boost it and get the open PRs merged!\nThis version is like 0.28.0-alpha but docker images hosted on ghcr.io\nand published via new github organisation nerdswords. Find\ndetails [here](https://medium.com/@IT_Supertramp/reorganizing-yace-79d7149b9584).\n\nThanks to all there waiting and using the product! :)\n\n- *BREAKING CHANGE* Using a new docker registry / organisation:\n```yaml\n# Before\nquay.io/invisionag/yet-another-cloudwatch-exporter:v0.29.0-alpha\n# Now\nghcr.io/nerdswords/yet-another-cloudwatch-exporter:v0.29.0-alpha\n```\n\n## 0.28.0-alpha / 2021-07-09\nSorry folks, I currently struggle a little bit\nto get things merged fast due to a lot of private\nstuff. Really appreciate all your PRs and\nhope to get the bigger ones (which are sadly\nstill not merged yet) into next release.\n\nReally appreciate any person working on this\nproject! 
- Have a nice day :)\n\n- *BREAKING CHANGE* Added support for specifying an External ID with IAM role Arns (cristiangreco)\n```yaml\n# Before\ndiscovery:\n  jobs:\n  - type: rds\n    roleArns:\n    - \"arn:aws:iam::123456789012:role/Prometheus\"\n# After\ndiscovery:\n  jobs:\n  - type: rds\n    roles:\n    - roleArn: \"arn:aws:iam::123456789012:role/Prometheus\"\n      externalId: \"shared-external-identifier\" # optional\n```\n- Add alias for AWS/Cognito service (tohjustin)\n- Fix logic in dimensions for Transit Gateway Attachments (rhys-evans)\n- Fix bug with scraping intervals (boazreicher)\n- Support arm64 builds (alias-dev)\n- Fix IgnoreLength logic (dctrwatson)\n- Simplify code base (jylitalo)\n- Simplify k8s deployments for new users (mahmoud-abdelhafez)\n- Handle metrics with '%' in their name (darora)\n- Fix classic elb name (nhinds)\n- Skip metrics in edge cases (arvidsnet)\n\nFreshly shipped new integrations:\n- Certificate Manager (mksh)\n- WorkSpaces (kl4w)\n- DDoSProtection / Shield (arvidsnet)\n\n## 0.27.0-alpha / 2021-05-07\n\n- Make exporter a library. 
(jeschkies)\n- Add CLI option to validate config file (zswanson)\n- Fix multidimensional static metric (nmiculinic)\n- Fix scrapes running in EKS fail after first scrape (rrusso1982)\n- Fix Docker build (jeschkies)\n- Allow to use this project in China (insectme)\n- Fix error retrieving kafka metrics (friedrichg)\n\nFreshly integrated:\n- Add AWS/NetworkFirewall (rhys-evans)\n- Add AWS/Cassandra (bjhaid)\n- Add AWS/AmazonMQ (saez0pub)\n- Add AWS/Athena (haarchri)\n- Add AWS/Neptune (benjaminaaron)\n\nThanks to doc fixes: calvinbui\n\n## 0.26.3-alpha / 2021-03-15\n## 0.26.2-alpha / 2021-03-15\n\n- Fix CI issue\n\n## 0.26.0-alpha / 2021-03-15\n\n- *BREAKING CHANGE* Removed a need to use static dimensions in dynamic jobs in cases, when they cannot be parsed from ARNs (AndrewChubatiuk)\n    ```\n      # Before\n      metrics:\n      - name: NumberOfObjects\n        statistics:\n          - Average\n        additionalDimensions:\n          - name: StorageType\n            value: AllStorageTypes\n      # After\n      metrics:\n      - name: NumberOfObjects\n        statistics:\n          - Average\n    ```\n* *BREAKING CHANGE* Use small case for searchTags config option (AndrewChubatiuk)\n    ```\n    # Before\n    searchTags:\n    - Key: type\n      Value: public\n    # After\n    searchTags:\n    - key: type\n      value: public\n      ```\n* *BREAKING CHANGE* CloudFront renamed from `cf` to `cloudfront`\n    ```\n    # Before\n    - type: cf\n    # After\n    - type: cloudfront\n      ```\n\n- Added regular expressions to parse dimensions from resources (AndrewChubatiuk)\n- Added option to use floating time windows (zqad)\n- Added CLI option to validate config file (zswanson)\n- Added AWS network Firewall (rhys-evans)\n- Fixed multidimensional static metric (nmiculinic)\n- Tidy up code (jylitalo)\n\n## 0.25.0-alpha / 2021-01-05\n\n- *BREAKING CHANGE* Use NaN as default if AWS returns nil (arnitolog)\n- Add autodiscovery for AWS/EC2Spot (singhjagmohan1000)\n- Add 
autodiscovery for DocumentDB (haarchri)\n- Add autodiscovery for GameLift (jp)\n- Added support for fips compliant endpoints (smcavallo)\n- Update deps and build with golang 1.15 (smcavallo)\n\n## 0.24.0-alpha / 2020-12-07\n\n- Add API Gateway IAM info to README (Botono)\n- Fix sorting of datapoints, add test util functions (Botono)\n- Fix missing DataPoints and improve yace in various ways (vishalraina)\n- Added Github action file to basic validation of incoming PR (vishalraina)\n- Fix info metrics missing (goya)\n- Add rds db clusters (goya)\n- Fix missing labels (goya)\n\n## 0.23.0-alpha / 2020-10-02\n\n- Add sampleCount statistics (udhos)\n- Add WAFv2 support (mksh)\n\n## 0.22.0-alpha / 2020-10-02\n\n- Fix alb issues (reddoggad)\n- Add nlb support (reddoggad)\n\n## 0.21.0-alpha / 2020-09-21\n\n- Big tidy up of code, remove old methods and refactor used ones (jylitalo)\n- Fix crashes where labels are not collected correctly (rrusso1982)\n- Fix pointer bug causing metrics to be missing (jylitalo)\n- Allow more then 25 apigateways to be discovered (udhos)\n\n## 0.20.0-alpha / 2020-07-31\n\n- Add api-gateway support (smcavallo)\n- Improve metrics validation (jylitalo)\n- Fix metrics with '<', '>' chars\n\n## 0.19.1-alpha / 2020-07-17\n\n- Remove error during build\n\n## 0.19.0-alpha / 2020-07-17\nWow what a release. Thanks to all contributors. This is\nour biggest release and it made me a lot of fun to see all those\ncontributions. From small doc changes (love those) to major rewrites\nof big components or new complex features. Thanks!\n\n* *BREAKING CHANGE* Add support for multiple roleArns (jylitalo)\n```yaml\n# Before\n---\ndiscovery:\n  jobs:\n  - type: rds\n    roleArn: \"arn:aws:iam::123456789012:role/Prometheus\"\n# After\ndiscovery:\n  jobs:\n  - type: rds\n    roleArns:\n    - \"arn:aws:iam::123456789012:role/Prometheus\"\n```\n* Upgrade golang from 1.12 to 1.14\n* Major linting of code and improving global code quality. 
(jylitalo)\n* Improve logging (jylitalo)\n* Add config validation. (daviddetorres)\n* Added support for tags with '@' char included (afroschauer )\n* Added Transit Gateway Attachment Metrics (rhys-evans)\n* Fix information gathering if no data is retrieved by cloudwatch (daviddetorres)\n* Improve docs (calvinbui)\n* Add redshift support (smcavallo)\n* Allow easier configuration through adding period / addCloudwatchTimestamp setting additionally\n  to job level. (rrusso1982)\n* Add initial unit tests (smcavallo)\n* Add new configuration to allow snake case labels (rrusso1982)\n* Fix complex metric dimension bug (rrusso1982)\n* Upgrade golang packages (smcavallo)\n* Set up correct partition for ASG for AWS China and GovCloud Regions (smcavallo)\n* Add ability to set custom tags to discovery job metrics (goya)\n\n## 0.18.0-alpha / 2020-06-15\n* *BREAKING CHANGE* Add support for multiple regions (goya)\n```yaml\n# Before\n---\ndiscovery:\n  jobs:\n  - type: rds\n    region: eu-west-1\n# After\ndiscovery:\n  jobs:\n  - type: rds\n    regions:\n    - eu-west-1\n```\n* Fix missing alb target group metrics (abhi4890 )\n* Added support for step functions (smcavallo)\n\n## 0.17.0-alpha / 2020-05-14\n* Added support for sns / firehose (rhys-evans)\n* Added support for fsx / appsync (arnitolog)\n\n## 0.16.0-alpha / 2020-04-06\n* Hugh rewrite: Decouple scraping and serving metrics. Thanks so much daviddetorres!\n* *BREAKING CHANGE* Decoupled scraping and set scraping interval to 5 minutes.\n```\nThe flag 'decoupled-scraping' makes the exporter to scrape Cloudwatch metrics in background in fixed intervals, in stead of each time that the '/metrics' endpoint is fetched. This protects from the abuse of API requests that can cause extra billing in AWS account. This flag is activated by default.\n\nIf the flag 'decoupled-scraping' is activated, the flag 'scraping-interval' defines the seconds between scrapes. 
Its default value is 300.\n```\n* Hugh rewrite: Rewrite of metric gathering to reduce API Limit problems. Thanks so much daviddetorres!\n* Improvment of ALB data gathering and filtering (daviddetorres)\n* Detect and fix bug after merge (deanrock)\n* Add cloudfront support (mentos1386)\n\n## 0.15.0-alpha / 2020-02-21\n* Fixed docker run command in README.md (daviddetorres)\n* Added support for Nat Gateway / Transit Gateway / Route 53 Resolver (j-nix)\n* Added support for ECS/ContainerInsights (daviddetorres)\n* Fix pagination for getMetricList (eminugurkenar)\n\n## 0.14.7-alpha / 2020-01-09\n* Change logging to json format (bheight-Zymergen)\n\n## 0.14.6-alpha / 2020-01-03\n* Add support for kafka (eminugurkenar)\n* Add structured json logging (bheight-Zymergen)\n* Increase code readability (bheight-Zymergen)\n* Fix ecs scraping bug (rabunkosar-dd)\n* Fix aws cloudwatch period bug (rabunkosar-dd)\n\n## 0.14.5-alpha / 2019-10-29\n* Fix sts api calls without specifying a region (nhinds)\n* Update aws-sdk to v1.25.21 (nhinds)\n\n## 0.14.4-alpha / 2019-10-25\n* Fix github actions (nhinds)\n* Update aws-sdk-go (deanrock)\n* Avoid appending to a shared dimensions variable from inside a loop (nhinds)\n* Remove hardcoded StorageType dimension from S3 metric (nhinds)\n\n## 0.14.3-alpha / 2019-10-11\n* Fix problems and crashes with ALBs and ELBs (Deepak1100)\n\n## 0.14.2-alpha / 2019-10-04\n* **BREAKING** Changing user in Docker image to be non root to adhere to potential security requirements. (whitlekx)\n* Fix prometheus metric bug with new services with '-' e.g. ecs-svc.\n\n## 0.14.1-alpha / 2019-09-06\n* Was accidentally with code from 01.14.0-alpha released.\n\n## 0.14.0-alpha / 2019-08-24\n* **BREAKING** Default command in Dockerfile is changed to yace. 
This removes the need to add yace as command.\n```yaml\n# Before\n        command:\n          - \"yace\"\n          - \"--config.file=/tmp/config.yml\"\n# After\n        args:\n          - \"--config.file=/tmp/config.yml\"\n```\n* Add support for Elastic MapReduce (nhinds)\n* Add support for SQS - (alext)\n* Add support for ECS Services as ecs-svc\n* Add support for NLB\n* Add retries to cloudwatch api calls (Deepak1100)\n* Fix dimension labels for static jobs (alext)\n\n## 0.13.7 / 2019-07-09\n* Add region as exported label to metrics\n\n## 0.13.6 / 2019-06-24\n* Fix errors with \"=\" in tags (cdchris12)\n* Add curl to container for easy debugging (cdchris12)\n\n## 0.13.5-alpha / 2019-06-09\n* Limit concurrency of aws calls\n\n## 0.13.4 / 2019-06-03\n* Add Autoscaling group support (wjam)\n* Fix strange AWS namespace bug for static exports (AWS/EC2/API)\n* Add warning if metric length of less than 300s is configured / Interminent metrics\n\n## 0.13.3 / 2019-04-26\n* Fix ALB problems. Target Group metrics are now exported as aws_albtg\n```\naws_albtg_request_count_sum{dimension_LoadBalancer=\"app/Test-ALB/fec38de4cf0cacb1\",dimension_TargetGroup=\"targetgroup/Test/708ecba11979327b\",name=\"arn:aws:elasticloadbalancing:eu-west-1:237935892384916:targetgroup/Test/708dcba119793234\"} 0\n```\n\n## 0.13.2 / 2019-04-26\n* CI problem\n\n## 0.13.1-alpha / 2019-04-03\n* **BREAKING** For some metrics `cloudwatch:ListMetrics` iam permissions are needed. 
Please update your role!\n* **BREAKING** Add 'v' to indicate it is a version number in docker tag / version output\n```\n# Before\n image: quay.io/invisionag/yet-another-cloudwatch-exporter:0.13.0\n# After\n image: quay.io/invisionag/yet-another-cloudwatch-exporter:v0.13.0\n```\n* Use golang 1.12.0 to build\n* Use goreleaser to release\n* Update aws dependencies\n* Use github actions as CI\n* Migrate dependency management to golang modules\n\n## 0.13.0-alpha / 2019-03-18\n* **BREAKING** For some metrics `cloudwatch:ListMetrics` iam permissions are needed. Please update your role!\n* **BREAKING** As adding cloudwatch timestamp breaks some metrics I decided to not set it as default anymore.\nThis should make it easier for new users to have fun with this project.\nIt fixes for some users `non-histogram and non-summary metrics should not have \"_sum\" suffix` bug.\n```yaml\n# Before\n  metrics:\n    - name: FreeStorageSpace\n      disableTimestamp: true\n# After\n  metrics:\n    - name: FreeStorageSpace\n\n# Before\n  metrics:\n    - name: FreeStorageSpace\n# After\n  metrics:\n    - name: FreeStorageSpace\n      useCloudwatchTimestamp: true\n```\n* Add ability to specify additional dimensions on discovery jobs e.g. for BucketSizeBytes metrics on S3 (abuchananTW)\n* Fix incorrect dimension value in case of alb in discovery config (GeeksWine)\n* Add CLI command to debug output\n* Add DynamoDB support\n\n## 0.12.0 / 2019-02-04\n* **BREAKING** Add the exact timestamps from CloudWatch to the exporter Prometheus metrics (LeePorte)\n* Add a new option `disableTimestamp` to not include a timestamp for a specific metric (it can be useful for sparse metrics, e.g. 
from S3) (LeePorte)\n* Add support for kinesis (AndrewChubatiuk)\n\n## 0.11.0 / 2018-12-28\n* **BREAKING** Add snake_case to prometheus metrics (sanchezpaco)\n```yaml\n# Before\naws_elb_requestcount_sum\n# After\naws_elb_request_count_sum\n```\n\n* Add optional delay setting to scraping (Deepak1100)\n```yaml\nperiod: 60\nlength: 900\ndelay: 300\n```\n\n## 0.10.0 / 2018-12-03\n* Reduce usage of listMetrics calls (nhinds)\n* Add support of iam roles (nhinds)\n* Add optional roleArn setting, which allows scraping with different roles e.g. pull data from mulitple AWS accounts using cross-acount roles (nhinds)\n```yaml\n    metrics:\n      - name: FreeStorageSpace\n        roleArn: xxx\n        statistics:\n        - 'Sum'\n        period: 600\n        length: 60\n```\n\n## 0.9.0 / 2018-11-16\n* Add lambda support (nhinds)\n* Fix support for listing multiple statistics per metric (nhinds)\n* Add tag labels on metrics for easy querying (nhinds)\n```\n# Before\naws_ec2_cpuutilization_average + on (name) group_left(tag_Name) aws_ec2_info\n\n# After, now name tags are on metrics and no grouping needed\naws_ec2_cpuutilization_average\n```\n\n* **BREAKING** Change config syntax. 
Now you can define tags which are exported as labels on metrics.\nBefore:\n\n```yaml\ndiscovery:\n  - region: eu-west-1\n    type: \"es\"\n    searchTags:\n      - Key: type\n        Value: ^(easteregg|k8s)$\n    metrics:\n      - name: FreeStorageSpace\n        statistics:\n        - 'Sum'\n        period: 600\n        length: 60\n```\n\nNew Syntax with optional exportedTagsOnMetrics:\n```yaml\ndiscovery:\n  exportedTagsOnMetrics:\n    ec2:\n      - Name\n  jobs:\n    - region: eu-west-1\n      type: \"es\"\n      searchTags:\n        - Key: type\n          Value: ^(easteregg|k8s)$\n      metrics:\n        - name: FreeStorageSpace\n          statistics:\n          - 'Sum'\n          period: 600\n          length: 60\n```\n\n## 0.8.0 / 2018-11-02\n* Added VPN connection metrics (AndrewChubatiuk)\n* Added ExtendedStatistics / percentiles (linefeedse)\n* Added Average Statistic (AndrewChubatiuk)\n\n## 0.7.0-alpha / 2018-10-19\n* ALB Support (linefeedse)\n* Custom lables for static metrics\n\nExample\n```yaml\nstatic:\n  - namespace: AWS/AutoScaling\n    region: eu-west-1\n    dimensions:\n     - name: AutoScalingGroupName\n       value: Test\n    customTags:\n      - Key: CustomTag\n        Value: CustomValue\n    metrics:\n      - name: GroupInServiceInstances\n        statistics:\n        - 'Minimum'\n        period: 60\n        length: 300\n```\n\n## 0.6.1 / 2018-10-09\n* Sanitize colons in tags (linefeedse)\n\n## 0.6.0 / 2018-09-20\n* **BREAKING**: Period/length uses now seconds instead of minutes\n* **BREAKING**: Config file uses new syntax to support static\n* Support of --debug flag which outputs some dev debug informations\n* Support of metrics who are not included in tags api (e.g. 
autoscaling metrics)\n\nBefore\n```yaml\njobs:\n  - discovery:\n      region: eu-west-1\n      metrics:\n        - name: HealthyHostCount\n          statistics:\n          - 'Minimum'\n          period: 60\n          length: 300\n```\n\nNew Syntax:\n```yaml\ndiscovery:\n  - region: eu-west-1\n    type: elb\n    searchTags:\n      - Key: KubernetesCluster\n        Value: production\n    metrics:\n      - name: HealthyHostCount\n        statistics:\n        - 'Minimum'\n        period: 60\n        length: 300\nstatic:\n  - namespace: AWS/AutoScaling\n    region: eu-west-1\n    dimensions:\n     - name: AutoScalingGroupName\n       value: Test\n    metrics:\n      - name: GroupInServiceInstances\n        statistics:\n        - 'Minimum'\n        period: 60\n        length: 300\n```\n\n## 0.5.0 / 2018-08-07\n* Support of EFS - Elastic File System\n* Support of EBS - Elastic Block Storage\n\n## 0.4.0 / 2018-08-07\n* **BREAKING**: Config file uses list as statistics config option,\nthis should reduce api calls for more than one statistics.\n\nBefore:\n```yaml\njobs:\n  - discovery:\n    metrics:\n        statistics: 'Maximum'\n```\nAfter:\n```yaml\njobs:\n  - discovery:\n    metrics:\n        statistics:\n        - 'Maximum'\n```\n* Start to track changes in CHANGELOG.md\n* Better error handling (discordianfish)\n* Increase speed, not only each jobs threaded but now each metric\n* Add s3 support\n* Fix potential race condition during cloudwatch access\n* Fix bug ignoring period in cloudwatch config\n* Use interfaces for aws access and prepare code for unit tests\n* Implement minimum, average, maximum, sum for cloudwatch api\n* Implement way to handle multiple data returned by cloudwatch\n* Update go dependencies\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "# Prometheus Community Code of Conduct\n\nPrometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).\n"
  },
  {
    "path": "CONTRIBUTE.md",
    "content": "# CONTRIBUTE\n\n## Steps to Contribute\n\n* We use [golangci-lint](https://github.com/golangci/golangci-lint) for linting the code. Make it sure to install it first.\n* Check out repository running `git clone https://github.com/prometheus-community/yet-another-cloudwatch-exporter.git`\n* For linting, please run `make lint`\n* For building, please run `make build`\n* For running locally, please run `./yace`\n* Best practices:\n  * commit should be as small as possible\n  * branch from the *master* branch\n  * add tests relevant to the fixed bug or new feature\n\n## How to release\n* `git tag v0.13.1-alpha && git push --tags`\n"
  },
  {
    "path": "Dockerfile",
    "content": "ARG ARCH=\"amd64\"\nARG OS=\"linux\"\nFROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest\nLABEL maintainer=\"The Prometheus Authors <prometheus-developers@googlegroups.com>\"\n\nARG ARCH=\"amd64\"\nARG OS=\"linux\"\nCOPY .build/${OS}-${ARCH}/yace /bin/yace\n\nCOPY examples/ec2.yml /etc/yace/config.yml\n\nEXPOSE     5000\nUSER       nobody\nENTRYPOINT [ \"/bin/yace\" ]\nCMD        [ \"--config.file=/etc/yace/config.yml\" ]\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright 2024 The Prometheus Authors\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "MAINTAINERS.md",
    "content": "# Maintainers\n\n- Thomas Peitz (info@thomas-peitz.de / @thomaspeitz)\n- Cristian Greco (cristian.greco@grafana.com / @cristiangreco)\n- Andrii Kushch (andrii.kushch@grafana.com / @andriikushch)\n- Tristan Burgess (tristan.burgess@grafana.com / @tristanburgess)\n"
  },
  {
    "path": "Makefile",
    "content": "# Copyright 2024 The Prometheus Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Needs to be defined before including Makefile.common to auto-generate targets\nDOCKER_ARCHS ?= amd64 armv7 arm64\nDOCKER_REPO  ?= prometheuscommunity\n\ninclude Makefile.common\n\nSTATICCHECK_IGNORE =\n\nDOCKER_IMAGE_NAME ?= yet-another-cloudwatch-exporter\n"
  },
  {
    "path": "Makefile.common",
    "content": "# Copyright The Prometheus Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# A common Makefile that includes rules to be reused in different prometheus projects.\n# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!\n\n# Example usage :\n# Create the main Makefile in the root project directory.\n# include Makefile.common\n# customTarget:\n# \t@echo \">> Running customTarget\"\n#\n\n# Ensure GOBIN is not set during build so that promu is installed to the correct path\nunexport GOBIN\n\nGO           ?= go\nGOFMT        ?= $(GO)fmt\nFIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))\nGOOPTS       ?=\nGOHOSTOS     ?= $(shell $(GO) env GOHOSTOS)\nGOHOSTARCH   ?= $(shell $(GO) env GOHOSTARCH)\n\nGO_VERSION        ?= $(shell $(GO) version)\nGO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))\nPRE_GO_111        ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\\.(10|[0-9])\\.')\n\nPROMU        := $(FIRST_GOPATH)/bin/promu\npkgs          = ./...\n\nifeq (arm, $(GOHOSTARCH))\n\tGOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)\n\tGO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)\nelse\n\tGO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)\nendif\n\nGOTEST := $(GO) test\nGOTEST_DIR :=\nifneq ($(CIRCLE_JOB),)\nifneq ($(shell command -v gotestsum 2> /dev/null),)\n\tGOTEST_DIR := test-results\n\tGOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --\nendif\nendif\n\nPROMU_VERSION ?= 0.18.1\nPROMU_URL    
 := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz\n\nSKIP_GOLANGCI_LINT :=\nGOLANGCI_LINT :=\nGOLANGCI_LINT_OPTS ?=\nGOLANGCI_LINT_VERSION ?= v2.11.4\nGOLANGCI_FMT_OPTS ?=\n# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.\n# windows isn't included here because of the path separator being different.\nifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))\n\tifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))\n\t\t# If we're in CI and there is an Actions file, that means the linter\n\t\t# is being run in Actions, so we don't need to run it here.\n\t\tifneq (,$(SKIP_GOLANGCI_LINT))\n\t\t\tGOLANGCI_LINT :=\n\t\telse ifeq (,$(CIRCLE_JOB))\n\t\t\tGOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint\n\t\telse ifeq (,$(wildcard .github/workflows/golangci-lint.yml))\n\t\t\tGOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint\n\t\tendif\n\tendif\nendif\n\nPREFIX                  ?= $(shell pwd)\nBIN_DIR                 ?= $(shell pwd)\nDOCKER_IMAGE_TAG        ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))\nDOCKERBUILD_CONTEXT     ?= ./\nDOCKER_REPO             ?= prom\n\n# Check if deprecated DOCKERFILE_PATH is set\nifdef DOCKERFILE_PATH\n$(error DOCKERFILE_PATH is deprecated. 
Use DOCKERFILE_VARIANTS ?= $(DOCKERFILE_PATH) in the Makefile)\nendif\n\nDOCKER_ARCHS ?= amd64 arm64 armv7 ppc64le riscv64 s390x\nDOCKERFILE_ARCH_EXCLUSIONS ?=\nDOCKER_REGISTRY_ARCH_EXCLUSIONS ?= quay.io:riscv64\nDOCKERFILE_VARIANTS     ?= $(wildcard Dockerfile Dockerfile.*)\n\n# Function to extract variant from Dockerfile label.\n# Returns the variant name from io.prometheus.image.variant label, or \"default\" if not found.\ndefine dockerfile_variant\n$(strip $(or $(shell sed -n 's/.*io\\.prometheus\\.image\\.variant=\"\\([^\"]*\\)\".*/\\1/p' $(1)),default))\nendef\n\n# Check for duplicate variant names (including default for Dockerfiles without labels).\nDOCKERFILE_VARIANT_NAMES := $(foreach df,$(DOCKERFILE_VARIANTS),$(call dockerfile_variant,$(df)))\nDOCKERFILE_VARIANT_NAMES_SORTED := $(sort $(DOCKERFILE_VARIANT_NAMES))\nifneq ($(words $(DOCKERFILE_VARIANT_NAMES)),$(words $(DOCKERFILE_VARIANT_NAMES_SORTED)))\n$(error Duplicate variant names found. Each Dockerfile must have a unique io.prometheus.image.variant label, and only one can be without a label (default))\nendif\n\n# Build variant:dockerfile pairs for shell iteration.\nDOCKERFILE_VARIANTS_WITH_NAMES := $(foreach df,$(DOCKERFILE_VARIANTS),$(call dockerfile_variant,$(df)):$(df))\n\n# Shell helper to check whether a dockerfile/arch pair is excluded.\ndefine dockerfile_arch_is_excluded\ncase \" $(DOCKERFILE_ARCH_EXCLUSIONS) \" in \\\n\t*\" $$dockerfile:$(1) \"*) true ;; \\\n\t*) false ;; \\\nesac\nendef\n\n# Shell helper to check whether a registry/arch pair is excluded.\n# Extracts registry from DOCKER_REPO (e.g., quay.io/prometheus -> quay.io)\ndefine registry_arch_is_excluded\nregistry=$$(echo \"$(DOCKER_REPO)\" | cut -d'/' -f1); \\\ncase \" $(DOCKER_REGISTRY_ARCH_EXCLUSIONS) \" in \\\n\t*\" $$registry:$(1) \"*) true ;; \\\n\t*) false ;; \\\nesac\nendef\n\nBUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))\nPUBLISH_DOCKER_ARCHS = $(addprefix 
common-docker-publish-,$(DOCKER_ARCHS))\nTAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))\n\nSANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))\n\nifeq ($(GOHOSTARCH),amd64)\n        ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))\n                # Only supported on amd64\n                test-flags := -race\n        endif\nendif\n\n# This rule is used to forward a target like \"build\" to \"common-build\".  This\n# allows a new \"build\" target to be defined in a Makefile which includes this\n# one and override \"common-build\" without override warnings.\n%: common-% ;\n\n.PHONY: common-all\ncommon-all: precheck style check_license lint yamllint unused build test\n\n.PHONY: common-style\ncommon-style:\n\t@echo \">> checking code style\"\n\t@fmtRes=$$($(GOFMT) -d $$(git ls-files '*.go' ':!:vendor/*' || find . -path ./vendor -prune -o -name '*.go' -print)); \\\n\tif [ -n \"$${fmtRes}\" ]; then \\\n\t\techo \"gofmt checking failed!\"; echo \"$${fmtRes}\"; echo; \\\n\t\techo \"Please ensure you are using $$($(GO) version) for formatting code.\"; \\\n\t\texit 1; \\\n\tfi\n\n.PHONY: common-check_license\ncommon-check_license:\n\t@echo \">> checking license header\"\n\t@licRes=$$(for file in $$(git ls-files '*.go' ':!:vendor/*' || find . 
-path ./vendor -prune -o -type f -iname '*.go' -print) ; do \\\n               awk 'NR<=3' $$file | grep -Eq \"(Copyright|generated|GENERATED)\" || echo $$file; \\\n       done); \\\n       if [ -n \"$${licRes}\" ]; then \\\n               echo \"license header checking failed:\"; echo \"$${licRes}\"; \\\n               exit 1; \\\n       fi\n\t@echo \">> checking for copyright years 2026 or later\"\n\t@futureYearRes=$$(git grep -E 'Copyright (202[6-9]|20[3-9][0-9])' -- '*.go' ':!:vendor/*' || true); \\\n\tif [ -n \"$${futureYearRes}\" ]; then \\\n\t\techo \"Files with copyright year 2026 or later found (should use 'Copyright The Prometheus Authors'):\"; echo \"$${futureYearRes}\"; \\\n\t\texit 1; \\\n\tfi\n\n.PHONY: common-deps\ncommon-deps:\n\t@echo \">> getting dependencies\"\n\t$(GO) mod download\n\n.PHONY: update-go-deps\nupdate-go-deps:\n\t@echo \">> updating Go dependencies\"\n\t@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \\\n\t\t$(GO) get $$m; \\\n\tdone\n\t$(GO) mod tidy\n\n.PHONY: common-test-short\ncommon-test-short: $(GOTEST_DIR)\n\t@echo \">> running short tests\"\n\t$(GOTEST) -short $(GOOPTS) $(pkgs)\n\n.PHONY: common-test\ncommon-test: $(GOTEST_DIR)\n\t@echo \">> running all tests\"\n\t$(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)\n\n$(GOTEST_DIR):\n\t@mkdir -p $@\n\n.PHONY: common-format\ncommon-format: $(GOLANGCI_LINT)\n\t@echo \">> formatting code\"\n\t$(GO) fmt $(pkgs)\nifdef GOLANGCI_LINT\n\t@echo \">> formatting code with golangci-lint\"\n\t$(GOLANGCI_LINT) fmt $(GOLANGCI_FMT_OPTS)\nendif\n\n.PHONY: common-vet\ncommon-vet:\n\t@echo \">> vetting code\"\n\t$(GO) vet $(GOOPTS) $(pkgs)\n\n.PHONY: common-lint\ncommon-lint: $(GOLANGCI_LINT)\nifdef GOLANGCI_LINT\n\t@echo \">> running golangci-lint\"\n\t$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)\nendif\n\n.PHONY: common-lint-fix\ncommon-lint-fix: $(GOLANGCI_LINT)\nifdef GOLANGCI_LINT\n\t@echo \">> running golangci-lint 
fix\"\n\t$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)\nendif\n\n.PHONY: common-yamllint\ncommon-yamllint:\n\t@echo \">> running yamllint on all YAML files in the repository\"\nifeq (, $(shell command -v yamllint 2> /dev/null))\n\t@echo \"yamllint not installed so skipping\"\nelse\n\tyamllint .\nendif\n\n# For backward-compatibility.\n.PHONY: common-staticcheck\ncommon-staticcheck: lint\n\n.PHONY: common-unused\ncommon-unused:\n\t@echo \">> running check for unused/missing packages in go.mod\"\n\t$(GO) mod tidy\n\t@git diff --exit-code -- go.sum go.mod\n\n.PHONY: common-build\ncommon-build: promu\n\t@echo \">> building binaries\"\n\t$(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)\n\n.PHONY: common-tarball\ncommon-tarball: promu\n\t@echo \">> building release tarball\"\n\t$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)\n\n.PHONY: common-docker-repo-name\ncommon-docker-repo-name:\n\t@echo \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)\"\n\n.PHONY: common-docker $(BUILD_DOCKER_ARCHS)\ncommon-docker: $(BUILD_DOCKER_ARCHS)\n$(BUILD_DOCKER_ARCHS): common-docker-%:\n\t@for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \\\n\t\tdockerfile=$${variant#*:}; \\\n\t\tvariant_name=$${variant%%:*}; \\\n\t\tif $(call dockerfile_arch_is_excluded,$*); then \\\n\t\t\techo \"Skipping $$variant_name variant for linux-$* (excluded by DOCKERFILE_ARCH_EXCLUSIONS)\"; \\\n\t\t\tcontinue; \\\n\t\tfi; \\\n\t\tdistroless_arch=\"$*\"; \\\n\t\tif [ \"$*\" = \"armv7\" ]; then \\\n\t\t\tdistroless_arch=\"arm\"; \\\n\t\tfi; \\\n\t\tif [ \"$$dockerfile\" = \"Dockerfile\" ]; then \\\n\t\t\techo \"Building default variant ($$variant_name) for linux-$* using $$dockerfile\"; \\\n\t\t\tdocker build -t \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)\" \\\n\t\t\t\t-f $$dockerfile \\\n\t\t\t\t--build-arg ARCH=\"$*\" \\\n\t\t\t\t--build-arg OS=\"linux\" \\\n\t\t\t\t--build-arg DISTROLESS_ARCH=\"$$distroless_arch\" \\\n\t\t\t\t$(DOCKERBUILD_CONTEXT); \\\n\t\t\tif [ 
\"$$variant_name\" != \"default\" ]; then \\\n\t\t\t\techo \"Tagging default variant with $$variant_name suffix\"; \\\n\t\t\t\tdocker tag \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)\" \\\n\t\t\t\t\t\"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name\"; \\\n\t\t\tfi; \\\n\t\telse \\\n\t\t\techo \"Building $$variant_name variant for linux-$* using $$dockerfile\"; \\\n\t\t\tdocker build -t \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name\" \\\n\t\t\t\t-f $$dockerfile \\\n\t\t\t\t--build-arg ARCH=\"$*\" \\\n\t\t\t\t--build-arg OS=\"linux\" \\\n\t\t\t\t--build-arg DISTROLESS_ARCH=\"$$distroless_arch\" \\\n\t\t\t\t$(DOCKERBUILD_CONTEXT); \\\n\t\tfi; \\\n\tdone\n\n.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)\ncommon-docker-publish: $(PUBLISH_DOCKER_ARCHS)\n$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:\n\t@for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \\\n\t\tdockerfile=$${variant#*:}; \\\n\t\tvariant_name=$${variant%%:*}; \\\n\t\tif $(call dockerfile_arch_is_excluded,$*); then \\\n\t\t\techo \"Skipping push for $$variant_name variant on linux-$* (excluded by DOCKERFILE_ARCH_EXCLUSIONS)\"; \\\n\t\t\tcontinue; \\\n\t\tfi; \\\n\t\tif $(call registry_arch_is_excluded,$*); then \\\n\t\t\techo \"Skipping push for $$variant_name variant on linux-$* to $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)\"; \\\n\t\t\tcontinue; \\\n\t\tfi; \\\n\t\tif [ \"$$dockerfile\" != \"Dockerfile\" ] || [ \"$$variant_name\" != \"default\" ]; then \\\n\t\t\techo \"Pushing $$variant_name variant for linux-$*\"; \\\n\t\t\tdocker push \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name\"; \\\n\t\tfi; \\\n\t\tif [ \"$$dockerfile\" = \"Dockerfile\" ]; then \\\n\t\t\techo \"Pushing default variant ($$variant_name) for linux-$*\"; \\\n\t\t\tdocker push 
\"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)\"; \\\n\t\tfi; \\\n\t\tif [ \"$(DOCKER_IMAGE_TAG)\" = \"latest\" ]; then \\\n\t\t\tif [ \"$$dockerfile\" != \"Dockerfile\" ] || [ \"$$variant_name\" != \"default\" ]; then \\\n\t\t\t\techo \"Pushing $$variant_name variant version tags for linux-$*\"; \\\n\t\t\t\tdocker push \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name\"; \\\n\t\t\tfi; \\\n\t\t\tif [ \"$$dockerfile\" = \"Dockerfile\" ]; then \\\n\t\t\t\techo \"Pushing default variant version tag for linux-$*\"; \\\n\t\t\t\tdocker push \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)\"; \\\n\t\t\tfi; \\\n\t\tfi; \\\n\tdone\n\nDOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))\n.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)\ncommon-docker-tag-latest: $(TAG_DOCKER_ARCHS)\n$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:\n\t@for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \\\n\t\tdockerfile=$${variant#*:}; \\\n\t\tvariant_name=$${variant%%:*}; \\\n\t\tif $(call dockerfile_arch_is_excluded,$*); then \\\n\t\t\techo \"Skipping tag for $$variant_name variant on linux-$* (excluded by DOCKERFILE_ARCH_EXCLUSIONS)\"; \\\n\t\t\tcontinue; \\\n\t\tfi; \\\n\t\tif $(call registry_arch_is_excluded,$*); then \\\n\t\t\techo \"Skipping tag for $$variant_name variant on linux-$* for $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)\"; \\\n\t\t\tcontinue; \\\n\t\tfi; \\\n\t\tif [ \"$$dockerfile\" != \"Dockerfile\" ] || [ \"$$variant_name\" != \"default\" ]; then \\\n\t\t\techo \"Tagging $$variant_name variant for linux-$* as latest\"; \\\n\t\t\tdocker tag \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name\" \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest-$$variant_name\"; \\\n\t\t\tdocker tag \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name\" 
\"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name\"; \\\n\t\tfi; \\\n\t\tif [ \"$$dockerfile\" = \"Dockerfile\" ]; then \\\n\t\t\techo \"Tagging default variant ($$variant_name) for linux-$* as latest\"; \\\n\t\t\tdocker tag \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)\" \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest\"; \\\n\t\t\tdocker tag \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)\" \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)\"; \\\n\t\tfi; \\\n\tdone\n\n.PHONY: common-docker-manifest\ncommon-docker-manifest:\n\t@for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \\\n\t\tdockerfile=$${variant#*:}; \\\n\t\tvariant_name=$${variant%%:*}; \\\n\t\tif [ \"$$dockerfile\" != \"Dockerfile\" ] || [ \"$$variant_name\" != \"default\" ]; then \\\n\t\t\techo \"Creating manifest for $$variant_name variant\"; \\\n\t\t\trefs=\"\"; \\\n\t\t\tfor arch in $(DOCKER_ARCHS); do \\\n\t\t\t\tif $(call dockerfile_arch_is_excluded,$$arch); then \\\n\t\t\t\t\techo \"  Skipping $$arch for $$variant_name (excluded by DOCKERFILE_ARCH_EXCLUSIONS)\"; \\\n\t\t\t\t\tcontinue; \\\n\t\t\t\tfi; \\\n\t\t\t\tif $(call registry_arch_is_excluded,$$arch); then \\\n\t\t\t\t\techo \"  Skipping $$arch for $$variant_name on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)\"; \\\n\t\t\t\t\tcontinue; \\\n\t\t\t\tfi; \\\n\t\t\t\trefs=\"$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name\"; \\\n\t\t\tdone; \\\n\t\t\tif [ -z \"$$refs\" ]; then \\\n\t\t\t\techo \"Skipping manifest for $$variant_name variant (no supported architectures)\"; \\\n\t\t\t\tcontinue; \\\n\t\t\tfi; \\\n\t\t\tDOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name\" $$refs; \\\n\t\t\tDOCKER_CLI_EXPERIMENTAL=enabled docker manifest push 
\"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name\"; \\\n\t\tfi; \\\n\t\tif [ \"$$dockerfile\" = \"Dockerfile\" ]; then \\\n\t\t\techo \"Creating default variant ($$variant_name) manifest\"; \\\n\t\t\trefs=\"\"; \\\n\t\t\tfor arch in $(DOCKER_ARCHS); do \\\n\t\t\t\tif $(call dockerfile_arch_is_excluded,$$arch); then \\\n\t\t\t\t\techo \"  Skipping $$arch for default variant (excluded by DOCKERFILE_ARCH_EXCLUSIONS)\"; \\\n\t\t\t\t\tcontinue; \\\n\t\t\t\tfi; \\\n\t\t\t\tif $(call registry_arch_is_excluded,$$arch); then \\\n\t\t\t\t\techo \"  Skipping $$arch for default variant on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)\"; \\\n\t\t\t\t\tcontinue; \\\n\t\t\t\tfi; \\\n\t\t\t\trefs=\"$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:$(SANITIZED_DOCKER_IMAGE_TAG)\"; \\\n\t\t\tdone; \\\n\t\t\tif [ -z \"$$refs\" ]; then \\\n\t\t\t\techo \"Skipping default variant manifest (no supported architectures)\"; \\\n\t\t\t\tcontinue; \\\n\t\t\tfi; \\\n\t\t\tDOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)\" $$refs; \\\n\t\t\tDOCKER_CLI_EXPERIMENTAL=enabled docker manifest push \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)\"; \\\n\t\tfi; \\\n\t\tif [ \"$(DOCKER_IMAGE_TAG)\" = \"latest\" ]; then \\\n\t\t\tif [ \"$$dockerfile\" != \"Dockerfile\" ] || [ \"$$variant_name\" != \"default\" ]; then \\\n\t\t\t\techo \"Creating manifest for $$variant_name variant version tag\"; \\\n\t\t\t\trefs=\"\"; \\\n\t\t\t\tfor arch in $(DOCKER_ARCHS); do \\\n\t\t\t\t\tif $(call dockerfile_arch_is_excluded,$$arch); then \\\n\t\t\t\t\t\techo \"  Skipping $$arch for $$variant_name version tag (excluded by DOCKERFILE_ARCH_EXCLUSIONS)\"; \\\n\t\t\t\t\t\tcontinue; \\\n\t\t\t\t\tfi; \\\n\t\t\t\t\tif $(call registry_arch_is_excluded,$$arch); then \\\n\t\t\t\t\t\techo \"  Skipping $$arch for $$variant_name version tag on $(DOCKER_REPO) (excluded by 
DOCKER_REGISTRY_ARCH_EXCLUSIONS)\"; \\\n\t\t\t\t\t\tcontinue; \\\n\t\t\t\t\tfi; \\\n\t\t\t\t\trefs=\"$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name\"; \\\n\t\t\t\tdone; \\\n\t\t\t\tif [ -z \"$$refs\" ]; then \\\n\t\t\t\t\techo \"Skipping version-tag manifest for $$variant_name variant (no supported architectures)\"; \\\n\t\t\t\t\tcontinue; \\\n\t\t\t\tfi; \\\n\t\t\t\tDOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name\" $$refs; \\\n\t\t\t\tDOCKER_CLI_EXPERIMENTAL=enabled docker manifest push \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name\"; \\\n\t\t\tfi; \\\n\t\t\tif [ \"$$dockerfile\" = \"Dockerfile\" ]; then \\\n\t\t\t\techo \"Creating default variant version tag manifest\"; \\\n\t\t\t\trefs=\"\"; \\\n\t\t\t\tfor arch in $(DOCKER_ARCHS); do \\\n\t\t\t\t\tif $(call dockerfile_arch_is_excluded,$$arch); then \\\n\t\t\t\t\t\techo \"  Skipping $$arch for default variant version tag (excluded by DOCKERFILE_ARCH_EXCLUSIONS)\"; \\\n\t\t\t\t\t\tcontinue; \\\n\t\t\t\t\tfi; \\\n\t\t\t\t\tif $(call registry_arch_is_excluded,$$arch); then \\\n\t\t\t\t\t\techo \"  Skipping $$arch for default variant version tag on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)\"; \\\n\t\t\t\t\t\tcontinue; \\\n\t\t\t\t\tfi; \\\n\t\t\t\t\trefs=\"$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:v$(DOCKER_MAJOR_VERSION_TAG)\"; \\\n\t\t\t\tdone; \\\n\t\t\t\tif [ -z \"$$refs\" ]; then \\\n\t\t\t\t\techo \"Skipping default variant version-tag manifest (no supported architectures)\"; \\\n\t\t\t\t\tcontinue; \\\n\t\t\t\tfi; \\\n\t\t\t\tDOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)\" $$refs; \\\n\t\t\t\tDOCKER_CLI_EXPERIMENTAL=enabled docker manifest push \"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)\"; 
\\\n\t\t\tfi; \\\n\t\tfi; \\\n\tdone\n\n.PHONY: promu\npromu: $(PROMU)\n\n$(PROMU):\n\t$(eval PROMU_TMP := $(shell mktemp -d))\n\tcurl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)\n\tmkdir -p $(FIRST_GOPATH)/bin\n\tcp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu\n\trm -r $(PROMU_TMP)\n\n.PHONY: common-proto\ncommon-proto:\n\t@echo \">> generating code from proto files\"\n\t@./scripts/genproto.sh\n\nifdef GOLANGCI_LINT\n$(GOLANGCI_LINT):\n\tmkdir -p $(FIRST_GOPATH)/bin\n\tcurl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \\\n\t\t| sed -e '/install -d/d' \\\n\t\t| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)\nendif\n\n.PHONY: common-print-golangci-lint-version\ncommon-print-golangci-lint-version:\n\t@echo $(GOLANGCI_LINT_VERSION)\n\n.PHONY: precheck\nprecheck::\n\ndefine PRECHECK_COMMAND_template =\nprecheck:: $(1)_precheck\n\nPRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))\n.PHONY: $(1)_precheck\n$(1)_precheck:\n\t@if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \\\n\t\techo \"Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?\"; \\\n\t\texit 1; \\\n\tfi\nendef\n\ngovulncheck: install-govulncheck\n\tgovulncheck ./...\n\ninstall-govulncheck:\n\tcommand -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest\n"
  },
  {
    "path": "NOTICE",
    "content": "Copyright 2018-2021 Invision AG\nCopyright 2021-2024 NERDSWORDS\nCopyright 2024 The Prometheus Authors\n"
  },
  {
    "path": "README.md",
    "content": "# YACE - yet another cloudwatch exporter\n\n[![Container on Quay](https://quay.io/repository/prometheuscommunity/yet-another-cloudwatch-exporter/status)][quay]\n[![Container on Docker Hub](https://img.shields.io/docker/pulls/prometheuscommunity/yet-another-cloudwatch-exporter.svg?maxAge=604800)][docker]\n\nYACE, or `yet another cloudwatch exporter`, is a [Prometheus exporter](https://prometheus.io/docs/instrumenting/exporters/#exporters-and-integrations) for [AWS CloudWatch](http://aws.amazon.com/cloudwatch/) metrics. It is written in Go and uses the official AWS SDK.\n\n## News\n\nAs of November 2024, YACE is part of [prometheus-community](https://github.com/prometheus-community). Read more about it in these announcement posts:\n\n* https://prometheus.io/blog/2024/11/19/yace-joining-prometheus-community/\n* https://grafana.com/blog/2024/11/19/yace-moves-to-prometheus-community/\n\n## Alternatives\n\nConsider using the official [CloudWatch Exporter](https://github.com/prometheus/cloudwatch_exporter) if you prefer a Java implementation.\n\n\n## Project Status\n\nWhile YACE is at version less than 1.0.0, expect that any new release might introduce breaking changes. We'll document changes in [CHANGELOG.md](CHANGELOG.md).\n\nWhere feasible, features will be deprecated instead of being immediately changed or removed. This means that YACE will continue to work but might log warning messages. Expect deprecated features to be permanently changed/removed within the next 2/3 releases.\n\n## Security\n\nRead more how to report a security vulnerability in [SECURITY.md](SECURITY.md).\n\n### Supported Versions\n\nOnly the latest version gets security updates. 
We won't support older versions.\n\n## Features\n\n* Stop worrying about your AWS IDs - Auto discovery of resources via tags\n* Structured logging (json and logfmt)\n* Filter monitored resources via regex\n* Automatic adding of tag labels to metrics\n* Automatic adding of dimension labels to metrics\n* Allows to export 0 even if CloudWatch returns nil\n* Allows exports metrics with CloudWatch timestamps (disabled by default)\n* Static metrics support for all cloudwatch metrics without auto discovery\n* Pull data from multiple AWS accounts using cross-account roles\n* Can be used as a library in an external application\n* Support the scraping of custom namespaces metrics with the CloudWatch Dimensions.\n* Supported services with auto discovery through tags:\n  * `/aws/sagemaker/Endpoints` - Sagemaker Endpoints\n  * `/aws/sagemaker/InferenceRecommendationsJobs` - Sagemaker Inference Recommender Jobs\n  * `/aws/sagemaker/ProcessingJobs` - Sagemaker Processing Jobs\n  * `/aws/sagemaker/TrainingJobs` - Sagemaker Training Jobs\n  * `/aws/sagemaker/TransformJobs` - Sagemaker Batch Transform Jobs\n  * `AmazonMWAA` - Managed Apache Airflow\n  * `AWS/ACMPrivateCA` - ACM Private CA\n  * `AWS/AmazonMQ` - Managed Message Broker Service\n  * `AWS/AOSS` - OpenSearch Serverless\n  * `AWS/ApiGateway` - ApiGateway (V1 and V2)\n  * `AWS/ApplicationELB` - Application Load Balancer\n  * `AWS/AppRunner` - Managed Container Apps Service\n  * `AWS/AppStream` - AppStream\n  * `AWS/AppSync` - AppSync\n  * `AWS/Athena` - Athena\n  * `AWS/AutoScaling` - Auto Scaling Group\n  * `AWS/Backup` - Backup\n  * `AWS/Bedrock` - GenerativeAI\n  * `AWS/Billing` - Billing\n  * `AWS/Cassandra` - Cassandra\n  * `AWS/CertificateManager` - Certificate Manager\n  * `AWS/ClientVPN` - Client-based VPN\n  * `AWS/CloudFront` - Cloud Front\n  * `AWS/Cognito` - Cognito\n  * `AWS/DataSync` - DataSync\n  * `AWS/DDoSProtection` - Distributed Denial of Service (DDoS) protection service\n  * `AWS/DirectoryService` - 
Directory Services (MicrosoftAD)\n  * `AWS/DMS` - Database Migration Service\n  * `AWS/DocDB` - DocumentDB (with MongoDB compatibility)\n  * `AWS/DX` - Direct Connect\n  * `AWS/DynamoDB` - NoSQL Key-Value Database\n  * `AWS/EBS` - Elastic Block Storage\n  * `AWS/EC2` - Elastic Compute Cloud\n  * `AWS/EC2Spot` - Elastic Compute Cloud for Spot Instances\n  * `AWS/ECR` - Elastic Container Registry\n  * `AWS/ECS` - Elastic Container Service (Service Metrics)\n  * `AWS/EFS` - Elastic File System\n  * `AWS/ElastiCache` - ElastiCache\n  * `AWS/ElasticBeanstalk` - Elastic Beanstalk\n  * `AWS/ElasticMapReduce` - Elastic MapReduce\n  * `AWS/ELB` - Elastic Load Balancer\n  * `AWS/EMRServerless` - Amazon EMR Serverless\n  * `AWS/ES` - ElasticSearch\n  * `AWS/Events` - EventBridge\n  * `AWS/Firehose` - Managed Streaming Service\n  * `AWS/FSx` - FSx File System\n  * `AWS/GameLift` - GameLift\n  * `AWS/GatewayELB` - Gateway Load Balancer\n  * `AWS/GlobalAccelerator` - AWS Global Accelerator\n  * `AWS/IoT` - IoT\n  * `AWS/IPAM` - IP address manager\n  * `AWS/Kafka` - Managed Apache Kafka\n  * `AWS/KafkaConnect` - AWS MSK Connectors\n  * `AWS/Kinesis` - Kinesis Data Stream\n  * `AWS/KinesisAnalytics` - Kinesis Data Analytics for SQL Applications\n  * `AWS/KMS` - Key Management Service\n  * `AWS/Lambda` - Lambda Functions\n  * `AWS/Logs` - CloudWatch Logs\n  * `AWS/MediaConnect` - AWS Elemental MediaConnect\n  * `AWS/MediaConvert` - AWS Elemental MediaConvert\n  * `AWS/MediaLive` - AWS Elemental MediaLive\n  * `AWS/MediaPackage` - AWS Elemental MediaPackage\n  * `AWS/MediaTailor` - AWS Elemental MediaTailor\n  * `AWS/MemoryDB` - AWS MemoryDB\n  * `AWS/MWAA` - Managed Apache Airflow (Container, queue, and database metrics)\n  * `AWS/NATGateway` - NAT Gateway\n  * `AWS/Neptune` - Neptune\n  * `AWS/NetworkELB` - Network Load Balancer\n  * `AWS/NetworkFirewall` - Network Firewall\n  * `AWS/Network Manager` - Network Manager\n  * `AWS/PrivateLinkEndpoints` - VPC Endpoint\n  * 
`AWS/PrivateLinkServices` - VPC Endpoint Service\n  * `AWS/Prometheus` - Managed Service for Prometheus\n  * `AWS/QLDB` - Quantum Ledger Database\n  * `AWS/QuickSight` - QuickSight (Business Intelligence)\n  * `AWS/RDS` - Relational Database Service\n  * `AWS/Redshift` - Redshift Database\n  * `AWS/Redshift-Serverless` - Redshift Serverless\n  * `AWS/Route53` - Route53 Health Checks\n  * `AWS/Route53Resolver` - Route53 Resolver\n  * `AWS/RUM` - Real User Monitoring\n  * `AWS/S3` - Object Storage\n  * `AWS/Sagemaker/ModelBuildingPipeline` - Sagemaker Model Building Pipelines\n  * `AWS/SageMaker` - Sagemaker invocations\n  * `AWS/Scheduler` - EventBridge Scheduler\n  * `AWS/SecretsManager` - Secrets Manager\n  * `AWS/SES` - Simple Email Service\n  * `AWS/SNS` - Simple Notification Service\n  * `AWS/SQS` - Simple Queue Service\n  * `AWS/States` - Step Functions\n  * `AWS/StorageGateway` - On-premises access to cloud storage\n  * `AWS/Timestream` - Time-series database service\n  * `AWS/TransitGateway` - Transit Gateway\n  * `AWS/TrustedAdvisor` - Trusted Advisor\n  * `AWS/Usage` - Usage of some AWS resources and APIs\n  * `AWS/VpcLattice` - VPC Lattice\n  * `AWS/VPN` - VPN connection\n  * `AWS/WAFV2` - Web Application Firewall v2\n  * `AWS/WorkSpaces` - Workspaces\n  * `ContainerInsights` - EKS ContainerInsights (Dependency on Cloudwatch agent)\n  * `CWAgent` - CloudWatch agent\n  * `ECS/ContainerInsights` - ECS/ContainerInsights (Fargate metrics)\n  * `Glue` - AWS Glue Jobs\n\n## Feature flags\n\nTo provide backwards compatibility, some of YACE's new features or breaking changes might be guarded under a feature flag. Refer to [docs/feature_flags.md](./docs/feature_flags.md) for details.\n\n## Installing and running\n\nRefer to the [installation guide](docs/installation.md).\n\n## Authentication\n\nThe exporter will need to be running in an environment which has access to AWS. 
The exporter uses the [AWS SDK for Go](https://aws.github.io/aws-sdk-go-v2/docs/getting-started/) and supports providing authentication via [AWS's default credential chain](https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/#specifying-credentials). Regardless of the method used to acquire the credentials, some permissions are needed for the exporter to work.\n\nAs a quick start, the following IAM policy can be used to grant all the permissions required by YACE\n```json\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Action\": [\n        \"tag:GetResources\",\n        \"cloudwatch:GetMetricData\",\n        \"cloudwatch:GetMetricStatistics\",\n        \"cloudwatch:ListMetrics\",\n        \"apigateway:GET\",\n        \"aps:ListWorkspaces\",\n        \"autoscaling:DescribeAutoScalingGroups\",\n        \"dms:DescribeReplicationInstances\",\n        \"dms:DescribeReplicationTasks\",\n        \"ec2:DescribeTransitGatewayAttachments\",\n        \"ec2:DescribeSpotFleetRequests\",\n        \"shield:ListProtections\",\n        \"storagegateway:ListGateways\",\n        \"storagegateway:ListTagsForResource\",\n        \"iam:ListAccountAliases\"\n      ],\n      \"Effect\": \"Allow\",\n      \"Resource\": \"*\"\n    }\n  ]\n}\n```\n\nIf you would like to remove certain permissions based on your needs, the policy can be adjusted based on the CloudWatch namespaces you are scraping\n\nThese are the bare minimum permissions required to run Static and Discovery Jobs\n```json\n\"tag:GetResources\",\n\"cloudwatch:GetMetricData\",\n\"cloudwatch:GetMetricStatistics\",\n\"cloudwatch:ListMetrics\"\n```\n\nThis permission is required to discover resources for the AWS/ApiGateway namespace\n```json\n\"apigateway:GET\"\n```\n\nThis permission is required to discover resources for the AWS/AutoScaling namespace\n```json\n\"autoscaling:DescribeAutoScalingGroups\"\n```\n\nThese permissions are required to discover resources for the AWS/DMS 
namespace\n```json\n\"dms:DescribeReplicationInstances\",\n\"dms:DescribeReplicationTasks\"\n```\n\n\nThis permission is required to discover resources for the AWS/EC2Spot namespace\n```json\n\"ec2:DescribeSpotFleetRequests\"\n```\n\nThis permission is required to discover resources for the AWS/Prometheus namespace\n```json\n\"aps:ListWorkspaces\"\n```\n\nThese permissions are required to discover resources for the AWS/StorageGateway namespace\n```json\n\"storagegateway:ListGateways\",\n\"storagegateway:ListTagsForResource\"\n```\n\nThis permission is required to discover resources for the AWS/TransitGateway namespace\n```json\n\"ec2:DescribeTransitGatewayAttachments\"\n```\n\nThis permission is required to discover protected resources for the AWS/DDoSProtection namespace\n```json\n\"shield:ListProtections\"\n```\n\nThe AWS IAM API supports creating account aliases, which are human-friendly names that can be used to easily identify accounts. An account can have at most a single alias, see ([docs](https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListAccountAliases.html)). Each alias must be unique across an AWS network partition ([docs](https://docs.aws.amazon.com/IAM/latest/UserGuide/console_account-alias.html#AboutAccountAlias)). The following permission is required to get the alias for an account, which is exported as a label in the `aws_account_info` metric:\n```json\n\"iam:ListAccountAliases\"\n```\n\nIf running YACE inside an AWS EC2 instance, the exporter will automatically attempt to assume the associated IAM Role. 
If this is undesirable behavior turn off the use the metadata endpoint by setting the environment variable `AWS_EC2_METADATA_DISABLED=true`.\n\n## Configuration\n\nRefer to the [configuration](docs/configuration.md) docs.\n\n## Metrics Examples\n\n```text\n### Metrics with exportedTagsOnMetrics\naws_ec2_cpuutilization_maximum{dimension_InstanceId=\"i-someid\", name=\"arn:aws:ec2:eu-west-1:472724724:instance/i-someid\", tag_Name=\"jenkins\"} 57.2916666666667\n\n### Info helper with tags\naws_elb_info{name=\"arn:aws:elasticloadbalancing:eu-west-1:472724724:loadbalancer/a815b16g3417211e7738a02fcc13bbf9\",tag_KubernetesCluster=\"production-19\",tag_Name=\"\",tag_kubernetes_io_cluster_production_19=\"owned\",tag_kubernetes_io_service_name=\"nginx-ingress/private-ext\",region=\"eu-west-1\"} 0\naws_ec2_info{name=\"arn:aws:ec2:eu-west-1:472724724:instance/i-someid\",tag_Name=\"jenkins\"} 0\n\n### Track cloudwatch requests to calculate costs\nyace_cloudwatch_requests_total 168\n```\n\n## Query Examples without exportedTagsOnMetrics\n\n```text\n# CPUUtilization + Name tag of the instance id - No more instance id needed for monitoring\naws_ec2_cpuutilization_average + on (name) group_left(tag_Name) aws_ec2_info\n\n# Free Storage in Megabytes + tag Type of the elasticsearch cluster\n(aws_es_free_storage_space_sum + on (name) group_left(tag_Type) aws_es_info) / 1024\n\n# Add kubernetes / kops tags on 4xx elb metrics\n(aws_elb_httpcode_backend_4_xx_sum + on (name) group_left(tag_KubernetesCluster,tag_kubernetes_io_service_name) aws_elb_info)\n\n# Availability Metric for ELBs (Successful requests / Total Requests) + k8s service name\n# Use nilToZero on all metrics else it won't work\n((aws_elb_request_count_sum - on (name) group_left() aws_elb_httpcode_backend_4_xx_sum) - on (name) group_left() aws_elb_httpcode_backend_5_xx_sum) + on (name) group_left(tag_kubernetes_io_service_name) aws_elb_info\n\n# Forecast your elasticsearch disk size in 7 days and report metrics with tags 
type and version\npredict_linear(aws_es_free_storage_space_minimum[2d], 86400 * 7) + on (name) group_left(tag_type, tag_version) aws_es_info\n\n# Forecast your cloudwatch costs for next 32 days based on last 10 minutes\n# 1.000.000 Requests free\n# 0.01 Dollar for 1.000 GetMetricStatistics Api Requests (https://aws.amazon.com/cloudwatch/pricing/)\n((increase(yace_cloudwatch_requests_total[10m]) * 6 * 24 * 32) - 100000) / 1000 * 0.01\n```\n\n## Override AWS endpoint urls\nTo support local testing, all AWS urls can be overridden by setting an environment variable `AWS_ENDPOINT_URL`\n```shell\ndocker run -d --rm -v $PWD/credentials:/home/.aws/credentials -v $PWD/config.yml:/tmp/config.yml \\\n-e AWS_ENDPOINT_URL=http://localhost:4766 -p 5000:5000 --name yace quay.io/prometheuscommunity/yet-another-cloudwatch-exporter:latest\n```\n\n## Options\n### RoleArns\n\nMultiple roleArns are useful when you are monitoring a multi-account setup, where all accounts are using the same AWS services. For example, you are running yace in a monitoring account and you have a number of accounts (for example newspapers, radio and television) running ECS clusters. Each account gives yace permissions to assume a local IAM role, which has all the necessary permissions for Cloudwatch metrics. 
On this kind of setup, you could simply list:\n```yaml\napiVersion: v1alpha1\nsts-region: eu-west-1\ndiscovery:\n  jobs:\n    - type: AWS/ECS\n      regions:\n        - eu-north-1\n      roles:\n        - roleArn: \"arn:aws:iam::1111111111111:role/prometheus\" # newspaper\n        - roleArn: \"arn:aws:iam::2222222222222:role/prometheus\" # radio\n        - roleArn: \"arn:aws:iam::3333333333333:role/prometheus\" # television\n      metrics:\n        - name: MemoryReservation\n          statistics:\n            - Average\n            - Minimum\n            - Maximum\n          period: 600\n          length: 600\n```\n\nAdditionally, if the IAM role you want to assume requires an [External ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html?icmpid=docs_iam_console) you can specify it this way:\n\n```yaml\n  roles:\n    - roleArn: \"arn:aws:iam::1111111111111:role/prometheus\"\n      externalId: \"shared-external-identifier\"\n```\n\n### Requests concurrency\nThe flags 'cloudwatch-concurrency' and 'tag-concurrency' define the number of concurrent request to cloudwatch metrics and tags. Their default value is 5.\n\nSetting a higher value makes faster scraping times but can incur in throttling and the blocking of the API.\n\n### Decoupled scraping\nThe exporter scraped cloudwatch metrics in the background in fixed interval.\nThis protects from the abuse of API requests that can cause extra billing in AWS account.\n\nThe flag 'scraping-interval' defines the seconds between scrapes.\nThe default value is 300.\n\n## Embedding YACE in your application\n\nYACE can be used as a library and embedded into your application, see the [embedding guide](docs/embedding.md).\n\n## Troubleshooting / Debugging\n\n### Help my metrics are intermittent\n\n* Please, try out a bigger length e.g. for elb try out a length of 600 and a period of 600. Then test how low you can\ngo without losing data. 
ELB metrics on AWS are written every 5 minutes (300) by default.\n\n### My metrics only show new values after 5 minutes\n\n* Please, try to set a lower value for the 'scraping-interval' flag or set the 'decoupled-scraping' to false.\n\n## Contribute\n\n[Development Setup / Guide](/CONTRIBUTE.md)\n\n## Thank you\n\n* [Justin Santa Barbara](https://github.com/justinsb) - For telling me about AWS tags api which simplified a lot - Thanks!\n* [Brian Brazil](https://github.com/brian-brazil) - Who gave a lot of feedback regarding UX and prometheus lib - Thanks!\n\n[quay]: https://quay.io/repository/prometheuscommunity/yet-another-cloudwatch-exporter\n[docker]: https://hub.docker.com/r/prometheuscommunity/yet-another-cloudwatch-exporter\n"
  },
  {
    "path": "SECURITY.md",
    "content": "# Reporting a security issue\n\nThe Prometheus security policy, including how to report vulnerabilities, can be\nfound here:\n\n[https://prometheus.io/docs/operating/security/](https://prometheus.io/docs/operating/security/)\n"
  },
  {
    "path": "VERSION",
    "content": "0.64.0\n"
  },
  {
    "path": "cmd/yace/main.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"net/http\"\n\t\"net/http/pprof\"\n\t\"os\"\n\t\"slices\"\n\t\"strings\"\n\n\t\"github.com/prometheus/common/promslog\"\n\tpromslogflag \"github.com/prometheus/common/promslog/flag\"\n\t\"github.com/prometheus/common/version\"\n\t\"github.com/urfave/cli/v2\"\n\t\"golang.org/x/sync/semaphore\"\n\n\texporter \"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n)\n\nconst (\n\tenableFeatureFlag = \"enable-feature\"\n\thtmlVersion       = `<html>\n<head><title>Yet Another CloudWatch Exporter</title></head>\n<body>\n<h1>Thanks for using YACE :)</h1>\nVersion: %s\n<p><a href=\"/metrics\">Metrics</a></p>\n%s\n</body>\n</html>`\n\thtmlPprof = `<p><a href=\"/debug/pprof\">Pprof</a><p>`\n)\n\nvar sem = semaphore.NewWeighted(1)\n\nconst (\n\tdefaultLogLevel  = \"info\"\n\tdefaultLogFormat = \"json\"\n)\n\nvar (\n\taddr                  string\n\tconfigFile            string\n\tlogLevel              string\n\tlogFormat             string\n\tfips                  bool\n\tcloudwatchConcurrency 
cloudwatch.ConcurrencyConfig\n\ttagConcurrency        int\n\tscrapingInterval      int\n\tmetricsPerQuery       int\n\tlabelsSnakeCase       bool\n\tprofilingEnabled      bool\n\n\tlogger *slog.Logger\n)\n\nfunc main() {\n\tapp := NewYACEApp()\n\tif err := app.Run(os.Args); err != nil {\n\t\t// if we exit very early we'll not have set up the logger yet\n\t\tif logger == nil {\n\t\t\tjsonFmt := promslog.NewFormat()\n\t\t\t_ = jsonFmt.Set(\"json\")\n\t\t\tlogger = promslog.New(&promslog.Config{Format: jsonFmt})\n\t\t}\n\t\tlogger.Error(\"Error running yace\", \"err\", err)\n\t\tos.Exit(1)\n\t}\n}\n\n// NewYACEApp creates a new cli.App implementing the YACE entrypoints and CLI arguments.\nfunc NewYACEApp() *cli.App {\n\tyace := cli.NewApp()\n\tyace.Name = \"Yet Another CloudWatch Exporter\"\n\tyace.Version = version.Version\n\tyace.Usage = \"YACE configured to retrieve CloudWatch metrics through the AWS API\"\n\tyace.Description = \"\"\n\tyace.Authors = []*cli.Author{\n\t\t{Name: \"\", Email: \"\"},\n\t}\n\n\tyace.Flags = []cli.Flag{\n\t\t&cli.StringFlag{\n\t\t\tName:        \"listen-address\",\n\t\t\tValue:       \":5000\",\n\t\t\tUsage:       \"The address to listen on\",\n\t\t\tDestination: &addr,\n\t\t\tEnvVars:     []string{\"listen-address\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:        \"config.file\",\n\t\t\tValue:       \"config.yml\",\n\t\t\tUsage:       \"Path to configuration file\",\n\t\t\tDestination: &configFile,\n\t\t\tEnvVars:     []string{\"config.file\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:        \"log.level\",\n\t\t\tValue:       defaultLogLevel,\n\t\t\tUsage:       promslogflag.LevelFlagHelp,\n\t\t\tDestination: &logLevel,\n\t\t\tAction: func(_ *cli.Context, s string) error {\n\t\t\t\tif !slices.Contains(promslog.LevelFlagOptions, s) {\n\t\t\t\t\treturn fmt.Errorf(\"unrecognized log format %q\", s)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName:        \"log.format\",\n\t\t\tValue:       
defaultLogFormat,\n\t\t\tUsage:       promslogflag.FormatFlagHelp,\n\t\t\tDestination: &logFormat,\n\t\t\tAction: func(_ *cli.Context, s string) error {\n\t\t\t\tif !slices.Contains(promslog.FormatFlagOptions, s) {\n\t\t\t\t\treturn fmt.Errorf(\"unrecognized log format %q\", s)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName:        \"fips\",\n\t\t\tValue:       false,\n\t\t\tUsage:       \"Use FIPS compliant AWS API endpoints\",\n\t\t\tDestination: &fips,\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:        \"cloudwatch-concurrency\",\n\t\t\tValue:       exporter.DefaultCloudwatchConcurrency.SingleLimit,\n\t\t\tUsage:       \"Maximum number of concurrent requests to CloudWatch API.\",\n\t\t\tDestination: &cloudwatchConcurrency.SingleLimit,\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName:        \"cloudwatch-concurrency.per-api-limit-enabled\",\n\t\t\tValue:       exporter.DefaultCloudwatchConcurrency.PerAPILimitEnabled,\n\t\t\tUsage:       \"Whether to enable the per API CloudWatch concurrency limiter. When enabled, the concurrency `-cloudwatch-concurrency` flag will be ignored.\",\n\t\t\tDestination: &cloudwatchConcurrency.PerAPILimitEnabled,\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:        \"cloudwatch-concurrency.list-metrics-limit\",\n\t\t\tValue:       exporter.DefaultCloudwatchConcurrency.ListMetrics,\n\t\t\tUsage:       \"Maximum number of concurrent requests to ListMetrics CloudWatch API. Used if the -cloudwatch-concurrency.per-api-limit-enabled concurrency limiter is enabled.\",\n\t\t\tDestination: &cloudwatchConcurrency.ListMetrics,\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:        \"cloudwatch-concurrency.get-metric-data-limit\",\n\t\t\tValue:       exporter.DefaultCloudwatchConcurrency.GetMetricData,\n\t\t\tUsage:       \"Maximum number of concurrent requests to GetMetricData CloudWatch API. 
Used if the -cloudwatch-concurrency.per-api-limit-enabled concurrency limiter is enabled.\",\n\t\t\tDestination: &cloudwatchConcurrency.GetMetricData,\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:        \"cloudwatch-concurrency.get-metric-statistics-limit\",\n\t\t\tValue:       exporter.DefaultCloudwatchConcurrency.GetMetricStatistics,\n\t\t\tUsage:       \"Maximum number of concurrent requests to GetMetricStatistics CloudWatch API. Used if the -cloudwatch-concurrency.per-api-limit-enabled concurrency limiter is enabled.\",\n\t\t\tDestination: &cloudwatchConcurrency.GetMetricStatistics,\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:        \"tag-concurrency\",\n\t\t\tValue:       exporter.DefaultTaggingAPIConcurrency,\n\t\t\tUsage:       \"Maximum number of concurrent requests to Resource Tagging API.\",\n\t\t\tDestination: &tagConcurrency,\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:        \"scraping-interval\",\n\t\t\tValue:       300,\n\t\t\tUsage:       \"Seconds to wait between scraping the AWS metrics\",\n\t\t\tDestination: &scrapingInterval,\n\t\t\tEnvVars:     []string{\"scraping-interval\"},\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName:        \"metrics-per-query\",\n\t\t\tValue:       exporter.DefaultMetricsPerQuery,\n\t\t\tUsage:       \"Number of metrics made in a single GetMetricsData request\",\n\t\t\tDestination: &metricsPerQuery,\n\t\t\tEnvVars:     []string{\"metrics-per-query\"},\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName:        \"labels-snake-case\",\n\t\t\tValue:       exporter.DefaultLabelsSnakeCase,\n\t\t\tUsage:       \"Whether labels should be output in snake case instead of camel case\",\n\t\t\tDestination: &labelsSnakeCase,\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName:        \"profiling.enabled\",\n\t\t\tValue:       false,\n\t\t\tUsage:       \"Enable pprof endpoints\",\n\t\t\tDestination: &profilingEnabled,\n\t\t},\n\t\t&cli.StringSliceFlag{\n\t\t\tName:  enableFeatureFlag,\n\t\t\tUsage: \"Comma-separated list of enabled features\",\n\t\t},\n\t}\n\n\tyace.Commands 
= []*cli.Command{\n\t\t{\n\t\t\tName:    \"verify-config\",\n\t\t\tAliases: []string{\"vc\"},\n\t\t\tUsage:   \"Loads and attempts to parse config file, then exits. Useful for CI/CD validation\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\t&cli.StringFlag{Name: \"config.file\", Value: \"config.yml\", Usage: \"Path to configuration file.\", Destination: &configFile},\n\t\t\t},\n\t\t\tAction: func(_ *cli.Context) error {\n\t\t\t\tlogger = newLogger(logFormat, logLevel).With(\"version\", version.Version)\n\t\t\t\tlogger.Info(\"Parsing config\")\n\t\t\t\tcfg := config.ScrapeConf{}\n\t\t\t\tif _, err := cfg.Load(configFile, logger); err != nil {\n\t\t\t\t\tlogger.Error(\"Couldn't read config file\", \"err\", err, \"path\", configFile)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tlogger.Info(\"Config file is valid\", \"path\", configFile)\n\t\t\t\tos.Exit(0)\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:    \"version\",\n\t\t\tAliases: []string{\"v\"},\n\t\t\tUsage:   \"prints current yace version.\",\n\t\t\tAction: func(_ *cli.Context) error {\n\t\t\t\tfmt.Println(version.Version)\n\t\t\t\tos.Exit(0)\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tyace.Action = startScraper\n\n\treturn yace\n}\n\nfunc startScraper(c *cli.Context) error {\n\tlogger = newLogger(logFormat, logLevel).With(\"version\", version.Version)\n\n\t// log warning if the two concurrency limiting methods are configured via CLI\n\tif c.IsSet(\"cloudwatch-concurrency\") && c.IsSet(\"cloudwatch-concurrency.per-api-limit-enabled\") {\n\t\tlogger.Warn(\"Both `cloudwatch-concurrency` and `cloudwatch-concurrency.per-api-limit-enabled` are set. 
`cloudwatch-concurrency` will be ignored, and the per-api concurrency limiting strategy will be favoured.\")\n\t}\n\n\tlogger.Info(\"Parsing config\")\n\n\tcfg := config.ScrapeConf{}\n\tjobsCfg, err := cfg.Load(configFile, logger)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't read %s: %w\", configFile, err)\n\t}\n\n\tfeatureFlags := c.StringSlice(enableFeatureFlag)\n\ts := NewScraper(featureFlags)\n\n\tcachingFactory, err := clients.NewFactory(logger, jobsCfg, fips)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to construct aws sdk v2 client cache: %w\", err)\n\t}\n\n\tctx, cancelRunningScrape := context.WithCancel(context.Background())\n\tgo s.decoupled(ctx, logger, jobsCfg, cachingFactory)\n\n\tmux := http.NewServeMux()\n\n\tif profilingEnabled {\n\t\tmux.HandleFunc(\"/debug/pprof/\", pprof.Index)\n\t\tmux.HandleFunc(\"/debug/pprof/cmdline\", pprof.Cmdline)\n\t\tmux.HandleFunc(\"/debug/pprof/profile\", pprof.Profile)\n\t\tmux.HandleFunc(\"/debug/pprof/symbol\", pprof.Symbol)\n\t\tmux.HandleFunc(\"/debug/pprof/trace\", pprof.Trace)\n\t}\n\n\tmux.HandleFunc(\"/metrics\", s.makeHandler())\n\n\tmux.HandleFunc(\"/\", func(w http.ResponseWriter, _ *http.Request) {\n\t\tpprofLink := \"\"\n\t\tif profilingEnabled {\n\t\t\tpprofLink = htmlPprof\n\t\t}\n\n\t\t_, _ = fmt.Fprintf(w, htmlVersion, version.Version, pprofLink)\n\t})\n\n\tmux.HandleFunc(\"/healthz\", func(w http.ResponseWriter, _ *http.Request) {\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write([]byte(\"ok\"))\n\t})\n\n\tmux.HandleFunc(\"/reload\", func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != http.MethodPost {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tlogger.Info(\"Parsing config\")\n\t\tnewCfg := config.ScrapeConf{}\n\t\tnewJobsCfg, err := newCfg.Load(configFile, logger)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Couldn't read config file\", \"err\", err, \"path\", configFile)\n\t\t\treturn\n\t\t}\n\n\t\tlogger.Info(\"Reset clients 
cache\")\n\t\tcache, err := clients.NewFactory(logger, newJobsCfg, fips)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Failed to construct aws sdk v2 client cache\", \"err\", err, \"path\", configFile)\n\t\t\treturn\n\t\t}\n\n\t\tcancelRunningScrape()\n\t\tctx, cancelRunningScrape = context.WithCancel(context.Background())\n\t\tgo s.decoupled(ctx, logger, newJobsCfg, cache)\n\t})\n\n\tlogger.Info(\"Yace startup completed\", \"build_info\", version.Info(), \"build_context\", version.BuildContext(), \"feature_flags\", strings.Join(featureFlags, \",\"))\n\n\tsrv := &http.Server{Addr: addr, Handler: mux}\n\treturn srv.ListenAndServe()\n}\n\nfunc newLogger(format, level string) *slog.Logger {\n\t// If flag parsing was successful, then we know that format and level\n\t// are both valid options; no need to error check their returns, just\n\t// set their values.\n\tf := promslog.NewFormat()\n\t_ = f.Set(format)\n\n\tlvl := promslog.NewLevel()\n\t_ = lvl.Set(level)\n\n\treturn promslog.New(&promslog.Config{Format: f, Level: lvl})\n}\n"
  },
  {
    "path": "cmd/yace/main_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage main\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli/v2\"\n)\n\nfunc TestYACEApp_FeatureFlagsParsedCorrectly(t *testing.T) {\n\tapp := NewYACEApp()\n\n\t// two feature flags\n\tapp.Action = func(c *cli.Context) error {\n\t\tfeatureFlags := c.StringSlice(enableFeatureFlag)\n\t\trequire.Equal(t, []string{\"feature1\", \"feature2\"}, featureFlags)\n\t\treturn nil\n\t}\n\n\trequire.NoError(t, app.Run([]string{\"yace\", \"-enable-feature=feature1,feature2\"}), \"error running test command\")\n\n\t// empty feature flags\n\tapp.Action = func(c *cli.Context) error {\n\t\tfeatureFlags := c.StringSlice(enableFeatureFlag)\n\t\trequire.Len(t, featureFlags, 0)\n\t\treturn nil\n\t}\n\n\trequire.NoError(t, app.Run([]string{\"yace\"}), \"error running test command\")\n}\n"
  },
  {
    "path": "cmd/yace/scraper.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage main\n\nimport (\n\t\"context\"\n\t\"log/slog\"\n\t\"net/http\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n\n\texporter \"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype Scraper struct {\n\tregistry     atomic.Pointer[prometheus.Registry]\n\tfeatureFlags []string\n}\n\ntype cachingFactory interface {\n\tclients.Factory\n\tRefresh()\n\tClear()\n}\n\nfunc NewScraper(featureFlags []string) *Scraper {\n\ts := &Scraper{\n\t\tregistry:     atomic.Pointer[prometheus.Registry]{},\n\t\tfeatureFlags: featureFlags,\n\t}\n\ts.registry.Store(prometheus.NewRegistry())\n\treturn s\n}\n\nfunc (s *Scraper) makeHandler() func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\thandler := promhttp.HandlerFor(s.registry.Load(), promhttp.HandlerOpts{\n\t\t\tDisableCompression: false,\n\t\t})\n\t\thandler.ServeHTTP(w, r)\n\t}\n}\n\nfunc (s *Scraper) decoupled(ctx context.Context, logger *slog.Logger, jobsCfg model.JobsConfig, cache cachingFactory) {\n\tlogger.Debug(\"Starting scraping async\")\n\ts.scrape(ctx, logger, 
jobsCfg, cache)\n\n\tscrapingDuration := time.Duration(scrapingInterval) * time.Second\n\tticker := time.NewTicker(scrapingDuration)\n\tlogger.Debug(\"Initial scrape completed\", \"scraping_interval\", scrapingInterval)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tlogger.Debug(\"Starting scraping async\")\n\t\t\tgo s.scrape(ctx, logger, jobsCfg, cache)\n\t\t}\n\t}\n}\n\nfunc (s *Scraper) scrape(ctx context.Context, logger *slog.Logger, jobsCfg model.JobsConfig, cache cachingFactory) {\n\tif !sem.TryAcquire(1) {\n\t\t// This shouldn't happen under normal use, users should adjust their configuration when this occurs.\n\t\t// Let them know by logging a warning.\n\t\tlogger.Warn(\"Another scrape is already in process, will not start a new one. \" +\n\t\t\t\"Adjust your configuration to ensure the previous scrape completes first.\")\n\t\treturn\n\t}\n\tdefer sem.Release(1)\n\n\tnewRegistry := prometheus.NewRegistry()\n\tfor _, metric := range exporter.Metrics {\n\t\tif err := newRegistry.Register(metric); err != nil {\n\t\t\tlogger.Warn(\"Could not register cloudwatch api metric\")\n\t\t}\n\t}\n\n\t// since we have called refresh, we have loaded all the credentials\n\t// into the clients and it is now safe to call concurrently. 
Defer the\n\t// clearing, so we always clear credentials before the next scrape\n\tcache.Refresh()\n\tdefer cache.Clear()\n\n\toptions := []exporter.OptionsFunc{\n\t\texporter.MetricsPerQuery(metricsPerQuery),\n\t\texporter.LabelsSnakeCase(labelsSnakeCase),\n\t\texporter.EnableFeatureFlag(s.featureFlags...),\n\t\texporter.TaggingAPIConcurrency(tagConcurrency),\n\t}\n\n\tif cloudwatchConcurrency.PerAPILimitEnabled {\n\t\toptions = append(options, exporter.CloudWatchPerAPILimitConcurrency(cloudwatchConcurrency.ListMetrics, cloudwatchConcurrency.GetMetricData, cloudwatchConcurrency.GetMetricStatistics))\n\t} else {\n\t\toptions = append(options, exporter.CloudWatchAPIConcurrency(cloudwatchConcurrency.SingleLimit))\n\t}\n\n\terr := exporter.UpdateMetrics(\n\t\tctx,\n\t\tlogger,\n\t\tjobsCfg,\n\t\tnewRegistry,\n\t\tcache,\n\t\toptions...,\n\t)\n\tif err != nil {\n\t\tlogger.Error(\"error updating metrics\", \"err\", err)\n\t}\n\n\ts.registry.Store(newRegistry)\n\tlogger.Debug(\"Metrics scraped\")\n}\n"
  },
  {
    "path": "docker-compose/README.md",
    "content": "## Setting up a local docker-compose environment\n\nThis folder contains a [docker-compose](./docker-compose.yaml) configuration file to start a local development environment. \nThis includes:\n- YACE, using as config file [yace-config.yaml](./yace-config.yaml)\n- Prometheus, with a scraping configuration targeting YACE\n- Grafana, wih no login required and the Prometheus datasource configured\n\nDocker will mount the `~/.aws` directory in order to re-utilize the host's AWS credentials. For selecting which region\nand AWS profile to use, fill in the `AWS_REGION` and `AWS_PROFILE` variables passed to the `docker-compose up` command,\nas shown below.\n\n```bash\n# Build the YACE docker image\ndocker-compose build\n\n# Start all docker-compose resource\nAWS_REGION=us-east-1 AWS_PROFILE=sandbox docker-compose up -d \n```\n\nAfter that, Prometheus will be exposed at [http://localhost:9090](http://localhost:9090), and Grafana in [http://localhost:3000](http://localhost:3000).\n"
  },
  {
    "path": "docker-compose/docker-compose.yaml",
    "content": "version: '3.8'\n\nnetworks:\n  monitoring:\n    driver: bridge\n\nvolumes:\n  prometheus_data: {}\n\nservices:\n  grafana:\n    image: grafana/grafana:9.4.3\n    ports:\n      - 3000:3000/tcp\n    volumes:\n      - ./grafana/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yml\n    environment:\n      # configure no-login required access\n      GF_AUTH_ANONYMOUS_ORG_ROLE: \"Admin\"\n      GF_AUTH_ANONYMOUS_ENABLED: \"true\"\n      GF_AUTH_BASIC_ENABLED: \"false\"\n    networks:\n      - monitoring\n\n  prometheus:\n    image: prom/prometheus:latest\n    container_name: prometheus\n    restart: unless-stopped\n    volumes:\n      - ./prometheus.yaml:/etc/prometheus/prometheus.yaml\n      - prometheus_data:/prometheus\n    command:\n      - '--config.file=/etc/prometheus/prometheus.yaml'\n      - '--storage.tsdb.path=/prometheus'\n      - '--web.console.libraries=/etc/prometheus/console_libraries'\n      - '--web.console.templates=/etc/prometheus/consoles'\n      - '--web.enable-lifecycle'\n    ports:\n      - \"9090:9090\"\n    expose:\n      - 9090\n    networks:\n      - monitoring\n\n  yace:\n    build:\n      context: ../\n      dockerfile: Dockerfile\n    restart: always\n    environment:\n      AWS_REGION: ${AWS_REGION}\n      AWS_PROFILE: ${AWS_PROFILE}\n    expose:\n      - 8080\n    volumes:\n      - ./yace-config.yaml:/tmp/config.yml\n      - $HOME/.aws:/home/.aws:ro\n    command:\n      - -listen-address=:8080\n      - -config.file=/tmp/config.yml\n    networks:\n      - monitoring\n"
  },
  {
    "path": "docker-compose/grafana/datasource.yaml",
    "content": "apiVersion: 1\n\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    access: proxy\n    orgId: 1\n    url: http://prometheus:9090\n    basicAuth: false\n    isDefault: true\n    editable: true\n"
  },
  {
    "path": "docker-compose/prometheus.yaml",
    "content": "global:\n  scrape_interval: 1m\nscrape_configs:\n  - job_name: prometheus\n    scrape_interval: 1m\n    static_configs:\n      - targets:\n          - localhost:9090\n  - job_name: yace\n    static_configs:\n      - targets:\n          - yace:8080\n"
  },
  {
    "path": "docker-compose/yace-config.yaml",
    "content": "apiVersion: v1alpha1\nsts-region: us-east-1\ndiscovery:\n  jobs:\n    - type: AWS/ECS\n      regions: [us-east-1]\n      period: 300\n      length: 300\n      metrics:\n        - name: CPUReservation\n          statistics:\n            - Average\n        - name: MemoryReservation\n          statistics:\n            - Average\n        - name: CPUUtilization\n          statistics:\n            - Average\n        - name: MemoryUtilization\n          statistics:\n            - Average\n    - type: AWS/EC2\n      regions: [us-east-1]\n      includeContextOnInfoMetrics: true\n      metrics:\n        - name: CPUUtilization\n          statistics:\n            - Average\n"
  },
  {
    "path": "docs/configuration.md",
    "content": "# Configuration\n\nYACE has two configuration mechanisms:\n\n- [command-line flags](#command-line-flags)\n- [yaml configuration file](#yaml-configuration-file)\n\nThe command-line flags configure things which cannot change at runtime, such as the listen port for the HTTP server. The yaml file is used to configure scrape jobs and can be reloaded at runtime. The configuration file path is passed to YACE through the `-config.file` command line flag.\n\n## Command-line flags\n\nCommand-line flags are used to configure settings of the exporter which cannot be updated at runtime.\n\nAll flags may be prefixed with either one hypen or two (i.e., both `-config.file` and `--config.file` are valid).\n\n| Flag | Description | Default value |\n| --- | --- | --- |\n| `-listen-address` | Network address to listen to | `127.0.0.1:5000` |\n| `-config.file` | Path to the configuration file | `config.yml` |\n| `-log.format` | Output format of log messages. One of: [logfmt, json] | `json` |\n| `-log.level` | Log at selected level. One of: [debug, info, warn, error] | `info` |\n| `-fips` | Use FIPS compliant AWS API | `false` |\n| `-cloudwatch-concurrency` | Maximum number of concurrent requests to CloudWatch API | `5` |\n| `-cloudwatch-concurrency.per-api-limit-enabled` | Enables a concurrency limiter, that has a specific limit per CloudWatch API call. | `false` |\n| `-cloudwatch-concurrency.list-metrics-limit` | Maximum number of concurrent requests to CloudWatch `ListMetrics` API. Only applicable if `per-api-limit-enabled` is `true`. | `5` |\n| `-cloudwatch-concurrency.get-metric-data-limit` | Maximum number of concurrent requests to CloudWatch `GetMetricsData` API. Only applicable if `per-api-limit-enabled` is `true`. | `5` |\n| `-cloudwatch-concurrency.get-metric-statistics-limit` | Maximum number of concurrent requests to CloudWatch `GetMetricStatistics` API. Only applicable if `per-api-limit-enabled` is `true`. 
| `5` |\n| `-tag-concurrency` | Maximum number of concurrent requests to Resource Tagging API | `5` |\n| `-scraping-interval` | Seconds to wait between scraping the AWS metrics | `300` |\n| `-metrics-per-query` | Number of metrics made in a single GetMetricData request | `500` |\n| `-labels-snake-case`  | Output labels on metrics in snake case instead of camel case | `false` |\n| `-profiling.enabled` | Enable the /debug/pprof endpoints for profiling | `false` |\n\n## YAML configuration file\n\nTo specify which configuration file to load, pass the `-config.file` flag at the command line. The file is written in the YAML format, defined by the schema below. Brackets indicate that a parameter is optional.\n\nBelow are the top level fields of the YAML configuration file:\n\n```yaml\n# Configuration file version. Must be set to \"v1alpha1\" currently.\napiVersion: v1alpha1\n\n# STS regional endpoint (optional)\n[ sts-region: <string>]\n\n# Note that at least one of the following blocks must be defined.\n\n# Configurations for jobs of type \"auto-discovery\"\ndiscovery: <discovery_jobs_list_config>\n\n# Configurations for jobs of type \"static\"\nstatic:\n  [ - <static_job_config> ... ]\n\n# Configurations for jobs of type \"custom namespace\"\ncustomNamespace:\n  [ - <custom_namespace_job_config> ... ]\n```\n\nNote that while the `discovery`, `static` and `customNamespace` blocks are all optional, at least one of them must be defined.\n\n### `discovery_jobs_list_config`\n\nThe `discovery_jobs_list_config` block configures jobs of type \"auto-discovery\".\n\n> Note: Only [tagged resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) are discovered.\n\n```yaml\n# List of tags per service to export to all metrics\n[exportedTagsOnMetrics: <exported_tags_config> ]\n\n# List of \"auto-discovery\" jobs\njobs:\n  [ - <discovery_job_config> ... 
]\n```\n\n### `discovery_job_config`\n\nThe `discovery_job_config` block specifies the details of a job of type \"auto-discovery\".\n\n```yaml\n# List of AWS regions\nregions:\n  [ - <string> ... ]\n\n# Cloudwatch service alias (\"alb\", \"ec2\", etc) or namespace name (\"AWS/EC2\", \"AWS/S3\", etc)\ntype: <string>\n\n#  List of IAM roles to assume (optional)\nroles:\n  [ - <role_config> ... ]\n\n# List of Key/Value pairs to use for tag filtering (all must match). \n# The key is the AWS Tag key and is case-sensitive  \n# The value will be treated as a regex\nsearchTags:\n  [ - <search_tags_config> ... ]\n\n# Custom tags to be added as a list of Key/Value pairs\ncustomTags:\n  [ - <custom_tags_config> ... ]\n\n# List of metric dimensions to query. Before querying metric values, the total list of metrics will be filtered to only those that contain exactly this list of dimensions. An empty or undefined list results in all dimension combinations being included.\ndimensionNameRequirements:\n  [ - <string> ... ]\n\n# Specifies how the current time is rounded before calculating start/end times for CloudWatch GetMetricData requests.\n# This rounding is done to optimize performance of the CloudWatch request.\n# This setting only makes sense to use if, for example, you specify a very long period (such as 1 day) but want your times rounded to a shorter time (such as 5 minutes). For example, a value of 300 will round the current time to the nearest 5 minutes. If not specified, the roundingPeriod defaults to the same value as shortest period in the job.\n[ roundingPeriod: <int> ]\n\n# Passes down the flag `--recently-active PT3H` to the CloudWatch API. This will only return metrics that have been active in the last 3 hours.\n# This is useful for reducing the number of metrics returned by CloudWatch, which can be very large for some services. 
See AWS Cloudwatch API docs for [ListMetrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html) for more details.\n[ recentlyActiveOnly: <boolean> ]\n\n# Can be used to include contextual information (account_id, region, and customTags) on info metrics and cloudwatch metrics. This can be particularly \n# useful when cloudwatch metrics might not be present or when using info metrics to understand where your resources exist\n[ includeContextOnInfoMetrics: <boolean> ]\n\n# (optional) This is an experimental feature that can be used to enable enhanced metrics for specific services within this discovery job. It might be subject to changes in future releases.\nenhancedMetrics:\n    [ - <enhanced_metrics_config> ... ]\n\n# List of statistic types, e.g. \"Minimum\", \"Maximum\", etc (General Setting for all metrics in this job)\nstatistics:\n  [ - <string> ... ]\n\n# Statistic period in seconds (General Setting for all metrics in this job)\n[ period: <int> ]\n\n# How far back to request data for in seconds (General Setting for all metrics in this job)\n[ length: <int> ]\n\n# If set it will request metrics up until `current_time - delay` (General Setting for all metrics in this job)\n[ delay: <int> ]\n\n# Return 0 value if Cloudwatch returns no metrics at all. 
By default `NaN` will be reported (General Setting for all metrics in this job)\n[ nilToZero: <boolean> ]\n\n# Export the metric with the original CloudWatch timestamp (General Setting for all metrics in this job)\n[ addCloudwatchTimestamp: <boolean> ]\n\n# Enables the inclusion of past metric data points from the CloudWatch response if available.\n# This is useful when a metric is configured with a 60-second period and a 300-second duration, ensuring that all\n# five data points are exposed at the metrics endpoint instead of only the latest one.\n# Note: This option requires `addCloudwatchTimestamp` to be enabled.\n# The metric destination must support out of order timestamps, see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb\n# (General Setting for all metrics in this job)\n[ exportAllDataPoints: <boolean> ]\n\n# List of metric definitions\nmetrics:\n  [ - <metric_config> ... ]\n```\n\nExample config file:\n\n```yaml\napiVersion: v1alpha1\nsts-region: eu-west-1\ndiscovery:\n  exportedTagsOnMetrics:\n    kafka:\n      - Name\n  jobs:\n  - type: kafka\n    regions:\n      - eu-west-1\n    searchTags:\n      - key: env\n        value: dev\n    metrics:\n      - name: BytesOutPerSec\n        statistics:\n        - Average\n        period: 600\n        length: 600\n```\n\n### `static_job_config`\n\nThe `static_job_config` block configures jobs of type \"static\".\n\n```yaml\n# Name of the job (required)\nname: <string>\n\n# CloudWatch namespace\nnamespace: <string>\n\n# List of AWS regions\nregions:\n  [ - <string> ...]\n\n# List of IAM roles to assume (optional)\nroles:\n  [ - <role_config> ... ]\n\n# Custom tags to be added as a list of Key/Value pairs\ncustomTags:\n  [ - <custom_tags_config> ... ]\n\n# CloudWatch metric dimensions as a list of Name/Value pairs\ndimensions: [ <dimensions_config> ]\n\n# List of metric definitions\nmetrics:\n  [ - <metric_config> ... 
]\n```\n\nExample config file:\n\n```yaml\napiVersion: v1alpha1\nsts-region: eu-west-1\nstatic:\n  - namespace: AWS/AutoScaling\n    name: must_be_set\n    regions:\n      - eu-west-1\n    dimensions:\n     - name: AutoScalingGroupName\n       value: MyGroup\n    customTags:\n      - key: CustomTag\n        value: CustomValue\n    metrics:\n      - name: GroupInServiceInstances\n        statistics:\n        - Minimum\n        period: 60\n        length: 300\n```\n\n### `custom_namespace_job_config`\n\nThe `custom_namespace_job_config` block configures jobs of type \"custom namespace\".\n\n```yaml\n# Name of the job (required)\nname: <string>\n\n# CloudWatch namespace\nnamespace: <string>\n\n# List of AWS regions\nregions:\n  [ - <string> ...]\n\n#  List of IAM roles to assume (optional)\nroles:\n  [ - <role_config> ... ]\n\n# Custom tags to be added as a list of Key/Value pairs\ncustomTags:\n  [ - <custom_tags_config> ... ]\n\n# List of metric dimensions to query. Before querying metric values, the total list of metrics will be filtered to only those that contain exactly this list of dimensions. An empty or undefined list results in all dimension combinations being included.\ndimensionNameRequirements:\n  [ - <string> ... ]\n\n# Specifies how the current time is rounded before calculating start/end times for CloudWatch GetMetricData requests.\n# This rounding is done to optimize performance of the CloudWatch request.\n# This setting only makes sense to use if, for example, you specify a very long period (such as 1 day) but want your times rounded to a shorter time (such as 5 minutes). For example, a value of 300 will round the current time to the nearest 5 minutes. If not specified, the roundingPeriod defaults to the same value as shortest period in the job.\n[ roundingPeriod: <int> ]\n\n# Passes down the flag `--recently-active PT3H` to the CloudWatch API. 
This will only return metrics that have been active in the last 3 hours.\n# This is useful for reducing the number of metrics returned by CloudWatch, which can be very large for some services. See AWS Cloudwatch API docs for [ListMetrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html) for more details.\n[ recentlyActiveOnly: <boolean> ]\n\n# List of statistic types, e.g. \"Minimum\", \"Maximum\", etc (General Setting for all metrics in this job)\nstatistics:\n  [ - <string> ... ]\n\n# Statistic period in seconds (General Setting for all metrics in this job)\n[ period: <int> ]\n\n# How far back to request data for in seconds (General Setting for all metrics in this job)\n[ length: <int> ]\n\n# If set it will request metrics up until `current_time - delay` (General Setting for all metrics in this job)\n[ delay: <int> ]\n\n# Return 0 value if Cloudwatch returns no metrics at all. By default `NaN` will be reported (General Setting for all metrics in this job)\n[ nilToZero: <boolean> ]\n\n# Export the metric with the original CloudWatch timestamp (General Setting for all metrics in this job)\n[ addCloudwatchTimestamp: <boolean> ]\n\n# Enables the inclusion of past metric data points from the CloudWatch response if available.\n# This is useful when a metric is configured with a 60-second period and a 300-second duration, ensuring that all\n# five data points are exposed at the metrics endpoint instead of only the latest one.\n# Note: This option requires `addCloudwatchTimestamp` to be enabled.\n# The metric destination must support out of order timestamps, see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb\n# (General Setting for all metrics in this job)\n[ exportAllDataPoints: <boolean> ]\n\n# List of metric definitions\nmetrics:\n  [ - <metric_config> ... 
]\n```\n\nExample config file:\n\n```yaml\napiVersion: v1alpha1\nsts-region: eu-west-1\ncustomNamespace:\n  - name: customEC2Metrics\n    namespace: CustomEC2Metrics\n    regions:\n      - us-east-1\n    metrics:\n      - name: cpu_usage_idle\n        statistics:\n          - Average\n        period: 300\n        length: 300\n        nilToZero: true\n      - name: disk_free\n        statistics:\n          - Average\n        period: 300\n        length: 300\n        nilToZero: true\n```\n\n### `metric_config`\n\nSome settings at the job level are overridden by settings at the metric level.\nThis allows for a specific setting to override a general setting.\n\n```yaml\n# CloudWatch metric name\nname: <string>\n\n# List of statistic types, e.g. \"Minimum\", \"Maximum\", etc. (Overrides job level setting)\nstatistics:\n  [ - <string> ... ]\n\n# Statistic period in seconds (Overrides job level setting)\n[ period: <int> ]\n\n# How far back to request data for in seconds (Overrides job level setting)\n[ length: <int> ]\n\n# If set it will request metrics up until `current_time - delay` (Overrides job level setting)\n[ delay: <int> ]\n\n# Return 0 value if Cloudwatch returns no metrics at all. 
By default `NaN` will be reported (Overrides job level setting)\n[ nilToZero: <boolean> ]\n\n# Export the metric with the original CloudWatch timestamp (Overrides job level setting)\n[ addCloudwatchTimestamp: <boolean> ]\n\n# Enables the inclusion of past metric data points from the CloudWatch response if available.\n# This is useful when a metric is configured with a 60-second period and a 300-second duration, ensuring that all\n# five data points are exposed at the metrics endpoint instead of only the latest one.\n# Note: This option requires `addCloudwatchTimestamp` to be enabled.\n# The metric destination must support out of order timestamps, see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb\n# (General Setting for all metrics in this job)\n[ exportAllDataPoints: <boolean> ]\n```\n\nNotes:\n- Available statistics: `Maximum`, `Minimum`, `Sum`, `SampleCount`, `Average`, `pXX` (e.g. `p90`).\n\n- Watch out using `addCloudwatchTimestamp` for sparse metrics, e.g from S3, since Prometheus won't scrape metrics containing timestamps older than 2-3 hours. 
Also the same applies when enabling `exportAllDataPoints` in any metric.\n\n### `exported_tags_config`\n\nThis is an example of the `exported_tags_config` block:\n\n```yaml\nexportedTagsOnMetrics:\n  ebs:\n    - VolumeId\n  kafka:\n    - Name\n```\n\n### `role_config`\n\nThis is an example of the `role_config` block:\n\n```yaml\nroles:\n  - roleArn: \"arn:aws:iam::123456789012:role/Prometheus\"\n    externalId: \"shared-external-identifier\" # optional\n```\n\n### `search_tags_config`\n\nThis is an example of the `search_tags_config` block:\n\n```yaml\nsearchTags:\n  - key: env\n    value: production\n```\n\n### `custom_tags_config`\n\nThis is an example of the `custom_tags_config` block:\n\n```yaml\ncustomTags:\n  - key: CustomTag\n    value: CustomValue\n```\n\n### `dimensions_config`\n\nThis is an example of the `dimensions_config` block:\n\n```yaml\ndimensions:\n  - name: AutoScalingGroupName\n    value: MyGroup\n```\n\n### `enhanced_metrics_config`\n\nThe `enhanced_metrics_config` block allows enabling enhanced metrics for specific metrics within a discovery job.\n\nCurrently supported enhanced metrics are:\n\n- AWS/Lambda (Timeout) - The maximum execution duration permitted for the function before termination.\n- AWS/DynamoDB (ItemCount) - The count of items in the table, updated approximately every six hours; may not reflect recent changes.\n- AWS/RDS (AllocatedStorage) - The storage capacity in bytes allocated for the DB instance.\n- AWS/ElastiCache (NumCacheNodes) - The count of cache nodes in the cluster; must be 1 for Valkey or Redis OSS clusters, or between 1 and 40 for Memcached clusters.\n\n```yaml\nenhancedMetrics:\n    - name: ItemCount\n```"
  },
  {
    "path": "docs/embedding.md",
    "content": "# Embedding YACE in your application\n\nIt is possible to embed YACE into an external Go application. This mode might be useful to you if you would like to scrape on demand or run in a stateless manner.\n\nSee [`exporter.UpdateMetrics()`](https://pkg.go.dev/github.com/prometheus-community/yet-another-cloudwatch-exporter@v0.50.0/pkg#UpdateMetrics) for the documentation of the exporter entrypoint.\n\nApplications embedding YACE:\n- [Grafana Agent](https://github.com/grafana/agent/tree/release-v0.33/pkg/integrations/cloudwatch_exporter)\n"
  },
  {
    "path": "docs/feature_flags.md",
    "content": "# Feature flags\n\nList of features or changes that are disabled by default since they are breaking changes or are considered experimental. Their behavior can change in future releases which will be communicated via the release changelog.\n\nYou can enable them using the `-enable-feature` flag with a comma separated list of features. They may be enabled by default in future versions.\n\n## Always return info metrics\n\n`-enable-feature=always-return-info-metrics`\n\nReturn info metrics even if there are no CloudWatch metrics for the resource. This is useful if you want to get a complete picture of your estate, for example if you have some resources which have not yet been used.\n"
  },
  {
    "path": "docs/installation.md",
    "content": "# Installing and running YACE\n\nThere are various way to run YACE.\n\n## Binaries\n\nSee the [Releases](https://github.com/prometheus-community/yet-another-cloudwatch-exporter/releases) page to download binaries for various OS and arch.\n\n## Docker\n\nDocker images are available on GitHub Container Registry [here](https://github.com/prometheus-community/yet-another-cloudwatch-exporter/pkgs/container/yet-another-cloudwatch-exporter).\n\nThe image name is `quay.io/prometheuscommunity/yet-another-cloudwatch-exporter` and we support tags of the form `vX.Y.Z`.\n\nTo pull and run the image locally use:\n\n```shell\ndocker run -d --rm \\\n  -v $PWD/credentials:/home/.aws/credentials \\\n  -v $PWD/config.yml:/tmp/config.yml \\\n  -p 5000:5000 \\\n  --name yace quay.io/prometheuscommunity/yet-another-cloudwatch-exporter:latest\n```\n\nDo not forget the `v` prefix in the image version tag.\n\n## Docker compose\n\nSee the [docker-compose directory](../docker-compose/README.md).\n\n## Kubernetes\n\n### Install with HELM\n\nThe official [HELM chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-yet-another-cloudwatch-exporter) is the recommended way to install YACE in a Kubernetes cluster.\n\n### Install with manifests\n\nExample:\n\n```yaml\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: yace\ndata:\n  config.yml: |-\n    ---\n    # Start of config file\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: yace\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      name: yace\n  template:\n    metadata:\n      labels:\n        name: yace\n    spec:\n      containers:\n      - name: yace\n        image: quay.io/prometheuscommunity/yet-another-cloudwatch-exporter:vX.Y.Z # release version as tag - Do not forget the version 'v'\n        imagePullPolicy: IfNotPresent\n        args:\n          - \"--config.file=/tmp/config.yml\"\n        ports:\n        - name: app\n          containerPort: 5000\n        
volumeMounts:\n        - name: config-volume\n          mountPath: /tmp\n      volumes:\n      - name: config-volume\n        configMap:\n          name: yace\n```\n"
  },
  {
    "path": "examples/alb.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/ApplicationELB\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: HealthyHostCount\n          statistics: [Sum]\n        - name: UnHealthyHostCount\n          statistics: [Sum]\n        - name: RequestCount\n          statistics: [Average]\n        - name: TargetResponseTime\n          statistics: [Average]\n        - name: ActiveConnectionCount\n          statistics: [Sum]\n        - name: NewConnectionCount\n          statistics: [Sum]\n        - name: RejectedConnectionCount\n          statistics: [Sum]\n        - name: TargetConnectionErrorCount\n          statistics: [Sum]\n        - name: IPv6RequestCount\n          statistics: [Sum]\n        - name: RequestCountPerTarget\n          statistics: [Sum]\n        - name: NonStickyRequestCount\n          statistics: [Sum]\n        - name: HTTPCode_Target_2XX_Count\n          statistics: [Sum]\n        - name: HTTPCode_Target_3XX_Count\n          statistics: [Sum]\n        - name: HTTPCode_Target_4XX_Count\n          statistics: [Sum]\n        - name: HTTPCode_Target_5XX_Count\n          statistics: [Sum]\n        - name: HTTPCode_ELB_3XX_Count\n          statistics: [Sum]\n        - name: HTTPCode_ELB_4XX_Count\n          statistics: [Sum]\n        - name: HTTPCode_ELB_5XX_Count\n          statistics: [Sum]\n        - name: ProcessedBytes\n          statistics: [Sum]\n        - name: IPv6ProcessedBytes\n          statistics: [Sum]\n        - name: ConsumedLCUs\n          statistics: [Average]\n        - name: ClientTLSNegotiationErrorCount\n          statistics: [Sum]\n        - name: TargetTLSNegotiationErrorCount\n          statistics: [Sum]\n        - name: RuleEvaluations\n          statistics: [Sum]\n"
  },
  {
    "path": "examples/apigw.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/ApiGateway\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: Latency\n          statistics: [Average, Maximum, p95, p99]\n        - name: Count\n          statistics: [SampleCount, Sum]\n        - name: 4xx\n          statistics: [Sum]\n        - name: 5xx\n          statistics: [Sum]\n"
  },
  {
    "path": "examples/apprunner.yaml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - regions:\n        - us-east-1\n      period: 300\n      length: 300\n      type: AWS/AppRunner\n      metrics:\n        - name: MemoryUtilization\n          statistics:\n            - Average\n            - Maximum\n        - name: CPUUtilization\n          statistics:\n            - Average\n            - Maximum\n        - name: 2xxStatusResponses\n          statistics:\n            - Sum\n        - name: Requests\n          statistics:\n            - Sum\n        - name: RequestLatency\n          statistics:\n            - Average\n        - name: ActiveInstances\n          statistics:\n            - Maximum\n        - name: 4xxStatusResponses\n          statistics:\n            - Sum\n        - name: Concurrency\n          statistics:\n            - Maximum\n"
  },
  {
    "path": "examples/appstream.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/AppStream\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: ActualCapacity\n          statistics: [Average]\n        - name: AvailableCapacity\n          statistics: [Average]\n        - name: CapacityUtilization\n          statistics: [Average]\n        - name: DesiredCapacity\n          statistics: [Average]\n        - name: InUseCapacity\n          statistics: [Average]\n        - name: PendingCapacity\n          statistics: [Average]\n        - name: RunningCapacity\n          statistics: [Average]\n        - name: InsufficientCapacityError\n          statistics: [Average]\n"
  },
  {
    "path": "examples/backup.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/Backup\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: NumberOfBackupJobsCompleted\n          statistics: [Average]\n        - name: NumberOfBackupJobsCreated\n          statistics: [Average]\n        - name: NumberOfBackupJobsPending\n          statistics: [Average]\n        - name: NumberOfBackupJobsRunning\n          statistics: [Average]\n        - name: NumberOfBackupJobsAborted\n          statistics: [Average]\n        - name: NumberOfBackupJobsCompleted\n          statistics: [Average]\n        - name: NumberOfBackupJobsFailed\n          statistics: [Average]\n        - name: NumberOfBackupJobsExpired\n          statistics: [Average]\n"
  },
  {
    "path": "examples/cwagent.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: CWAgent\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: mem_used_percent\n          statistics: [Average]\n        - name: disk_used_percent\n          statistics: [Average]\n"
  },
  {
    "path": "examples/ds.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/DirectoryService\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: \"Bytes Sent/sec\"\n          statistics: [Average]\n        - name: \"% Processor Time\"\n          statistics: [Average]\n        - name: \"DS Directory Searches/Sec\"\n          statistics: [Average]\n        - name: \"Database Cache % Hit\"\n          statistics: [Average]\n        - name: \"% Free Space\"\n          statistics: [Sum]\n"
  },
  {
    "path": "examples/dx.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/DX\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: ConnectionState\n          statistics: [Maximum]\n        - name: VirtualInterfaceBpsIngress\n          statistics: [Average]\n        - name: VirtualInterfaceBpsEgress\n          statistics: [Average]\n        - name: VirtualInterfacePpsIngress\n          statistics: [Average]\n        - name: VirtualInterfacePpsEgress\n          statistics: [Average]\n        - name: ConnectionErrorCount\n          statistics: [Minimum, Maximum, Sum]\n"
  },
  {
    "path": "examples/ebs.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/EBS\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: VolumeReadBytes\n          statistics: [Sum]\n        - name: VolumeWriteBytes\n          statistics: [Sum]\n        - name: VolumeReadOps\n          statistics: [Average]\n        - name: VolumeWriteOps\n          statistics: [Average]\n        - name: VolumeTotalReadTime\n          statistics: [Average]\n        - name: VolumeTotalWriteTime\n          statistics: [Average]\n        - name: VolumeIdleTime\n          statistics: [Average]\n        - name: VolumeQueueLength\n          statistics: [Average]\n        - name: VolumeThroughputPercentage\n          statistics: [Average]\n        - name: VolumeConsumedReadWriteOps\n          statistics: [Average]\n        - name: BurstBalance\n          statistics: [Minimum]\n"
  },
  {
    "path": "examples/ec.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/ElastiCache\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: CPUUtilization\n          statistics: [Average]\n        - name: FreeableMemory\n          statistics: [Average]\n        - name: NetworkBytesIn\n          statistics: [Average]\n        - name: NetworkBytesOut\n          statistics: [Average]\n        - name: NetworkPacketsIn\n          statistics: [Average]\n        - name: NetworkPacketsOut\n          statistics: [Average]\n        - name: SwapUsage\n          statistics: [Average]\n        - name: CPUCreditUsage\n          statistics: [Average]\n"
  },
  {
    "path": "examples/ec2.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/EC2\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: CPUUtilization\n          statistics: [Average]\n        - name: NetworkIn\n          statistics: [Average, Sum]\n        - name: NetworkOut\n          statistics: [Average, Sum]\n        - name: NetworkPacketsIn\n          statistics: [Sum]\n        - name: NetworkPacketsOut\n          statistics: [Sum]\n        - name: DiskReadBytes\n          statistics: [Sum]\n        - name: DiskWriteBytes\n          statistics: [Sum]\n        - name: DiskReadOps\n          statistics: [Sum]\n        - name: DiskWriteOps\n          statistics: [Sum]\n        - name: StatusCheckFailed\n          statistics: [Sum]\n        - name: StatusCheckFailed_Instance\n          statistics: [Sum]\n        - name: StatusCheckFailed_System\n          statistics: [Sum]\n"
  },
  {
    "path": "examples/ecs.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/ECS\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: CPUReservation\n          statistics: [Average, Maximum]\n        - name: MemoryReservation\n          statistics: [Average, Maximum]\n        - name: CPUUtilization\n          statistics: [Average, Maximum]\n        - name: MemoryUtilization\n          statistics: [Average, Maximum]\n"
  },
  {
    "path": "examples/elb.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/ELB\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: BackendConnectionErrors\n          statistics: [Sum]\n        - name: HTTPCode_Backend_2XX\n          statistics: [Sum]\n        - name: HTTPCode_Backend_3XX\n          statistics: [Sum]\n        - name: HTTPCode_Backend_4XX\n          statistics: [Sum]\n        - name: HTTPCode_Backend_5XX\n          statistics: [Sum]\n        - name: HTTPCode_ELB_4XX\n          statistics: [Sum]\n        - name: HTTPCode_ELB_5XX\n          statistics: [Sum]\n        - name: RequestCount\n          statistics: [Sum]\n        - name: Latency\n          statistics: [Average]\n        - name: SurgeQueueLength\n          statistics: [Average]\n        - name: SpilloverCount\n          statistics: [Sum]\n        - name: HealthyHostCount\n          statistics: [Minimum, Maximum]\n        - name: UnHealthyHostCount\n          statistics: [Minimum, Maximum]\n"
  },
  {
    "path": "examples/es.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/ES\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: CPUUtilization\n          statistics: [Average]\n        - name: FreeStorageSpace\n          statistics: [Sum]\n        - name: ClusterStatus.green\n          statistics: [Maximum]\n        - name: ClusterStatus.yellow\n          statistics: [Maximum]\n        - name: ClusterStatus.red\n          statistics: [Maximum]\n        - name: Shards.active\n          statistics: [Sum]\n        - name: Shards.unassigned\n          statistics: [Sum]\n        - name: Shards.delayedUnassigned\n          statistics: [Sum]\n        - name: Shards.activePrimary\n          statistics: [Sum]\n        - name: Shards.initializing\n          statistics: [Sum]\n        - name: Shards.initializing\n          statistics: [Sum]\n        - name: Shards.relocating\n          statistics: [Sum]\n        - name: Nodes\n          statistics: [Maximum]\n        - name: SearchableDocuments\n          statistics: [Maximum]\n        - name: DeletedDocuments\n          statistics: [Maximum]\n"
  },
  {
    "path": "examples/historic-data.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/SQS\n      regions:\n        - us-east-1\n      period: 60\n      length: 300\n      addCloudwatchTimestamp: true\n      exportAllDataPoints: true\n      metrics:\n        - name: NumberOfMessagesSent\n          statistics: [Sum]\n        - name: NumberOfMessagesReceived\n          statistics: [Sum]\n        - name: NumberOfMessagesDeleted\n          statistics: [Sum]\n        - name: ApproximateAgeOfOldestMessage\n          statistics: [Average]\n        - name: NumberOfEmptyReceives\n          statistics: [Sum]\n        - name: SentMessageSize\n          statistics: [Average]\n        - name: ApproximateNumberOfMessagesNotVisible\n          statistics: [Sum]\n        - name: ApproximateNumberOfMessagesDelayed\n          statistics: [Sum]\n        - name: ApproximateNumberOfMessagesVisible\n          statistics: [Sum]\n"
  },
  {
    "path": "examples/kafka.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/Kafka\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: BytesInPerSec\n          statistics: [Average]\n        - name: BytesOutPerSec\n          statistics: [Average]\n        - name: RootDiskUsed\n          statistics: [Average]\n        - name: KafkaDataLogsDiskUsed\n          statistics: [Average]\n        - name: KafkaAppLogsDiskUsed\n          statistics: [Average]\n        - name: MemoryFree\n          statistics: [Average]\n        - name: MemoryUsed\n          statistics: [Average]\n        - name: NetworkRxPackets\n          statistics: [Average]\n        - name: NetworkTxPackets\n          statistics: [Average]\n        - name: SwapFree\n          statistics: [Average]\n        - name: SwapUsed\n          statistics: [Average]\n        - name: GlobalTopicCount\n          statistics: [Maximum]\n        - name: GlobalPartitionCount\n          statistics: [Maximum]\n        - name: CpuUser\n          statistics: [Average]\n        - name: CpuSystem\n          statistics: [Average]\n        - name: CpuIdle\n          statistics: [Average]\n"
  },
  {
    "path": "examples/kinesis.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/Kinesis\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: PutRecord.Latency\n          statistics: [Average]\n        - name: PutRecord.Success\n          statistics: [Sum]\n        - name: PutRecord.Bytes\n          statistics: [Sum]\n        - name: PutRecords.Latency\n          statistics: [Average]\n        - name: PutRecords.Records\n          statistics: [Sum]\n        - name: PutRecords.Success\n          statistics: [Sum]\n        - name: PutRecords.Bytes\n          statistics: [Sum]\n        - name: GetRecords.Latency\n          statistics: [Average]\n        - name: GetRecords.Records\n          statistics: [Sum]\n        - name: GetRecords.Success\n          statistics: [Sum]\n        - name: GetRecords.Bytes\n          statistics: [Sum]\n        - name: GetRecords.IteratorAgeMilliseconds\n          statistics: [Average]\n        - name: IncomingBytes\n          statistics: [Sum]\n        - name: IncomingRecords\n          statistics: [Sum]\n        - name: OutgoingBytes\n          statistics: [Sum]\n        - name: OutgoingRecords\n          statistics: [Sum]\n        - name: WriteProvisionedThroughputExceeded\n          statistics: [Average]\n        - name: ReadProvisionedThroughputExceeded\n          statistics: [Average]\n"
  },
  {
    "path": "examples/kms.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/KMS\n      regions:\n        - us-east-1\n      period: 300\n      metrics:\n        - name: SecondsUntilKeyMaterialExpiration\n          statistics: [Maximum, Minimum]\n"
  },
  {
    "path": "examples/lambda.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/Lambda\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: Invocations\n          statistics: [Sum]\n        - name: Errors\n          statistics: [Sum]\n        - name: Throttles\n          statistics: [Sum]\n        - name: Duration\n          statistics: [Average, Maximum, Minimum, p90]\n"
  },
  {
    "path": "examples/lambda_edge.yml",
    "content": "  # We can't configure discovery job for edge lambda function but static works.,he region is always us-east-1.\n  # Other regions can be added in use as edge locations\napiVersion: v1alpha1\nstatic:\n  - name: us-east-1.<edge_lambda_function_name>\n    namespace: AWS/Lambda\n    regions:\n      - eu-central-1\n      - us-east-1\n      - us-west-2\n      - ap-southeast-1\n    period: 600\n    length: 600\n    metrics:\n      - name: Invocations\n        statistics: [Sum]\n      - name: Errors\n        statistics: [Sum]\n      - name: Throttles\n        statistics: [Sum]\n      - name: Duration\n        statistics: [Average, Maximum, Minimum, p90]\n"
  },
  {
    "path": "examples/logs.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/Logs\n      regions:\n        - us-east-1\n      period: 60\n      length: 60\n      delay: 120\n      statistics: [Sum]\n      metrics:\n        - name: DeliveryErrors\n        - name: DeliveryThrottling\n        - name: EMFParsingErrors\n        - name: EMFValidationErrors\n        - name: ForwardedBytes\n        - name: ForwardedLogEvents\n        - name: IncomingBytes\n        - name: IncomingLogEvents\n"
  },
  {
    "path": "examples/mq.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/AmazonMQ\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: NetworkOut\n          statistics: [Minimum, Maximum, Average]\n        - name: NetworkIn\n          statistics: [Minimum, Maximum, Average]\n        - name: QueueSize\n          statistics: [Minimum, Maximum, Average]\n        - name: ConsumerCount\n          statistics: [Minimum, Maximum, Average]\n        - name: ProducerCount\n          statistics: [Minimum, Maximum, Average]\n        - name: EnqueueCount\n          statistics: [Minimum, Maximum, Average]\n        - name: DequeueCount\n          statistics: [Minimum, Maximum, Average]\n        - name: MemoryUsage\n          statistics: [Minimum, Maximum, Average]\n        - name: CpuUtilization\n          statistics: [Minimum, Maximum, Average]\n"
  },
  {
    "path": "examples/networkmanager.yml",
    "content": "# https://docs.aws.amazon.com/network-manager/latest/cloudwan/cloudwan-metrics.html\napiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/Network Manager\n      regions:\n        - us-west-2\n      period: 60\n      length: 300\n      metrics:\n        - name: BytesDropCountBlackhole\n          statistics: [Sum]\n        - name: BytesDropCountNoRoute\n          statistics: [Sum]\n        - name: BytesIn\n          statistics: [Sum]\n        - name: BytesOut\n          statistics: [Sum]\n        - name: PacketsDropCountBlackhole\n          statistics: [Sum]\n        - name: PacketsDropCountNoRoute\n          statistics: [Sum]\n        - name: PacketDropCountTTLExpired\n          statistics: [Sum]\n        - name: PacketsIn\n          statistics: [Sum]\n        - name: PacketsOut\n          statistics: [Sum]\n"
  },
  {
    "path": "examples/ngw.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/NATGateway\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: ActiveConnectionCount\n          statistics: [Average, Minimum, Maximum, Sum]\n        - name: BytesInFromDestination\n          statistics: [Average, Minimum, Maximum, Sum]\n        - name: BytesInFromSource\n          statistics: [Average, Minimum, Maximum, Sum]\n        - name: BytesOutToDestination\n          statistics: [Average, Minimum, Maximum, Sum]\n        - name: BytesOutToSource\n          statistics: [Average, Minimum, Maximum, Sum]\n        - name: ConnectionAttemptCount\n          statistics: [Average, Minimum, Maximum, Sum]\n        - name: ConnectionEstablishedCount\n          statistics: [Average, Minimum, Maximum, Sum]\n        - name: ErrorPortAllocation\n          statistics: [Average, Minimum, Maximum, Sum]\n        - name: IdleTimeoutCount\n          statistics: [Average, Minimum, Maximum, Sum]\n        - name: PacketsDropCount\n          statistics: [Average, Minimum, Maximum, Sum]\n        - name: PacketsInFromDestination\n          statistics: [Average, Minimum, Maximum, Sum]\n        - name: PacketsInFromSource\n          statistics: [Average, Minimum, Maximum, Sum]\n        - name: PacketsOutToDestination\n          statistics: [Average, Minimum, Maximum, Sum]\n        - name: PacketsOutToSource\n          statistics: [Average, Minimum, Maximum, Sum]\n"
  },
  {
    "path": "examples/nlb.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/NetworkELB\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: ActiveFlowCount\n          statistics: [Average, Minimum, Maximum]\n        - name: ActiveFlowCount_TLS\n          statistics: [Average, Minimum, Maximum]\n        - name: ActiveFlowCount_UDP\n          statistics: [Average, Minimum, Maximum]\n        - name: PortAllocationErrorCount\n          statistics: [Minimum, Maximum, Sum]\n        - name: ProcessedBytes\n          statistics: [Minimum, Maximum, Sum]\n        - name: ProcessedPackets\n          statistics: [Minimum, Maximum, Sum]\n"
  },
  {
    "path": "examples/private-link-endpoints.yaml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/PrivateLinkEndpoints\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: ActiveConnections\n          statistics: [Average]\n        - name: NewConnections\n          statistics: [Average, Sum]\n        - name: PacketsDropped\n          statistics: [Average, Sum]\n        - name: BytesProcessed\n          statistics: [Sum]\n"
  },
  {
    "path": "examples/private-link-services.yaml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/PrivateLinkServices\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: ActiveConnections\n          statistics: [Average]\n        - name: NewConnections\n          statistics: [Average, Sum]\n        - name: PacketsDropped\n          statistics: [Average, Sum]\n        - name: BytesProcessed\n          statistics: [Sum]\n"
  },
  {
    "path": "examples/qldb.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  exportedTagsOnMetrics:\n    AWS/QLDB:\n      - Name\n  jobs:\n    - type: AWS/QLDB\n      regions:\n        - us-east-2\n      period: 300\n      length: 300\n      metrics:\n        - name: JournalStorage\n          statistics:\n            - Average\n        - name: IndexedStorage\n          statistics:\n            - Average\n        - name: ReadIOs\n          statistics:\n            - Sum\n        - name: WriteIOs\n          statistics:\n            - Sum\n        - name: CommandLatency\n          statistics:\n            - Average\n        - name: OccConflictExceptions\n          statistics:\n            - Sum\n        - name: Session4xxExceptions\n          statistics:\n            - Sum\n        - name: Session5xxExceptions\n          statistics:\n            - Sum\n        - name: SessionRateExceededExceptions\n          statistics:\n            - Sum\n"
  },
  {
    "path": "examples/quicksight.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/QuickSight\n      regions:\n        - eu-west-2\n      period: 30000\n      length: 30000\n      metrics:\n        - name: IngestionErrorCount\n          statistics: [Sum]\n        - name: IngestionRowCount\n          statistics: [Sum]\n        - name: IngestionInvocationCount\n          statistics: [Sum]\n"
  },
  {
    "path": "examples/rds.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/RDS\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: CPUUtilization\n          statistics: [Maximum]\n        - name: DatabaseConnections\n          statistics: [Sum]\n        - name: FreeableMemory\n          statistics: [Average]\n        - name: FreeStorageSpace\n          statistics: [Average]\n        - name: ReadThroughput\n          statistics: [Average]\n        - name: WriteThroughput\n          statistics: [Average]\n        - name: ReadLatency\n          statistics: [Maximum]\n        - name: WriteLatency\n          statistics: [Maximum]\n        - name: ReadIOPS\n          statistics: [Average]\n        - name: WriteIOPS\n          statistics: [Average]\n"
  },
  {
    "path": "examples/redshift-serverless.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/Redshift-Serverless\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: DatabaseConnections\n          statistics: [Average]\n        - name: ComputeCapacity\n          statistics: [Average]\n        - name: QueryRuntimeBreakdown\n          statistics: [Average]\n        - name: QueriesRunning\n          statistics: [Average]\n        - name: QueriesQueued\n          statistics: [Average]\n        - name: QueryDuration\n          statistics: [Average]\n"
  },
  {
    "path": "examples/s3.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/S3\n      regions:\n        - us-east-1\n      period: 86400\n      length: 86400\n      metrics:\n        - name: NumberOfObjects\n          statistics: [Average]\n        - name: BucketSizeBytes\n          statistics: [Average]\n"
  },
  {
    "path": "examples/ses.yaml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/SES\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: Send\n          statistics: [Sum]\n        - name: Delivery\n          statistics: [Sum]\n        - name: Bounce\n          statistics: [Sum]\n        - name: Reputation.ComplaintRate\n          statistics: [Sum]\n        - name: Reputation.BounceRate\n          statistics: [Sum]\n"
  },
  {
    "path": "examples/sns.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/SNS\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: NumberOfMessagesPublished\n          statistics: [Sum]\n        - name: NumberOfNotificationsDelivered\n          statistics: [Sum]\n        - name: NumberOfNotificationsFailed\n          statistics: [Sum]\n        - name: NumberOfNotificationsFilteredOut\n          statistics: [Sum]\n        - name: PublishSize\n          statistics: [Average]\n"
  },
  {
    "path": "examples/sqs.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/SQS\n      regions:\n        - us-east-1\n      period: 60\n      length: 60\n      metrics:\n        - name: NumberOfMessagesSent\n          statistics: [Sum]\n        - name: NumberOfMessagesReceived\n          statistics: [Sum]\n        - name: NumberOfMessagesDeleted\n          statistics: [Sum]\n        - name: ApproximateAgeOfOldestMessage\n          statistics: [Average]\n        - name: NumberOfEmptyReceives\n          statistics: [Sum]\n        - name: SentMessageSize\n          statistics: [Average]\n        - name: ApproximateNumberOfMessagesNotVisible\n          statistics: [Sum]\n        - name: ApproximateNumberOfMessagesDelayed\n          statistics: [Sum]\n        - name: ApproximateNumberOfMessagesVisible\n          statistics: [Sum]\n"
  },
  {
    "path": "examples/usage.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/Usage\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: CallCount\n          statistics: [Sum]\n        - name: ResourceCount\n          statistics: [Sum]\n"
  },
  {
    "path": "examples/vpn.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/VPN\n      regions:\n        - us-east-1\n      period: 300\n      length: 300\n      metrics:\n        - name: TunnelDataIn\n          statistics: [Sum]\n        - name: TunnelDataOut\n          statistics: [Sum]\n        - name: TunnelState\n          statistics: [Maximum]\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/prometheus-community/yet-another-cloudwatch-exporter\n\ngo 1.25.0\n\nrequire (\n\tgithub.com/aws/aws-sdk-go-v2 v1.41.1\n\tgithub.com/aws/aws-sdk-go-v2/config v1.32.7\n\tgithub.com/aws/aws-sdk-go-v2/credentials v1.19.7\n\tgithub.com/aws/aws-sdk-go-v2/service/amp v1.42.5\n\tgithub.com/aws/aws-sdk-go-v2/service/apigateway v1.38.4\n\tgithub.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.33.5\n\tgithub.com/aws/aws-sdk-go-v2/service/autoscaling v1.63.0\n\tgithub.com/aws/aws-sdk-go-v2/service/cloudwatch v1.53.1\n\tgithub.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.61.5\n\tgithub.com/aws/aws-sdk-go-v2/service/dynamodb v1.53.6\n\tgithub.com/aws/aws-sdk-go-v2/service/ec2 v1.280.0\n\tgithub.com/aws/aws-sdk-go-v2/service/elasticache v1.51.9\n\tgithub.com/aws/aws-sdk-go-v2/service/iam v1.53.2\n\tgithub.com/aws/aws-sdk-go-v2/service/lambda v1.87.1\n\tgithub.com/aws/aws-sdk-go-v2/service/rds v1.114.0\n\tgithub.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.31.6\n\tgithub.com/aws/aws-sdk-go-v2/service/shield v1.34.17\n\tgithub.com/aws/aws-sdk-go-v2/service/storagegateway v1.43.10\n\tgithub.com/aws/aws-sdk-go-v2/service/sts v1.41.6\n\tgithub.com/aws/smithy-go v1.24.2\n\tgithub.com/grafana/regexp v0.0.0-20240607082908-2cb410fa05da\n\tgithub.com/prometheus/client_golang v1.23.2\n\tgithub.com/prometheus/client_model v0.6.2\n\tgithub.com/prometheus/common v0.67.5\n\tgithub.com/r3labs/diff/v3 v3.0.1\n\tgithub.com/stretchr/testify v1.11.1\n\tgithub.com/urfave/cli/v2 v2.27.7\n\tgo.uber.org/atomic v1.11.0\n\tgo.yaml.in/yaml/v2 v2.4.4\n\tgolang.org/x/exp v0.0.0-20240823005443-9b4947da3948\n\tgolang.org/x/sync v0.19.0\n)\n\nrequire (\n\tgithub.com/alecthomas/kingpin/v2 v2.4.0 // indirect\n\tgithub.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect\n\tgithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // 
indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.17 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect\n\tgithub.com/beorn7/perks v1.0.1 // indirect\n\tgithub.com/cespare/xxhash/v2 v2.3.0 // indirect\n\tgithub.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect\n\tgithub.com/davecgh/go-spew v1.1.1 // indirect\n\tgithub.com/kr/text v0.2.0 // indirect\n\tgithub.com/kylelemons/godebug v1.1.0 // indirect\n\tgithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect\n\tgithub.com/pmezard/go-difflib v1.0.0 // indirect\n\tgithub.com/prometheus/procfs v0.16.1 // indirect\n\tgithub.com/russross/blackfriday/v2 v2.1.0 // indirect\n\tgithub.com/vmihailenco/msgpack/v5 v5.3.5 // indirect\n\tgithub.com/vmihailenco/tagparser/v2 v2.0.0 // indirect\n\tgithub.com/xhit/go-str2duration/v2 v2.1.0 // indirect\n\tgithub.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect\n\tgolang.org/x/sys v0.39.0 // indirect\n\tgoogle.golang.org/protobuf v1.36.11 // indirect\n\tgopkg.in/yaml.v3 v3.0.1 // indirect\n)\n"
  },
  {
    "path": "go.sum",
    "content": "github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=\ngithub.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=\ngithub.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=\ngithub.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=\ngithub.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=\ngithub.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=\ngithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=\ngithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=\ngithub.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=\ngithub.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 
h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=\ngithub.com/aws/aws-sdk-go-v2/service/amp v1.42.5 h1:Pd07a2Tdhl3591h+hbJZCC+50NGraSyt/I6yLx4FDak=\ngithub.com/aws/aws-sdk-go-v2/service/amp v1.42.5/go.mod h1:6q5j2wH8o1tf4glByj2hBDIEiOAKDh0x5QpjLKmIi40=\ngithub.com/aws/aws-sdk-go-v2/service/apigateway v1.38.4 h1:V8gcFwJPP3eXZXpeui+p97JmO7WtCkQlEAHrE6Kyt0k=\ngithub.com/aws/aws-sdk-go-v2/service/apigateway v1.38.4/go.mod h1:iJF5UdwkFue/YuUGCFsCCdT3SBMUx0s+h5TNi0Sz+qg=\ngithub.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.33.5 h1:VUf8W+s2EQwajy6n+xCN9ctkhJsCJbpwPmzf49NtJM8=\ngithub.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.33.5/go.mod h1:0/7yOW11zIEYILivvAmnKbyvYG+34Zb/JrnywtskyLw=\ngithub.com/aws/aws-sdk-go-v2/service/autoscaling v1.63.0 h1:ffFts1+wfxmRrJ6tQJnhh6+p1TeQDplJ1iLrZopUM9w=\ngithub.com/aws/aws-sdk-go-v2/service/autoscaling v1.63.0/go.mod h1:8O5Pj92iNpfw/Fa7WdHbn6YiEjDoVdutz+9PGRNoP3Y=\ngithub.com/aws/aws-sdk-go-v2/service/cloudwatch v1.53.1 h1:ElB5x0nrBHgQs+XcpQ1XJpSJzMFCq6fDTpT6WQCWOtQ=\ngithub.com/aws/aws-sdk-go-v2/service/cloudwatch v1.53.1/go.mod h1:Cj+LUEvAU073qB2jInKV6Y0nvHX0k7bL7KAga9zZ3jw=\ngithub.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.61.5 h1:3d44lDPnuYJn1xSf7R4J2zEEL+CO5ooxci9OjI3xAh8=\ngithub.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.61.5/go.mod h1:XKPSi5JA8Wm59aLAmFoshAdBrY6YQnomNDbvYgNr/l8=\ngithub.com/aws/aws-sdk-go-v2/service/dynamodb v1.53.6 h1:LNmvkGzDO5PYXDW6m7igx+s2jKaPchpfbS0uDICywFc=\ngithub.com/aws/aws-sdk-go-v2/service/dynamodb v1.53.6/go.mod h1:ctEsEHY2vFQc6i4KU07q4n68v7BAmTbujv2Y+z8+hQY=\ngithub.com/aws/aws-sdk-go-v2/service/ec2 v1.280.0 h1:1KXSI/tWq+pdp3hz8Kfq2ngUcrBW28pIdoOhLWYHXW0=\ngithub.com/aws/aws-sdk-go-v2/service/ec2 v1.280.0/go.mod h1:Uy+C+Sc58jozdoL1McQr8bDsEvNFx+/nBY+vpO1HVUY=\ngithub.com/aws/aws-sdk-go-v2/service/elasticache v1.51.9 
h1:hTgZLyNoDWphZUtTtcvQh0LP6TZO0mtdSfZK/GObDLk=\ngithub.com/aws/aws-sdk-go-v2/service/elasticache v1.51.9/go.mod h1:91RkIYy9ubykxB50XGYDsbljLZnrZ6rp/Urt4rZrbwQ=\ngithub.com/aws/aws-sdk-go-v2/service/iam v1.53.2 h1:62G6btFUwAa5uR5iPlnlNVAM0zJSLbWgDfKOfUC7oW4=\ngithub.com/aws/aws-sdk-go-v2/service/iam v1.53.2/go.mod h1:av9clChrbZbJ5E21msSsiT2oghl2BJHfQGhCkXmhyu8=\ngithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=\ngithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=\ngithub.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.17 h1:Nhx/OYX+ukejm9t/MkWI8sucnsiroNYNGb5ddI9ungQ=\ngithub.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.17/go.mod h1:AjmK8JWnlAevq1b1NBtv5oQVG4iqnYXUufdgol+q9wg=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=\ngithub.com/aws/aws-sdk-go-v2/service/lambda v1.87.1 h1:QBdmTXWwqVgx0PueT/Xgp2+al5HR0gAV743pTzYeBRw=\ngithub.com/aws/aws-sdk-go-v2/service/lambda v1.87.1/go.mod h1:ogjbkxFgFOjG3dYFQ8irC92gQfpfMDcy1RDKNSZWXNU=\ngithub.com/aws/aws-sdk-go-v2/service/rds v1.114.0 h1:p9c6HDzx6sTf7uyc9xsQd693uzArsPrsVr9n0oRk7DU=\ngithub.com/aws/aws-sdk-go-v2/service/rds v1.114.0/go.mod h1:JBRYWpz5oXQtHgQC+X8LX9lh0FBCwRHJlWEIT+TTLaE=\ngithub.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.31.6 h1:gd7YMnFZQGdy4lERF9ffz9kbc6K/IPhCu5CrJDJr8XY=\ngithub.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.31.6/go.mod h1:lnTv81am9e2C2SjX3VKyUrKEzDADD9lKST9ou96UBoY=\ngithub.com/aws/aws-sdk-go-v2/service/shield v1.34.17 h1:XOqXVwczmfk6/GtGW7eee1RvCp7NhPKn8wYbZp+yTa8=\ngithub.com/aws/aws-sdk-go-v2/service/shield v1.34.17/go.mod 
h1:eQV3cCW6J6J+cpBitDt/tDvVTmBFTdlZdEGNKsB76O8=\ngithub.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=\ngithub.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=\ngithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=\ngithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=\ngithub.com/aws/aws-sdk-go-v2/service/storagegateway v1.43.10 h1:E0WFFeaadVwljcYiyMLtpha8GSewQJg4n0xw49MXuds=\ngithub.com/aws/aws-sdk-go-v2/service/storagegateway v1.43.10/go.mod h1:QoprJo5GSv73ompRyJRq2sXmvodjOZc3eBfvbotVefw=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=\ngithub.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng=\ngithub.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc=\ngithub.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=\ngithub.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=\ngithub.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=\ngithub.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=\ngithub.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=\ngithub.com/grafana/regexp v0.0.0-20240607082908-2cb410fa05da h1:BML5sNe+bw2uO8t8cQSwe5QhvoP04eHPF7bnaQma0Kw=\ngithub.com/grafana/regexp v0.0.0-20240607082908-2cb410fa05da/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=\ngithub.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=\ngithub.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=\ngithub.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=\ngithub.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=\ngithub.com/prometheus/client_model v0.6.2 
h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=\ngithub.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=\ngithub.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=\ngithub.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=\ngithub.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=\ngithub.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=\ngithub.com/r3labs/diff/v3 v3.0.1 h1:CBKqf3XmNRHXKmdU7mZP1w7TV0pDyVCis1AUHtA4Xtg=\ngithub.com/r3labs/diff/v3 v3.0.1/go.mod h1:f1S9bourRbiM66NskseyUdo0fTmEE0qKrikYJX63dgo=\ngithub.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=\ngithub.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=\ngithub.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=\ngithub.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=\ngithub.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=\ngithub.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=\ngithub.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=\ngithub.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=\ngithub.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=\ngithub.com/stretchr/testify v1.11.1/go.mod 
h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=\ngithub.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=\ngithub.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=\ngithub.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=\ngithub.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=\ngithub.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=\ngithub.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=\ngithub.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=\ngithub.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=\ngithub.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=\ngithub.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=\ngo.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=\ngo.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=\ngo.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=\ngo.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=\ngo.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ=\ngo.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ=\ngolang.org/x/exp v0.0.0-20240823005443-9b4947da3948 h1:kx6Ds3MlpiUHKj7syVnbp57++8WpuKPcR5yjLBjvLEA=\ngolang.org/x/exp v0.0.0-20240823005443-9b4947da3948/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=\ngolang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=\ngolang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=\ngolang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=\ngolang.org/x/sys v0.39.0/go.mod 
h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=\ngoogle.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=\ngoogle.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\n"
  },
  {
    "path": "mixin/README.md",
    "content": "# CloudWatch Mixin\n\nThis is a Prometheus [Monitoring Mixin](https://monitoring.mixins.dev/) that comes with pre-defined dashboards.\n\nIt can be installed e.g. with [Grizzly](https://grafana.github.io/grizzly).\n\nFirst, install [jsonnet-bundler](https://github.com/jsonnet-bundler/jsonnet-bundler) with\n\n```\ngo install -a github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest\n```\n\nThen install all the dependencies of this mixin:\n\n```\njb install\n```\n\nFinally, install `Grizzly` and apply the mixin to your Grafana instance:\n\n```\ngo install github.com/grafana/grizzly/cmd/grr@latest\ngrr apply mixin.libsonnet\n```\n"
  },
  {
    "path": "mixin/config.libsonnet",
    "content": "{\n  // use to override the default configuration of base mixin\n  _config+:: {\n  },\n}\n"
  },
  {
    "path": "mixin/dashboards/all.libsonnet",
    "content": "{\n  'ebs.json': import 'ebs.libsonnet',\n  'ec2.json': import 'ec2.libsonnet',\n  'lambda.json': import 'lambda.libsonnet',\n  'rds.json': import 'rds.libsonnet',\n  's3.json': import 's3.libsonnet',\n}\n"
  },
  {
    "path": "mixin/dashboards/common.libsonnet",
    "content": "{\n  // Tooltip type\n  // 0 = 'default': no shared crosshair or tooltip\n  // 1 = 'shared_crosshair': shared tooltip\n  // 2 = 'shared_tooltip': shared crosshair AND shared tooltip\n  tooltipSharedCrosshair: 1,\n\n  // Refresh\n  // 1 = 'load': Queries the data source every time the dashboard loads\n  // 2 = 'time': Queries the data source when the dashboard time range changes\n  refreshOnPageLoad: 1,\n  refreshOnTimeRangeChange: 2,\n\n  // Sorting\n  // 0: Without Sort,\n  // 1: Alphabetical (asc)\n  // 2: Alphabetical (desc)\n  // 3: Numerical (asc)\n  // 4: Numerical (desc)\n  sortAlphabeticalAsc: 1,\n}\n"
  },
  {
    "path": "mixin/dashboards/ebs.libsonnet",
    "content": "local common = import 'common.libsonnet';\nlocal grafana = import 'grafonnet-7.0/grafana.libsonnet';\n\nlocal allLabels = 'job=~\"$job\", region=~\"$region\", dimension_VolumeId=~\"$volume\"';\n\ngrafana.dashboard.new(\n  title='AWS EBS',\n  description='Visualize Amazon EBS metrics',\n  tags=['Amazon', 'AWS', 'CloudWatch', 'EBS'],\n  graphTooltip=common.tooltipSharedCrosshair,\n)\n.addTemplate(\n  grafana.template.datasource.new(\n    name='datasource',\n    query='prometheus',\n    label='Data Source',\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='job',\n    label='job',\n    datasource='$datasource',\n    query='label_values(aws_ebs_volume_idle_time_average, job)',\n    refresh=common.refreshOnPageLoad,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n    allValue='.+',\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='region',\n    label='Region',\n    datasource='$datasource',\n    query='label_values(aws_ebs_volume_idle_time_average, region)',\n    refresh=common.refreshOnTimeRangeChange,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='volume',\n    label='Volume',\n    datasource='$datasource',\n    query='label_values(aws_ebs_volume_idle_time_average{job=~\"$job\", region=~\"$region\"}, dimension_VolumeId)',\n    refresh=common.refreshOnTimeRangeChange,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n  )\n)\n.addPanels(\n  [\n    grafana.panel.text.new(\n      title='Info',\n      content=|||\n        Showing metrics only for AWS resources that have tags assigned to them. 
For more information, see [Amazon CloudWatch Metrics for Amazon EBS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cloudwatch_ebs.html).\n      |||,\n    )\n    .setGridPos(w=24, h=3),\n\n    grafana.panel.graph.new(\n      title='Volume read bandwidth (bytes)',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8)\n    .addYaxis(format='bps', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ebs_volume_read_bytes_sum{%s}' % [allLabels],\n        legendFormat='{{dimension_VolumeId}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Volume write bandwidth (bytes)',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=12)\n    .addYaxis(format='bps', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ebs_volume_write_bytes_sum{%s}' % [allLabels],\n        legendFormat='{{dimension_VolumeId}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Volume read throughput (operations)',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=0, y=8)\n    .addYaxis(format='ops', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ebs_volume_read_ops_average{%s}' % [allLabels],\n        legendFormat='{{dimension_VolumeId}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Volume write throughput (operations)',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=12, y=8)\n    .addYaxis(format='ops', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ebs_volume_write_ops_average{%s}' % [allLabels],\n        legendFormat='{{dimension_VolumeId}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Volume idle time',\n      
datasource='$datasource',\n    )\n    .setGridPos(w=8, h=8, x=0, y=16)\n    .addYaxis(\n      format='percent',\n      max=100,\n      min=0,\n    )\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ebs_volume_idle_time_average{%s}' % [allLabels],\n        legendFormat='{{dimension_VolumeId}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Volume total read time',\n      datasource='$datasource',\n    )\n    .setGridPos(w=8, h=8, x=8, y=16)\n    .addYaxis(\n      format='percent',\n      max=100,\n      min=0,\n    )\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ebs_volume_total_read_time_average{%s}' % [allLabels],\n        legendFormat='{{dimension_VolumeId}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Volume total write time',\n      datasource='$datasource',\n    )\n    .setGridPos(w=8, h=8, x=16, y=16)\n    .addYaxis(\n      format='percent',\n      max=100,\n      min=0,\n    )\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ebs_volume_total_write_time_average{%s}' % [allLabels],\n        legendFormat='{{dimension_VolumeId}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Volume queue length (bytes)',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=0, y=24)\n    .addYaxis(format='short', min=0, max=1)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ebs_volume_queue_length_average{%s}' % [allLabels],\n        legendFormat='{{dimension_VolumeId}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Volume throughput percentage',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=12, y=24)\n    .addYaxis(\n      format='percent',\n      
max=100,\n      min=0,\n    )\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ebs_volume_throughput_percentage_average{%s}' % [allLabels],\n        legendFormat='{{dimension_VolumeId}}',\n        datasource='$datasource',\n      ),\n    ),\n\n\n    grafana.panel.graph.new(\n      title='Burst balance',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=0, y=32)\n    .addYaxis(\n      format='percent',\n      max=100,\n      min=0,\n    )\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ebs_burst_balance_average{%s}' % [allLabels],\n        legendFormat='{{dimension_VolumeId}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Volume consumed r/w operations',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=12, y=32)\n    .addYaxis(format='short')\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ebs_volume_consumed_read_write_ops_average{%s}' % [allLabels],\n        legendFormat='{{dimension_VolumeId}}',\n        datasource='$datasource',\n      ),\n    ),\n\n  ]\n)\n"
  },
  {
    "path": "mixin/dashboards/ec2.libsonnet",
    "content": "local common = import 'common.libsonnet';\nlocal grafana = import 'grafonnet-7.0/grafana.libsonnet';\n\nlocal allLabels = 'job=~\"$job\", region=~\"$region\", dimension_InstanceId=~\"$instance\"';\n\ngrafana.dashboard.new(\n  title='AWS EC2',\n  description='Visualize Amazon EC2 metrics',\n  tags=['Amazon', 'AWS', 'CloudWatch', 'EC2'],\n  graphTooltip=common.tooltipSharedCrosshair,\n)\n.addTemplate(\n  grafana.template.datasource.new(\n    name='datasource',\n    query='prometheus',\n    label='Data Source',\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='job',\n    label='job',\n    datasource='$datasource',\n    query='label_values(aws_ec2_cpuutilization_maximum, job)',\n    refresh=common.refreshOnPageLoad,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n    allValue='.+',\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='region',\n    label='Region',\n    datasource='$datasource',\n    query='label_values(aws_ec2_cpuutilization_maximum, region)',\n    refresh=common.refreshOnTimeRangeChange,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='instance',\n    label='instance',\n    datasource='$datasource',\n    query='label_values(aws_ec2_cpuutilization_maximum{job=~\"$job\", region=~\"$region\"}, dimension_InstanceId)',\n    refresh=common.refreshOnTimeRangeChange,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n    allValue='.+',\n  )\n)\n.addPanels(\n  [\n    grafana.panel.text.new(\n      title='Info',\n      content=|||\n        Showing metrics only for AWS resources that have tags assigned to them. 
For more information, see [Amazon CloudWatch Metrics for Amazon EC2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/viewing_metrics_with_cloudwatch.html).\n      |||,\n    )\n    .setGridPos(w=24, h=3),\n\n    grafana.panel.graph.new(\n      title='CPU utilization',\n      datasource='$datasource',\n    )\n    .setGridPos(w=24, h=8, x=0, y=3)\n    .addYaxis(\n      format='percent',\n      max=100,\n      min=0,\n    )\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ec2_cpuutilization_maximum{%s}' % [allLabels],\n        legendFormat='{{dimension_InstanceId}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Average network traffic',\n      datasource='$datasource',\n    )\n    .setGridPos(w=24, h=8, x=0, y=11)\n    .addYaxis(\n      format='bps',\n      label='bytes in (+) / out (-)'\n    )\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ec2_network_in_average{%s}' % [allLabels],\n        legendFormat='{{dimension_InstanceId}} inbound',\n        datasource='$datasource',\n      ),\n    )\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_ec2_network_out_average{%s}' % [allLabels],\n        legendFormat='{{dimension_InstanceId}} outbound',\n        datasource='$datasource',\n      ),\n    )\n    .addSeriesOverride(alias='/.*outbound/', transform='negative-Y'),\n\n    grafana.panel.row.new(\n      title='Network details',\n    )\n    .setGridPos(w=12, h=16, x=0, y=19)\n    .addPanel(\n      grafana.panel.graph.new(\n        title='Inbound network traffic',\n        datasource='$datasource',\n      )\n      .setGridPos(w=12, h=8, x=0, y=19)\n      .addYaxis(\n        format='bps',\n        min=0,\n      )\n      .addYaxis()\n      .addTarget(\n        grafana.target.prometheus.new(\n          expr='aws_ec2_network_in_sum{%s}' % [allLabels],\n          legendFormat='{{dimension_InstanceId}}',\n          
datasource='$datasource',\n        ),\n      ),\n    )\n    .addPanel(\n      grafana.panel.graph.new(\n        title='Outbound network traffic',\n        datasource='$datasource',\n      )\n      .setGridPos(w=12, h=8, x=12, y=19)\n      .addYaxis(format='bps', min=0)\n      .addYaxis()\n      .addTarget(\n        grafana.target.prometheus.new(\n          expr='aws_ec2_network_out_sum{%s}' % [allLabels],\n          legendFormat='{{dimension_InstanceId}}',\n          datasource='$datasource',\n        ),\n      ),\n    )\n    .addPanel(\n      grafana.panel.graph.new(\n        title='Inbound network packets',\n        datasource='$datasource',\n      )\n      .setGridPos(w=12, h=8, x=0, y=27)\n      .addYaxis(format='pps', min=0)\n      .addYaxis()\n      .addTarget(\n        grafana.target.prometheus.new(\n          expr='aws_ec2_network_packets_in_sum{%s}' % [allLabels],\n          legendFormat='{{dimension_InstanceId}}',\n          datasource='$datasource',\n        ),\n      ),\n    )\n    .addPanel(\n      grafana.panel.graph.new(\n        title='Outbound network packets',\n        datasource='$datasource',\n      )\n      .setGridPos(w=12, h=8, x=12, y=27)\n      .addYaxis(format='pps', min=0)\n      .addYaxis()\n      .addTarget(\n        grafana.target.prometheus.new(\n          expr='aws_ec2_network_packets_out_sum{%s}' % [allLabels],\n          legendFormat='{{dimension_InstanceId}}',\n          datasource='$datasource',\n        ),\n      ),\n    ),\n\n    grafana.panel.row.new(\n      title='Disk details',\n    )\n    .setGridPos(w=24, h=18, x=0, y=35)\n    .addPanel(\n      grafana.panel.text.new(\n        content='The following metrics are reported for EC2 Instance Store Volumes. 
For Amazon EBS volumes, see the EBS dashboard.',\n      )\n      .setGridPos(w=24, h=2, x=0, y=35),\n    )\n    .addPanel(\n      grafana.panel.graph.new(\n        title='Disk reads (bytes)',\n        datasource='$datasource',\n      )\n      .setGridPos(w=12, h=8, x=0, y=37)\n      .addYaxis(format='bps', min=0)\n      .addYaxis()\n      .addTarget(\n        grafana.target.prometheus.new(\n          expr='aws_ec2_disk_read_bytes_sum{%s}' % [allLabels],\n          legendFormat='{{dimension_InstanceId}}',\n          datasource='$datasource',\n        ),\n      ),\n    )\n    .addPanel(\n      grafana.panel.graph.new(\n        title='Disk writes (bytes)',\n        datasource='$datasource',\n      )\n      .setGridPos(w=12, h=8, x=12, y=37)\n      .addYaxis(format='bps', min=0)\n      .addYaxis()\n      .addTarget(\n        grafana.target.prometheus.new(\n          expr='aws_ec2_disk_write_bytes_sum{%s}' % [allLabels],\n          legendFormat='{{dimension_InstanceId}}',\n          datasource='$datasource',\n        ),\n      ),\n    )\n    .addPanel(\n      grafana.panel.graph.new(\n        title='Disk read (operations)',\n        datasource='$datasource',\n      )\n      .setGridPos(w=12, h=8, x=0, y=45)\n      .addYaxis(format='pps', min=0)\n      .addYaxis()\n      .addTarget(\n        grafana.target.prometheus.new(\n          expr='aws_ec2_disk_read_ops_sum{%s}' % [allLabels],\n          legendFormat='{{dimension_InstanceId}}',\n          datasource='$datasource',\n        ),\n      ),\n    )\n    .addPanel(\n      grafana.panel.graph.new(\n        title='Disk write (operations)',\n        datasource='$datasource',\n      )\n      .setGridPos(w=12, h=8, x=12, y=45)\n      .addYaxis(format='pps', min=0)\n      .addYaxis()\n      .addTarget(\n        grafana.target.prometheus.new(\n          expr='aws_ec2_disk_write_ops_sum{%s}' % [allLabels],\n          legendFormat='{{dimension_InstanceId}}',\n          datasource='$datasource',\n        ),\n      ),\n    ),\n\n   
 grafana.panel.row.new(\n      title='Status checks',\n    )\n    .setGridPos(w=24, h=8, x=0, y=53)\n    .addPanel(\n      grafana.panel.graph.new(\n        title='Status check failed (system)',\n        datasource='$datasource',\n      )\n      .setGridPos(w=8, h=8, x=0, y=53)\n      .addYaxis(min=0)\n      .addYaxis()\n      .addTarget(\n        grafana.target.prometheus.new(\n          expr='aws_ec2_status_check_failed_system_sum{%s}' % [allLabels],\n          legendFormat='{{dimension_InstanceId}}',\n          datasource='$datasource',\n        ),\n      ),\n    )\n    .addPanel(\n      grafana.panel.graph.new(\n        title='Status check failed (instance)',\n        datasource='$datasource',\n      )\n      .setGridPos(w=8, h=8, x=8, y=53)\n      .addYaxis(min=0)\n      .addYaxis()\n      .addTarget(\n        grafana.target.prometheus.new(\n          expr='aws_ec2_status_check_failed_instance_sum{%s}' % [allLabels],\n          legendFormat='{{dimension_InstanceId}}',\n          datasource='$datasource',\n        ),\n      ),\n    )\n    .addPanel(\n      grafana.panel.graph.new(\n        title='Status check failed (all)',\n        datasource='$datasource',\n      )\n      .setGridPos(w=8, h=8, x=16, y=53)\n      .addYaxis(min=0)\n      .addYaxis()\n      .addTarget(\n        grafana.target.prometheus.new(\n          expr='aws_ec2_status_check_failed_sum{%s}' % [allLabels],\n          legendFormat='{{dimension_InstanceId}}',\n          datasource='$datasource',\n        ),\n      ),\n    ),\n  ],\n)\n"
  },
  {
    "path": "mixin/dashboards/lambda.libsonnet",
    "content": "local common = import 'common.libsonnet';\nlocal grafana = import 'grafonnet-7.0/grafana.libsonnet';\n\nlocal allLabels = 'job=~\"$job\", region=~\"$region\", dimension_FunctionName=~\"$function_name\", dimension_Resource=~\"$resource\", dimension_ExecutedVersion=~\"$executed_version\"';\n\ngrafana.dashboard.new(\n  title='AWS Lambda',\n  description='Visualize Amazon Lambda metrics',\n  tags=['Amazon', 'AWS', 'CloudWatch', 'Lambda'],\n  graphTooltip=common.tooltipSharedCrosshair,\n)\n.addTemplate(\n  grafana.template.datasource.new(\n    name='datasource',\n    query='prometheus',\n    label='Data Source',\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='job',\n    label='job',\n    datasource='$datasource',\n    query='label_values(aws_lambda_invocations_sum, job)',\n    refresh=common.refreshOnPageLoad,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n    allValue='.+',\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='region',\n    label='Region',\n    datasource='$datasource',\n    query='label_values(aws_lambda_invocations_sum, region)',\n    refresh=common.refreshOnTimeRangeChange,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='function_name',\n    label='Function name',\n    datasource='$datasource',\n    query='label_values(aws_lambda_invocations_sum{job=~\"$job\", region=~\"$region\"}, dimension_FunctionName)',\n    refresh=common.refreshOnTimeRangeChange,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n    allValue='.+',\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='resource',\n    label='Resource',\n    datasource='$datasource',\n    query='label_values(aws_lambda_invocations_sum{job=~\"$job\", region=~\"$region\", dimension_FunctionName=~\"$function_name\"}, dimension_Resource)',\n    refresh=common.refreshOnTimeRangeChange,\n    
includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='executed_version',\n    label='Executed Version',\n    datasource='$datasource',\n    query='label_values(aws_lambda_invocations_sum{job=~\"$job\", region=~\"$region\", dimension_FunctionName=~\"$function_name\", dimension_Resource=~\"$resource\"}, dimension_ExecutedVersion)',\n    refresh=common.refreshOnTimeRangeChange,\n    allValue='.*',\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n  )\n)\n.addPanels(\n  [\n    grafana.panel.text.new(\n      title='Info',\n      content=|||\n        Showing metrics only for AWS resources that have tags assigned to them. For more information, see [Amazon CloudWatch Metrics for Amazon Lambda](https://docs.aws.amazon.com/lambda/latest/dg/monitoring-metrics.html).\n      |||,\n    )\n    .setGridPos(w=24, h=3),\n\n    grafana.panel.graph.new(\n      title='Invocations',\n      description='The number of times your function code is executed.',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8)\n    .addYaxis(format='short', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='sum by (dimension_FunctionName) (aws_lambda_invocations_sum{%s})' % [allLabels],\n        legendFormat='{{dimension_FunctionName}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Errors',\n      description='The number of invocations that result in a function error.',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=12)\n    .addYaxis(format='short', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='sum by (dimension_FunctionName) (aws_lambda_errors_sum{%s})' % [allLabels],\n        legendFormat='{{dimension_FunctionName}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      
title='Throttles',\n      description='The number of invocation requests that are throttled.',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8)\n    .addYaxis(format='short', min=0, decimals=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='sum by (dimension_FunctionName) (aws_lambda_throttles_sum{%s})' % [allLabels],\n        legendFormat='{{dimension_FunctionName}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Duration',\n      description='The time that your function code spends processing an event.',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=12)\n    .addYaxis(format='ms', min=0, decimals=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='sum by (dimension_FunctionName) (aws_lambda_duration_p90{%s})' % [allLabels],\n        legendFormat='{{dimension_FunctionName}} (p90)',\n        datasource='$datasource',\n      ),\n    )\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='sum by (dimension_FunctionName) (aws_lambda_duration_minimum{%s})' % [allLabels],\n        legendFormat='{{dimension_FunctionName}} (min)',\n        datasource='$datasource',\n      ),\n    )\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='sum by (dimension_FunctionName) (aws_lambda_duration_maximum{%s})' % [allLabels],\n        legendFormat='{{dimension_FunctionName}} (max)',\n        datasource='$datasource',\n      ),\n    ),\n  ]\n)\n"
  },
  {
    "path": "mixin/dashboards/rds.libsonnet",
    "content": "local common = import 'common.libsonnet';\nlocal grafana = import 'grafonnet-7.0/grafana.libsonnet';\n\nlocal allLabels = 'job=~\"$job\", region=~\"$region\", dimension_DBInstanceIdentifier=~\"$instance\"';\n\ngrafana.dashboard.new(\n  title='AWS RDS',\n  description='Visualize Amazon RDS metrics',\n  tags=['Amazon', 'AWS', 'CloudWatch', 'RDS'],\n  graphTooltip=common.tooltipSharedCrosshair,\n)\n.addTemplate(\n  grafana.template.datasource.new(\n    name='datasource',\n    query='prometheus',\n    label='Data Source',\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='job',\n    label='job',\n    datasource='$datasource',\n    query='label_values(aws_rds_database_connections_sum, job)',\n    refresh=common.refreshOnPageLoad,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n    allValue='.+',\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='region',\n    label='Region',\n    datasource='$datasource',\n    query='label_values(aws_rds_database_connections_sum, region)',\n    refresh=common.refreshOnTimeRangeChange,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='instance',\n    label='instance',\n    datasource='$datasource',\n    query='label_values(aws_rds_database_connections_sum{job=~\"$job\", region=~\"$region\"}, dimension_DBInstanceIdentifier)',\n    refresh=common.refreshOnTimeRangeChange,\n    allValue='.+',\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n  )\n)\n.addPanels(\n  [\n    grafana.panel.text.new(\n      title='Info',\n      content=|||\n        Showing metrics only for AWS resources that have tags assigned to them. 
For more information, see [Amazon CloudWatch Metrics for Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/monitoring-cloudwatch.html).\n      |||,\n    )\n    .setGridPos(w=24, h=3),\n\n    grafana.panel.graph.new(\n      title='CPU utilization average',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8)\n    .addYaxis(\n      format='percent',\n      max=100,\n      min=0,\n    )\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_rds_cpuutilization_average{%s}' % [allLabels],\n        legendFormat='{{dimension_DBInstanceIdentifier}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='CPU utilization maximum',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=12)\n    .addYaxis(\n      format='percent',\n      max=100,\n      min=0,\n    )\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_rds_cpuutilization_maximum{%s}' % [allLabels],\n        legendFormat='{{dimension_DBInstanceIdentifier}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Database connections average',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8)\n    .addYaxis(min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_rds_database_connections_average{%s}' % [allLabels],\n        legendFormat='{{dimension_DBInstanceIdentifier}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Database connections count',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=12)\n    .addYaxis(min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_rds_database_connections_sum{%s}' % [allLabels],\n        legendFormat='{{dimension_DBInstanceIdentifier}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    
grafana.panel.graph.new(\n      title='Free storage space',\n      datasource='$datasource',\n    )\n    .setGridPos(w=24, h=8)\n    .addYaxis(format='bytes', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_rds_free_storage_space_average{%s}' % [allLabels],\n        legendFormat='{{dimension_DBInstanceIdentifier}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Freeable memory',\n      datasource='$datasource',\n    )\n    .setGridPos(w=24, h=8)\n    .addYaxis(format='bytes', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_rds_freeable_memory_average{%s}' % [allLabels],\n        legendFormat='{{dimension_DBInstanceIdentifier}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Disk read throughput (bytes)',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8)\n    .addYaxis(format='bps', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_rds_read_throughput_average{%s}' % [allLabels],\n        legendFormat='{{dimension_DBInstanceIdentifier}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Disk write throughput (bytes)',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=12)\n    .addYaxis(format='bps', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_rds_write_throughput_average{%s}' % [allLabels],\n        legendFormat='{{dimension_DBInstanceIdentifier}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Disk read IOPS',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8)\n    .addYaxis(format='ops', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        
expr='aws_rds_read_iops_average{%s}' % [allLabels],\n        legendFormat='{{dimension_DBInstanceIdentifier}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Disk write IOPS',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=12)\n    .addYaxis(format='ops', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_rds_write_iops_average{%s}' % [allLabels],\n        legendFormat='{{dimension_DBInstanceIdentifier}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Disk read latency',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8)\n    .addYaxis(format='ms', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_rds_read_latency_maximum{%s}' % [allLabels],\n        legendFormat='{{dimension_DBInstanceIdentifier}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Disk write latency',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=12)\n    .addYaxis(format='ms', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='aws_rds_write_latency_maximum{%s}' % [allLabels],\n        legendFormat='{{dimension_DBInstanceIdentifier}}',\n        datasource='$datasource',\n      ),\n    ),\n  ]\n)\n"
  },
  {
    "path": "mixin/dashboards/s3.libsonnet",
    "content": "local common = import 'common.libsonnet';\nlocal grafana = import 'grafonnet-7.0/grafana.libsonnet';\n\nlocal allLabels = 'job=~\"$job\", region=~\"$region\", dimension_BucketName=~\"$bucket\"';\n\ngrafana.dashboard.new(\n  title='AWS S3',\n  description='Visualize Amazon S3 metrics',\n  tags=['Amazon', 'AWS', 'CloudWatch', 'S3'],\n  graphTooltip=common.tooltipSharedCrosshair,\n)\n.addTemplate(\n  grafana.template.datasource.new(\n    name='datasource',\n    query='prometheus',\n    label='Data Source',\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='job',\n    label='job',\n    datasource='$datasource',\n    query='label_values(aws_s3_number_of_objects_average, job)',\n    refresh=common.refreshOnPageLoad,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n    allValue='.+',\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='region',\n    label='Region',\n    datasource='$datasource',\n    query='label_values(aws_s3_number_of_objects_average, region)',\n    refresh=common.refreshOnTimeRangeChange,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='bucket',\n    label='Bucket',\n    datasource='$datasource',\n    query='label_values(aws_s3_number_of_objects_average, dimension_BucketName)',\n    refresh=common.refreshOnTimeRangeChange,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n  )\n)\n.addTemplate(\n  grafana.template.query.new(\n    name='filter_id',\n    label='FilterId',\n    datasource='$datasource',\n    query='label_values(aws_s3_all_requests_sum{dimension_BucketName=~\"$bucket\"}, dimension_FilterId)',\n    refresh=common.refreshOnTimeRangeChange,\n    includeAll=true,\n    multi=true,\n    sort=common.sortAlphabeticalAsc,\n  )\n)\n.addPanels(\n  [\n    grafana.panel.text.new(\n      title='Info',\n      content=|||\n        Showing metrics only for AWS 
resources that have tags assigned to them. For more information, see [Amazon CloudWatch Metrics for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/metrics-dimensions.html).\n      |||,\n    )\n    .setGridPos(w=24, h=3),\n\n    grafana.panel.stat.new(\n      title='Total number of objects',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=4, x=0, y=3)\n    .setFieldConfig(min=0)\n    .setOptions(calcs=['lastNotNull'], colorMode='none')\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='sum(last_over_time(aws_s3_number_of_objects_average{job=~\"$job\"}[1d]) > 0)',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.stat.new(\n      title='Total buckets size',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=4, x=12, y=3)\n    .setFieldConfig(unit='bytes', min=0)\n    .setOptions(calcs=['lastNotNull'], colorMode='none')\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='sum(last_over_time(aws_s3_bucket_size_bytes_average{job=~\"$job\"}[1d]) > 0)',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Number of objects',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=0, y=7)\n    .addYaxis(format='short', min=0, decimals=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='last_over_time(aws_s3_number_of_objects_average{%s}[1d])' % [allLabels],\n        legendFormat='{{dimension_BucketName}}',\n        datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.graph.new(\n      title='Bucket size',\n      datasource='$datasource',\n    )\n    .setGridPos(w=12, h=8, x=12, y=7)\n    .addYaxis(format='bytes', min=0)\n    .addYaxis()\n    .addTarget(\n      grafana.target.prometheus.new(\n        expr='last_over_time(aws_s3_bucket_size_bytes_average{%s}[1d])' % [allLabels],\n        legendFormat='{{dimension_BucketName}}',\n        
datasource='$datasource',\n      ),\n    ),\n\n    grafana.panel.row.new(\n      title='Request metrics',\n      datasource='$datasource',\n    )\n    .setGridPos(w=24, h=1, x=0, y=15)\n    .addPanel(\n      grafana.panel.text.new(\n        title='Info',\n        content=|||\n          Enable [Requests metrics](https://docs.aws.amazon.com/AmazonS3/latest/userguide/cloudwatch-monitoring.html) from the AWS console and create a Filter to make sure your requests metrics are reported.\n        |||,\n      )\n      .setGridPos(w=24, h=2, x=0, y=16),\n    )\n    .addPanel(\n      grafana.panel.graph.new(\n        title='Request latency (p95)',\n        datasource='$datasource',\n      )\n      .setGridPos(w=12, h=8, x=0, y=18)\n      .addYaxis(format='ms', min=0, decimals=1)\n      .addYaxis()\n      .addTarget(\n        grafana.target.prometheus.new(\n          expr='rate(aws_s3_total_request_latency_p95{%s, dimension_FilterId=~\"$filter_id\"}[2h]) * 1e3' % [allLabels],\n          legendFormat='{{dimension_BucketName}}',\n          datasource='$datasource',\n        ),\n      ),\n    )\n    .addPanel(\n      grafana.panel.graph.new(\n        title='Errors count',\n        datasource='$datasource',\n      )\n      .setGridPos(w=12, h=8, x=12, y=18)\n      .addYaxis(format='short', min=0, decimals=0)\n      .addYaxis()\n      .addTarget(\n        grafana.target.prometheus.new(\n          expr='aws_s3_4xx_errors_sum{%s, dimension_FilterId=~\"$filter_id\"}' % [allLabels],\n          legendFormat='{{dimension_BucketName}}',\n          datasource='$datasource',\n        ),\n      ),\n    ),\n\n  ]\n)\n"
  },
  {
    "path": "mixin/jsonnetfile.json",
    "content": "{\n  \"version\": 1,\n  \"dependencies\": [\n    {\n      \"source\": {\n        \"git\": {\n          \"remote\": \"https://github.com/grafana/grafonnet-lib.git\",\n          \"subdir\": \"grafonnet-7.0\"\n        }\n      },\n      \"version\": \"master\"\n    }\n  ],\n  \"legacyImports\": true\n}\n"
  },
  {
    "path": "mixin/jsonnetfile.lock.json",
    "content": "{\n  \"version\": 1,\n  \"dependencies\": [\n    {\n      \"source\": {\n        \"git\": {\n          \"remote\": \"https://github.com/grafana/grafonnet-lib.git\",\n          \"subdir\": \"grafonnet-7.0\"\n        }\n      },\n      \"version\": \"30280196507e0fe6fa978a3e0eaca3a62844f817\",\n      \"sum\": \"gCtR9s/4D5fxU9aKXg0Bru+/njZhA0YjLjPiASc61FM=\"\n    }\n  ],\n  \"legacyImports\": false\n}\n"
  },
  {
    "path": "mixin/mixin.libsonnet",
    "content": "{\n  local config = import './config.libsonnet',\n  local util = import './util.libsonnet',\n  local mixin = (import './dashboards/all.libsonnet') + config,\n  grafanaDashboards+::\n    {\n      [fname]: util.decorate_dashboard(mixin[fname], tags=['cloudwatch-integration']) + { uid: std.md5(fname) }\n      for fname in std.objectFields(mixin)\n    },\n\n  prometheusAlerts+:: if std.objectHasAll(mixin, 'prometheusAlerts') then mixin.prometheusAlerts else {},\n  prometheusRules+:: if std.objectHasAll(mixin, 'prometheusRules') then mixin.prometheusRules else {},\n}\n"
  },
  {
    "path": "mixin/util.libsonnet",
    "content": "{\n  decorate_dashboard(dashboard, tags, refresh='30s', timeFrom='now-30m')::\n    dashboard {\n      editable: false,\n      id: null,  // If id is set the grafana client will try to update instead of create\n      tags: tags,\n      refresh: refresh,\n      time: {\n        from: timeFrom,\n        to: 'now',\n      },\n      templating: {\n        list+: [\n          if std.objectHas(t, 'query') && t.query == 'prometheus' then t { regex: '(?!grafanacloud-usage|grafanacloud-ml-metrics).+' } else t\n          for t in dashboard.templating.list\n        ],\n      },\n    },\n}\n"
  },
  {
    "path": "pkg/clients/README.md",
    "content": "# Purpose of the clients package\nThe goal of this package is to abstract away as much of the AWS SDK implementation details as possible. YACE uses\n[AWS SDK for Go v2](https://aws.github.io/aws-sdk-go-v2/docs/) exclusively (SDK v1 support was removed in v0.64.0).\n\nThe folder structure isolates common interfaces from their implementations:\n\n```\n/clients: Factory interface and CachingFactory implementation\n/clients/account: account interface and implementation for looking up AWS account info\n/clients/cloudwatch: cloudwatch interface and implementation for gathering metrics data\n/clients/tagging: tagging interface and implementation for discovering resources, including service-specific filters\n```\n\n## /clients/tagging/filters.go serviceFilters\n\n`serviceFilters` are extra definitions for how to lookup or filter resources for certain CloudWatch namespaces which\ncannot be done using only tag data alone. Changes to service filters include:\n\n* Adding a service filter implementation for a new service\n* Modifying the behavior of a `ResourceFunc`\n* Modifying the behavior of a `FilterFunc`\n"
  },
  {
    "path": "pkg/clients/account/client.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage account\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"log/slog\"\n\n\t\"github.com/aws/aws-sdk-go-v2/service/iam\"\n\t\"github.com/aws/aws-sdk-go-v2/service/sts\"\n)\n\ntype Client interface {\n\t// GetAccount returns the AWS account ID for the configured authenticated client.\n\tGetAccount(ctx context.Context) (string, error)\n\n\t// GetAccountAlias returns the account alias if there's one set, otherwise an empty string.\n\tGetAccountAlias(ctx context.Context) (string, error)\n}\n\ntype client struct {\n\tlogger    *slog.Logger\n\tstsClient *sts.Client\n\tiamClient *iam.Client\n}\n\nfunc NewClient(logger *slog.Logger, stsClient *sts.Client, iamClient *iam.Client) Client {\n\treturn &client{\n\t\tlogger:    logger,\n\t\tstsClient: stsClient,\n\t\tiamClient: iamClient,\n\t}\n}\n\nfunc (c client) GetAccount(ctx context.Context) (string, error) {\n\tresult, err := c.stsClient.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif result.Account == nil {\n\t\treturn \"\", errors.New(\"aws sts GetCallerIdentity returned no account\")\n\t}\n\treturn *result.Account, nil\n}\n\nfunc (c client) GetAccountAlias(ctx context.Context) (string, error) {\n\tacctAliasOut, err := c.iamClient.ListAccountAliases(ctx, &iam.ListAccountAliasesInput{})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpossibleAccountAlias 
:= \"\"\n\n\t// Since a single account can only have one alias, and an authenticated SDK session corresponds to a single account,\n\t// the output can have at most one alias.\n\t// https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListAccountAliases.html\n\tif len(acctAliasOut.AccountAliases) > 0 {\n\t\tpossibleAccountAlias = acctAliasOut.AccountAliases[0]\n\t}\n\n\treturn possibleAccountAlias, nil\n}\n"
  },
  {
    "path": "pkg/clients/cloudwatch/client.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage cloudwatch\n\nimport (\n\t\"context\"\n\t\"log/slog\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\taws_cloudwatch \"github.com/aws/aws-sdk-go-v2/service/cloudwatch\"\n\t\"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/promutil\"\n)\n\ntype Client interface {\n\t// ListMetrics returns the list of metrics and dimensions for a given namespace\n\t// and metric name. 
Results pagination is handled automatically; the caller\n\t// must provide a non-nil handler func that will be invoked for each page of\n\t// results.\n\tListMetrics(ctx context.Context, namespace string, metric *model.MetricConfig, recentlyActiveOnly bool, fn func(page []*model.Metric)) error\n\n\t// GetMetricData returns the output of the GetMetricData CloudWatch API.\n\t// Results pagination is handled automatically.\n\tGetMetricData(ctx context.Context, getMetricData []*model.CloudwatchData, namespace string, startTime time.Time, endTime time.Time) []MetricDataResult\n\n\t// GetMetricStatistics returns the output of the GetMetricStatistics CloudWatch API.\n\tGetMetricStatistics(ctx context.Context, logger *slog.Logger, dimensions []model.Dimension, namespace string, metric *model.MetricConfig) []*model.MetricStatisticsResult\n}\n\ntype MetricDataResult struct {\n\tID         string\n\tDataPoints []DataPoint\n}\n\ntype DataPoint struct {\n\tValue     *float64\n\tTimestamp time.Time\n}\n\ntype client struct {\n\tlogger        *slog.Logger\n\tcloudwatchAPI *aws_cloudwatch.Client\n}\n\nfunc NewClient(logger *slog.Logger, cloudwatchAPI *aws_cloudwatch.Client) Client {\n\treturn &client{\n\t\tlogger:        logger,\n\t\tcloudwatchAPI: cloudwatchAPI,\n\t}\n}\n\nfunc (c client) ListMetrics(ctx context.Context, namespace string, metric *model.MetricConfig, recentlyActiveOnly bool, fn func(page []*model.Metric)) error {\n\tfilter := &aws_cloudwatch.ListMetricsInput{\n\t\tMetricName: aws.String(metric.Name),\n\t\tNamespace:  aws.String(namespace),\n\t}\n\tif recentlyActiveOnly {\n\t\tfilter.RecentlyActive = types.RecentlyActivePt3h\n\t}\n\n\tc.logger.Debug(\"ListMetrics\", \"input\", filter)\n\n\tpaginator := aws_cloudwatch.NewListMetricsPaginator(c.cloudwatchAPI, filter, func(options *aws_cloudwatch.ListMetricsPaginatorOptions) {\n\t\toptions.StopOnDuplicateToken = true\n\t})\n\n\tfor paginator.HasMorePages() 
{\n\t\tpromutil.CloudwatchAPICounter.WithLabelValues(\"ListMetrics\").Inc()\n\t\tpage, err := paginator.NextPage(ctx)\n\t\tif err != nil {\n\t\t\tpromutil.CloudwatchAPIErrorCounter.WithLabelValues(\"ListMetrics\").Inc()\n\t\t\tc.logger.Error(\"ListMetrics error\", \"err\", err)\n\t\t\treturn err\n\t\t}\n\n\t\tmetricsPage := toModelMetric(page)\n\t\tc.logger.Debug(\"ListMetrics\", \"output\", metricsPage)\n\n\t\tfn(metricsPage)\n\t}\n\n\treturn nil\n}\n\nfunc toModelMetric(page *aws_cloudwatch.ListMetricsOutput) []*model.Metric {\n\tmodelMetrics := make([]*model.Metric, 0, len(page.Metrics))\n\tfor _, cloudwatchMetric := range page.Metrics {\n\t\tmodelMetric := &model.Metric{\n\t\t\tMetricName: *cloudwatchMetric.MetricName,\n\t\t\tNamespace:  *cloudwatchMetric.Namespace,\n\t\t\tDimensions: toModelDimensions(cloudwatchMetric.Dimensions),\n\t\t}\n\t\tmodelMetrics = append(modelMetrics, modelMetric)\n\t}\n\treturn modelMetrics\n}\n\nfunc toModelDimensions(dimensions []types.Dimension) []model.Dimension {\n\tmodelDimensions := make([]model.Dimension, 0, len(dimensions))\n\tfor _, dimension := range dimensions {\n\t\tmodelDimension := model.Dimension{\n\t\t\tName:  *dimension.Name,\n\t\t\tValue: *dimension.Value,\n\t\t}\n\t\tmodelDimensions = append(modelDimensions, modelDimension)\n\t}\n\treturn modelDimensions\n}\n\nfunc (c client) GetMetricData(ctx context.Context, getMetricData []*model.CloudwatchData, namespace string, startTime time.Time, endTime time.Time) []MetricDataResult {\n\tmetricDataQueries := make([]types.MetricDataQuery, 0, len(getMetricData))\n\texportAllDataPoints := false\n\tfor _, data := range getMetricData {\n\t\tmetricStat := &types.MetricStat{\n\t\t\tMetric: &types.Metric{\n\t\t\t\tDimensions: toCloudWatchDimensions(data.Dimensions),\n\t\t\t\tMetricName: &data.MetricName,\n\t\t\t\tNamespace:  &namespace,\n\t\t\t},\n\t\t\tPeriod: aws.Int32(int32(data.GetMetricDataProcessingParams.Period)),\n\t\t\tStat:   
&data.GetMetricDataProcessingParams.Statistic,\n\t\t}\n\t\tmetricDataQueries = append(metricDataQueries, types.MetricDataQuery{\n\t\t\tId:         &data.GetMetricDataProcessingParams.QueryID,\n\t\t\tMetricStat: metricStat,\n\t\t\tReturnData: aws.Bool(true),\n\t\t})\n\t\texportAllDataPoints = exportAllDataPoints || data.MetricMigrationParams.ExportAllDataPoints\n\t}\n\n\tinput := &aws_cloudwatch.GetMetricDataInput{\n\t\tEndTime:           &endTime,\n\t\tStartTime:         &startTime,\n\t\tMetricDataQueries: metricDataQueries,\n\t\tScanBy:            \"TimestampDescending\",\n\t}\n\tvar resp aws_cloudwatch.GetMetricDataOutput\n\tpromutil.CloudwatchGetMetricDataAPIMetricsCounter.Add(float64(len(input.MetricDataQueries)))\n\tc.logger.Debug(\"GetMetricData\", \"input\", input)\n\n\tpaginator := aws_cloudwatch.NewGetMetricDataPaginator(c.cloudwatchAPI, input, func(options *aws_cloudwatch.GetMetricDataPaginatorOptions) {\n\t\toptions.StopOnDuplicateToken = true\n\t})\n\tfor paginator.HasMorePages() {\n\t\tpromutil.CloudwatchAPICounter.WithLabelValues(\"GetMetricData\").Inc()\n\t\tpromutil.CloudwatchGetMetricDataAPICounter.Inc()\n\n\t\tpage, err := paginator.NextPage(ctx)\n\t\tif err != nil {\n\t\t\tpromutil.CloudwatchAPIErrorCounter.WithLabelValues(\"GetMetricData\").Inc()\n\t\t\tc.logger.Error(\"GetMetricData error\", \"err\", err)\n\t\t\treturn nil\n\t\t}\n\t\tresp.MetricDataResults = append(resp.MetricDataResults, page.MetricDataResults...)\n\t}\n\n\tc.logger.Debug(\"GetMetricData\", \"output\", resp)\n\n\treturn toMetricDataResult(resp, exportAllDataPoints)\n}\n\nfunc toMetricDataResult(resp aws_cloudwatch.GetMetricDataOutput, exportAllDataPoints bool) []MetricDataResult {\n\toutput := make([]MetricDataResult, 0, len(resp.MetricDataResults))\n\tfor _, metricDataResult := range resp.MetricDataResults {\n\t\tmappedResult := MetricDataResult{\n\t\t\tID:         *metricDataResult.Id,\n\t\t\tDataPoints: make([]DataPoint, 0, 
len(metricDataResult.Timestamps)),\n\t\t}\n\t\tfor i := 0; i < len(metricDataResult.Timestamps); i++ {\n\t\t\tmappedResult.DataPoints = append(mappedResult.DataPoints, DataPoint{\n\t\t\t\tValue:     &metricDataResult.Values[i],\n\t\t\t\tTimestamp: metricDataResult.Timestamps[i],\n\t\t\t})\n\n\t\t\tif !exportAllDataPoints {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\toutput = append(output, mappedResult)\n\t}\n\treturn output\n}\n\nfunc (c client) GetMetricStatistics(ctx context.Context, logger *slog.Logger, dimensions []model.Dimension, namespace string, metric *model.MetricConfig) []*model.MetricStatisticsResult {\n\tfilter := createGetMetricStatisticsInput(logger, dimensions, &namespace, metric)\n\tc.logger.Debug(\"GetMetricStatistics\", \"input\", filter)\n\n\tresp, err := c.cloudwatchAPI.GetMetricStatistics(ctx, filter)\n\n\tc.logger.Debug(\"GetMetricStatistics\", \"output\", resp)\n\n\tpromutil.CloudwatchAPICounter.WithLabelValues(\"GetMetricStatistics\").Inc()\n\tpromutil.CloudwatchGetMetricStatisticsAPICounter.Inc()\n\n\tif err != nil {\n\t\tpromutil.CloudwatchAPIErrorCounter.WithLabelValues(\"GetMetricStatistics\").Inc()\n\t\tc.logger.Error(\"Failed to get metric statistics\", \"err\", err)\n\t\treturn nil\n\t}\n\n\tptrs := make([]*types.Datapoint, 0, len(resp.Datapoints))\n\tfor _, datapoint := range resp.Datapoints {\n\t\tptrs = append(ptrs, &datapoint)\n\t}\n\n\treturn toModelDataPoints(ptrs)\n}\n\nfunc toModelDataPoints(cwDataPoints []*types.Datapoint) []*model.MetricStatisticsResult {\n\tmodelDataPoints := make([]*model.MetricStatisticsResult, 0, len(cwDataPoints))\n\n\tfor _, cwDatapoint := range cwDataPoints {\n\t\textendedStats := make(map[string]*float64, len(cwDatapoint.ExtendedStatistics))\n\t\tfor name, value := range cwDatapoint.ExtendedStatistics {\n\t\t\textendedStats[name] = &value\n\t\t}\n\t\tmodelDataPoints = append(modelDataPoints, &model.MetricStatisticsResult{\n\t\t\tAverage:            cwDatapoint.Average,\n\t\t\tExtendedStatistics: 
extendedStats,\n\t\t\tMaximum:            cwDatapoint.Maximum,\n\t\t\tMinimum:            cwDatapoint.Minimum,\n\t\t\tSampleCount:        cwDatapoint.SampleCount,\n\t\t\tSum:                cwDatapoint.Sum,\n\t\t\tTimestamp:          cwDatapoint.Timestamp,\n\t\t})\n\t}\n\treturn modelDataPoints\n}\n"
  },
  {
    "path": "pkg/clients/cloudwatch/client_test.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage cloudwatch\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\taws_cloudwatch \"github.com/aws/aws-sdk-go-v2/service/cloudwatch\"\n\t\"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc Test_toMetricDataResult(t *testing.T) {\n\tts := time.Date(2024, time.January, 1, 0, 0, 0, 0, time.UTC)\n\n\ttype testCase struct {\n\t\tname                      string\n\t\texportAllDataPoints       bool\n\t\tgetMetricDataOutput       aws_cloudwatch.GetMetricDataOutput\n\t\texpectedMetricDataResults []MetricDataResult\n\t}\n\n\ttestCases := []testCase{\n\t\t{\n\t\t\tname:                \"all metrics present\",\n\t\t\texportAllDataPoints: false,\n\t\t\tgetMetricDataOutput: aws_cloudwatch.GetMetricDataOutput{\n\t\t\t\tMetricDataResults: []types.MetricDataResult{\n\t\t\t\t\t{\n\t\t\t\t\t\tId:         aws.String(\"metric-1\"),\n\t\t\t\t\t\tValues:     []float64{1.0, 2.0, 3.0},\n\t\t\t\t\t\tTimestamps: []time.Time{ts.Add(10 * time.Minute), ts.Add(5 * time.Minute), ts},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tId:         aws.String(\"metric-2\"),\n\t\t\t\t\t\tValues:     []float64{2.0},\n\t\t\t\t\t\tTimestamps: []time.Time{ts},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedMetricDataResults: []MetricDataResult{\n\t\t\t\t{\n\t\t\t\t\tID: \"metric-1\", DataPoints: 
[]DataPoint{\n\t\t\t\t\t\t{Value: aws.Float64(1.0), Timestamp: ts.Add(10 * time.Minute)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tID: \"metric-2\", DataPoints: []DataPoint{\n\t\t\t\t\t\t{Value: aws.Float64(2.0), Timestamp: ts},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:                \"metric with no values\",\n\t\t\texportAllDataPoints: false,\n\t\t\tgetMetricDataOutput: aws_cloudwatch.GetMetricDataOutput{\n\t\t\t\tMetricDataResults: []types.MetricDataResult{\n\t\t\t\t\t{\n\t\t\t\t\t\tId:         aws.String(\"metric-1\"),\n\t\t\t\t\t\tValues:     []float64{1.0, 2.0, 3.0},\n\t\t\t\t\t\tTimestamps: []time.Time{ts.Add(10 * time.Minute), ts.Add(5 * time.Minute), ts},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tId:         aws.String(\"metric-2\"),\n\t\t\t\t\t\tValues:     []float64{},\n\t\t\t\t\t\tTimestamps: []time.Time{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedMetricDataResults: []MetricDataResult{\n\t\t\t\t{\n\t\t\t\t\tID: \"metric-1\", DataPoints: []DataPoint{\n\t\t\t\t\t\t{Value: aws.Float64(1.0), Timestamp: ts.Add(10 * time.Minute)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tID:         \"metric-2\",\n\t\t\t\t\tDataPoints: []DataPoint{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:                \"export all data points\",\n\t\t\texportAllDataPoints: true,\n\t\t\tgetMetricDataOutput: aws_cloudwatch.GetMetricDataOutput{\n\t\t\t\tMetricDataResults: []types.MetricDataResult{\n\t\t\t\t\t{\n\t\t\t\t\t\tId:         aws.String(\"metric-1\"),\n\t\t\t\t\t\tValues:     []float64{1.0, 2.0, 3.0},\n\t\t\t\t\t\tTimestamps: []time.Time{ts.Add(10 * time.Minute), ts.Add(5 * time.Minute), ts},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tId:         aws.String(\"metric-2\"),\n\t\t\t\t\t\tValues:     []float64{2.0},\n\t\t\t\t\t\tTimestamps: []time.Time{ts},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedMetricDataResults: []MetricDataResult{\n\t\t\t\t{\n\t\t\t\t\tID: \"metric-1\", DataPoints: []DataPoint{\n\t\t\t\t\t\t{Value: 
aws.Float64(1.0), Timestamp: ts.Add(10 * time.Minute)},\n\t\t\t\t\t\t{Value: aws.Float64(2.0), Timestamp: ts.Add(5 * time.Minute)},\n\t\t\t\t\t\t{Value: aws.Float64(3.0), Timestamp: ts},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tID: \"metric-2\", DataPoints: []DataPoint{\n\t\t\t\t\t\t{Value: aws.Float64(2.0), Timestamp: ts},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tmetricDataResults := toMetricDataResult(tc.getMetricDataOutput, tc.exportAllDataPoints)\n\t\t\trequire.Equal(t, tc.expectedMetricDataResults, metricDataResults)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/clients/cloudwatch/concurrency_client.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage cloudwatch\n\nimport (\n\t\"context\"\n\t\"log/slog\"\n\t\"time\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nconst (\n\tlistMetricsCall         = \"ListMetrics\"\n\tgetMetricDataCall       = \"GetMetricData\"\n\tgetMetricStatisticsCall = \"GetMetricStatistics\"\n)\n\n// ConcurrencyLimiter limits the concurrency when calling AWS CloudWatch APIs. The functions implemented\n// by this interface follow the same as a normal semaphore, but accept and operation identifier. Some\n// implementations might use this to keep a different semaphore, with different reentrance values, per\n// operation.\ntype ConcurrencyLimiter interface {\n\t// Acquire takes one \"ticket\" from the concurrency limiter for op. If there's none available, the caller\n\t// routine will be blocked until there's room available.\n\tAcquire(op string)\n\n\t// Release gives back one \"ticket\" to the concurrency limiter identified by op. 
If there's one or more\n\t// routines waiting for one, one will be woken up.\n\tRelease(op string)\n}\n\ntype limitedConcurrencyClient struct {\n\tclient  Client\n\tlimiter ConcurrencyLimiter\n}\n\nfunc NewLimitedConcurrencyClient(client Client, limiter ConcurrencyLimiter) Client {\n\treturn &limitedConcurrencyClient{\n\t\tclient:  client,\n\t\tlimiter: limiter,\n\t}\n}\n\nfunc (c limitedConcurrencyClient) ListMetrics(ctx context.Context, namespace string, metric *model.MetricConfig, recentlyActiveOnly bool, fn func(page []*model.Metric)) error {\n\tc.limiter.Acquire(listMetricsCall)\n\terr := c.client.ListMetrics(ctx, namespace, metric, recentlyActiveOnly, fn)\n\tc.limiter.Release(listMetricsCall)\n\treturn err\n}\n\nfunc (c limitedConcurrencyClient) GetMetricData(ctx context.Context, getMetricData []*model.CloudwatchData, namespace string, startTime time.Time, endTime time.Time) []MetricDataResult {\n\tc.limiter.Acquire(getMetricDataCall)\n\tres := c.client.GetMetricData(ctx, getMetricData, namespace, startTime, endTime)\n\tc.limiter.Release(getMetricDataCall)\n\treturn res\n}\n\nfunc (c limitedConcurrencyClient) GetMetricStatistics(ctx context.Context, logger *slog.Logger, dimensions []model.Dimension, namespace string, metric *model.MetricConfig) []*model.MetricStatisticsResult {\n\tc.limiter.Acquire(getMetricStatisticsCall)\n\tres := c.client.GetMetricStatistics(ctx, logger, dimensions, namespace, metric)\n\tc.limiter.Release(getMetricStatisticsCall)\n\treturn res\n}\n\n// ConcurrencyConfig configures how concurrency should be limited in a Cloudwatch API client. 
It allows\n// one to pick between different limiter implementations: a single limit limiter, or one with a different limit per\n// API call.\ntype ConcurrencyConfig struct {\n\t// PerAPIEnabled configures whether to have a limit per API call.\n\tPerAPILimitEnabled bool\n\n\t// SingleLimit configures the concurrency limit when using a single limiter for api calls.\n\tSingleLimit int\n\n\t// ListMetrics limits the number for ListMetrics API concurrent API calls.\n\tListMetrics int\n\n\t// GetMetricData limits the number for GetMetricData API concurrent API calls.\n\tGetMetricData int\n\n\t// GetMetricStatistics limits the number for GetMetricStatistics API concurrent API calls.\n\tGetMetricStatistics int\n}\n\n// semaphore implements a simple semaphore using a channel.\ntype semaphore chan struct{}\n\n// newSemaphore creates a new semaphore with the given limit.\nfunc newSemaphore(limit int) semaphore {\n\treturn make(semaphore, limit)\n}\n\nfunc (s semaphore) Acquire() {\n\ts <- struct{}{}\n}\n\nfunc (s semaphore) Release() {\n\t<-s\n}\n\n// NewLimiter creates a new ConcurrencyLimiter, according to the ConcurrencyConfig.\nfunc (cfg ConcurrencyConfig) NewLimiter() ConcurrencyLimiter {\n\tif cfg.PerAPILimitEnabled {\n\t\treturn NewPerAPICallLimiter(cfg.ListMetrics, cfg.GetMetricData, cfg.GetMetricStatistics)\n\t}\n\treturn NewSingleLimiter(cfg.SingleLimit)\n}\n\n// perAPICallLimiter is a ConcurrencyLimiter that keeps a different concurrency limiter per different API call. This allows\n// a more granular control of concurrency, allowing us to take advantage of different api limits. 
For example, ListMetrics\n// has a limit of 25 TPS, while GetMetricData has none.\ntype perAPICallLimiter struct {\n\tlistMetricsLimiter          semaphore\n\tgetMetricsDataLimiter       semaphore\n\tgetMetricsStatisticsLimiter semaphore\n}\n\n// NewPerAPICallLimiter creates a new PerAPICallLimiter.\nfunc NewPerAPICallLimiter(listMetrics, getMetricData, getMetricStatistics int) ConcurrencyLimiter {\n\treturn &perAPICallLimiter{\n\t\tlistMetricsLimiter:          newSemaphore(listMetrics),\n\t\tgetMetricsDataLimiter:       newSemaphore(getMetricData),\n\t\tgetMetricsStatisticsLimiter: newSemaphore(getMetricStatistics),\n\t}\n}\n\nfunc (l *perAPICallLimiter) Acquire(op string) {\n\tswitch op {\n\tcase listMetricsCall:\n\t\tl.listMetricsLimiter.Acquire()\n\tcase getMetricDataCall:\n\t\tl.getMetricsDataLimiter.Acquire()\n\tcase getMetricStatisticsCall:\n\t\tl.getMetricsStatisticsLimiter.Acquire()\n\t}\n}\n\nfunc (l *perAPICallLimiter) Release(op string) {\n\tswitch op {\n\tcase listMetricsCall:\n\t\tl.listMetricsLimiter.Release()\n\tcase getMetricDataCall:\n\t\tl.getMetricsDataLimiter.Release()\n\tcase getMetricStatisticsCall:\n\t\tl.getMetricsStatisticsLimiter.Release()\n\t}\n}\n\n// singleLimiter is the current implementation of ConcurrencyLimiter, which has a single limit for all different API calls.\ntype singleLimiter struct {\n\ts semaphore\n}\n\n// NewSingleLimiter creates a new SingleLimiter.\nfunc NewSingleLimiter(limit int) ConcurrencyLimiter {\n\treturn &singleLimiter{\n\t\ts: newSemaphore(limit),\n\t}\n}\n\nfunc (sl *singleLimiter) Acquire(_ string) {\n\tsl.s.Acquire()\n}\n\nfunc (sl *singleLimiter) Release(_ string) {\n\tsl.s.Release()\n}\n"
  },
  {
    "path": "pkg/clients/cloudwatch/input.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage cloudwatch\n\nimport (\n\t\"log/slog\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\taws_cloudwatch \"github.com/aws/aws-sdk-go-v2/service/cloudwatch\"\n\t\"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/promutil\"\n)\n\nfunc toCloudWatchDimensions(dimensions []model.Dimension) []types.Dimension {\n\tcwDim := make([]types.Dimension, 0, len(dimensions))\n\tfor _, dim := range dimensions {\n\t\t// Don't take pointers directly to loop variables\n\t\tcDim := dim\n\t\tcwDim = append(cwDim, types.Dimension{\n\t\t\tName:  &cDim.Name,\n\t\t\tValue: &cDim.Value,\n\t\t})\n\t}\n\treturn cwDim\n}\n\nfunc createGetMetricStatisticsInput(logger *slog.Logger, dimensions []model.Dimension, namespace *string, metric *model.MetricConfig) *aws_cloudwatch.GetMetricStatisticsInput {\n\tperiod := metric.Period\n\tlength := metric.Length\n\tdelay := metric.Delay\n\tendTime := time.Now().Add(-time.Duration(delay) * time.Second)\n\tstartTime := time.Now().Add(-(time.Duration(length) + time.Duration(delay)) * time.Second)\n\n\tvar statistics []types.Statistic\n\tvar extendedStatistics []string\n\tfor _, statistic := range metric.Statistics {\n\t\tif 
promutil.Percentile.MatchString(statistic) {\n\t\t\textendedStatistics = append(extendedStatistics, statistic)\n\t\t} else {\n\t\t\tstatistics = append(statistics, types.Statistic(statistic))\n\t\t}\n\t}\n\n\toutput := &aws_cloudwatch.GetMetricStatisticsInput{\n\t\tDimensions:         toCloudWatchDimensions(dimensions),\n\t\tNamespace:          namespace,\n\t\tStartTime:          &startTime,\n\t\tEndTime:            &endTime,\n\t\tPeriod:             aws.Int32(int32(period)),\n\t\tMetricName:         &metric.Name,\n\t\tStatistics:         statistics,\n\t\tExtendedStatistics: extendedStatistics,\n\t}\n\n\tlogger.Debug(\"CLI helper - \" +\n\t\t\"aws cloudwatch get-metric-statistics\" +\n\t\t\" --metric-name \" + metric.Name +\n\t\t\" --dimensions \" + dimensionsToCliString(dimensions) +\n\t\t\" --namespace \" + *namespace +\n\t\t\" --statistics \" + string(statistics[0]) +\n\t\t\" --period \" + strconv.FormatInt(period, 10) +\n\t\t\" --start-time \" + startTime.Format(time.RFC3339) +\n\t\t\" --end-time \" + endTime.Format(time.RFC3339))\n\n\tlogger.Debug(\"createGetMetricStatisticsInput\", \"output\", *output)\n\n\treturn output\n}\n\nfunc dimensionsToCliString(dimensions []model.Dimension) string {\n\tout := strings.Builder{}\n\tfor _, dim := range dimensions {\n\t\tout.WriteString(\"Name=\")\n\t\tout.WriteString(dim.Name)\n\t\tout.WriteString(\",Value=\")\n\t\tout.WriteString(dim.Value)\n\t\tout.WriteString(\" \")\n\t}\n\treturn out.String()\n}\n"
  },
  {
    "path": "pkg/clients/factory.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage clients\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/aws/retry\"\n\taws_config \"github.com/aws/aws-sdk-go-v2/config\"\n\t\"github.com/aws/aws-sdk-go-v2/credentials/stscreds\"\n\t\"github.com/aws/aws-sdk-go-v2/service/amp\"\n\t\"github.com/aws/aws-sdk-go-v2/service/apigateway\"\n\t\"github.com/aws/aws-sdk-go-v2/service/apigatewayv2\"\n\t\"github.com/aws/aws-sdk-go-v2/service/autoscaling\"\n\t\"github.com/aws/aws-sdk-go-v2/service/cloudwatch\"\n\t\"github.com/aws/aws-sdk-go-v2/service/databasemigrationservice\"\n\t\"github.com/aws/aws-sdk-go-v2/service/ec2\"\n\t\"github.com/aws/aws-sdk-go-v2/service/iam\"\n\t\"github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi\"\n\t\"github.com/aws/aws-sdk-go-v2/service/shield\"\n\t\"github.com/aws/aws-sdk-go-v2/service/storagegateway\"\n\t\"github.com/aws/aws-sdk-go-v2/service/sts\"\n\taws_logging \"github.com/aws/smithy-go/logging\"\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/account\"\n\tcloudwatch_client 
\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/tagging\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\n// Factory is an interface to abstract away all logic required to produce the different\n// YACE specific clients which wrap AWS clients\ntype Factory interface {\n\tGetCloudwatchClient(region string, role model.Role, concurrency cloudwatch_client.ConcurrencyConfig) cloudwatch_client.Client\n\tGetTaggingClient(region string, role model.Role, concurrencyLimit int) tagging.Client\n\tGetAccountClient(region string, role model.Role) account.Client\n}\n\ntype awsRegion = string\n\ntype CachingFactory struct {\n\tlogger              *slog.Logger\n\tstsOptions          func(*sts.Options)\n\tclients             map[model.Role]map[awsRegion]*cachedClients\n\tmu                  sync.Mutex\n\trefreshed           *atomic.Bool\n\tcleared             *atomic.Bool\n\tfipsEnabled         bool\n\tendpointURLOverride string\n}\n\ntype cachedClients struct {\n\tawsConfig *aws.Config\n\t// if we know that this job is only used for static\n\t// then we don't have to construct as many cached connections\n\t// later on\n\tonlyStatic bool\n\tcloudwatch cloudwatch_client.Client\n\ttagging    tagging.Client\n\taccount    account.Client\n}\n\n// Ensure the struct properly implements the interface\nvar _ Factory = &CachingFactory{}\n\n// NewFactory creates a new client factory to use when fetching data from AWS with sdk v2\nfunc NewFactory(logger *slog.Logger, jobsCfg model.JobsConfig, fips bool) (*CachingFactory, error) {\n\tvar options []func(*aws_config.LoadOptions) error\n\toptions = append(options, aws_config.WithLogger(aws_logging.LoggerFunc(func(classification aws_logging.Classification, format string, v ...interface{}) {\n\t\tswitch classification {\n\t\tcase aws_logging.Debug:\n\t\t\tif 
logger.Enabled(context.Background(), slog.LevelDebug) {\n\t\t\t\tlogger.Debug(fmt.Sprintf(format, v...))\n\t\t\t}\n\t\tcase aws_logging.Warn:\n\t\t\tlogger.Warn(fmt.Sprintf(format, v...))\n\t\tdefault: // AWS logging only supports debug or warn, log everything else as error\n\t\t\tlogger.Error(fmt.Sprintf(format, v...), \"err\", \"unexpected aws error classification\", \"classification\", classification)\n\t\t}\n\t})))\n\n\toptions = append(options, aws_config.WithLogConfigurationWarnings(true))\n\n\tendpointURLOverride := os.Getenv(\"AWS_ENDPOINT_URL\")\n\n\toptions = append(options, aws_config.WithRetryMaxAttempts(5))\n\n\tc, err := aws_config.LoadDefaultConfig(context.TODO(), options...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load default aws config: %w\", err)\n\t}\n\n\tstsOptions := createStsOptions(jobsCfg.StsRegion, logger.Enabled(context.Background(), slog.LevelDebug), endpointURLOverride, fips)\n\tcache := map[model.Role]map[awsRegion]*cachedClients{}\n\tfor _, discoveryJob := range jobsCfg.DiscoveryJobs {\n\t\tfor _, role := range discoveryJob.Roles {\n\t\t\tif _, ok := cache[role]; !ok {\n\t\t\t\tcache[role] = map[awsRegion]*cachedClients{}\n\t\t\t}\n\t\t\tfor _, region := range discoveryJob.Regions {\n\t\t\t\tregionConfig := awsConfigForRegion(role, &c, region, stsOptions)\n\t\t\t\tcache[role][region] = &cachedClients{\n\t\t\t\t\tawsConfig:  regionConfig,\n\t\t\t\t\tonlyStatic: false,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, staticJob := range jobsCfg.StaticJobs {\n\t\tfor _, role := range staticJob.Roles {\n\t\t\tif _, ok := cache[role]; !ok {\n\t\t\t\tcache[role] = map[awsRegion]*cachedClients{}\n\t\t\t}\n\t\t\tfor _, region := range staticJob.Regions {\n\t\t\t\t// Discovery job client definitions have precedence\n\t\t\t\tif _, exists := cache[role][region]; !exists {\n\t\t\t\t\tregionConfig := awsConfigForRegion(role, &c, region, stsOptions)\n\t\t\t\t\tcache[role][region] = &cachedClients{\n\t\t\t\t\t\tawsConfig:  
regionConfig,\n\t\t\t\t\t\tonlyStatic: true,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, customNamespaceJob := range jobsCfg.CustomNamespaceJobs {\n\t\tfor _, role := range customNamespaceJob.Roles {\n\t\t\tif _, ok := cache[role]; !ok {\n\t\t\t\tcache[role] = map[awsRegion]*cachedClients{}\n\t\t\t}\n\t\t\tfor _, region := range customNamespaceJob.Regions {\n\t\t\t\t// Discovery job client definitions have precedence\n\t\t\t\tif _, exists := cache[role][region]; !exists {\n\t\t\t\t\tregionConfig := awsConfigForRegion(role, &c, region, stsOptions)\n\t\t\t\t\tcache[role][region] = &cachedClients{\n\t\t\t\t\t\tawsConfig:  regionConfig,\n\t\t\t\t\t\tonlyStatic: true,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &CachingFactory{\n\t\tlogger:              logger,\n\t\tclients:             cache,\n\t\tfipsEnabled:         fips,\n\t\tstsOptions:          stsOptions,\n\t\tendpointURLOverride: endpointURLOverride,\n\t\tcleared:             atomic.NewBool(false),\n\t\trefreshed:           atomic.NewBool(false),\n\t}, nil\n}\n\nfunc (c *CachingFactory) GetCloudwatchClient(region string, role model.Role, concurrency cloudwatch_client.ConcurrencyConfig) cloudwatch_client.Client {\n\tif !c.refreshed.Load() {\n\t\t// if we have not refreshed then we need to lock in case we are accessing concurrently\n\t\tc.mu.Lock()\n\t\tdefer c.mu.Unlock()\n\t}\n\tif client := c.clients[role][region].cloudwatch; client != nil {\n\t\treturn cloudwatch_client.NewLimitedConcurrencyClient(client, concurrency.NewLimiter())\n\t}\n\tc.clients[role][region].cloudwatch = cloudwatch_client.NewClient(c.logger, c.createCloudwatchClient(c.clients[role][region].awsConfig))\n\treturn cloudwatch_client.NewLimitedConcurrencyClient(c.clients[role][region].cloudwatch, concurrency.NewLimiter())\n}\n\nfunc (c *CachingFactory) GetTaggingClient(region string, role model.Role, concurrencyLimit int) tagging.Client {\n\tif !c.refreshed.Load() {\n\t\t// if we have not refreshed then we need to lock 
in case we are accessing concurrently\n\t\tc.mu.Lock()\n\t\tdefer c.mu.Unlock()\n\t}\n\tif client := c.clients[role][region].tagging; client != nil {\n\t\treturn tagging.NewLimitedConcurrencyClient(client, concurrencyLimit)\n\t}\n\tc.clients[role][region].tagging = tagging.NewClient(\n\t\tc.logger,\n\t\tc.createTaggingClient(c.clients[role][region].awsConfig),\n\t\tc.createAutoScalingClient(c.clients[role][region].awsConfig),\n\t\tc.createAPIGatewayClient(c.clients[role][region].awsConfig),\n\t\tc.createAPIGatewayV2Client(c.clients[role][region].awsConfig),\n\t\tc.createEC2Client(c.clients[role][region].awsConfig),\n\t\tc.createDMSClient(c.clients[role][region].awsConfig),\n\t\tc.createPrometheusClient(c.clients[role][region].awsConfig),\n\t\tc.createStorageGatewayClient(c.clients[role][region].awsConfig),\n\t\tc.createShieldClient(c.clients[role][region].awsConfig),\n\t)\n\treturn tagging.NewLimitedConcurrencyClient(c.clients[role][region].tagging, concurrencyLimit)\n}\n\nfunc (c *CachingFactory) GetAccountClient(region string, role model.Role) account.Client {\n\tif !c.refreshed.Load() {\n\t\t// if we have not refreshed then we need to lock in case we are accessing concurrently\n\t\tc.mu.Lock()\n\t\tdefer c.mu.Unlock()\n\t}\n\tif client := c.clients[role][region].account; client != nil {\n\t\treturn client\n\t}\n\n\tstsClient := c.createStsClient(c.clients[role][region].awsConfig)\n\tiamClient := c.createIAMClient(c.clients[role][region].awsConfig)\n\tc.clients[role][region].account = account.NewClient(c.logger, stsClient, iamClient)\n\treturn c.clients[role][region].account\n}\n\nfunc (c *CachingFactory) Refresh() {\n\tif c.refreshed.Load() {\n\t\treturn\n\t}\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\t// Avoid double refresh in the event Refresh() is called concurrently\n\tif c.refreshed.Load() {\n\t\treturn\n\t}\n\n\tfor _, regionClients := range c.clients {\n\t\tfor _, cache := range regionClients {\n\t\t\tcache.cloudwatch = cloudwatch_client.NewClient(c.logger, 
c.createCloudwatchClient(cache.awsConfig))\n\t\t\tif cache.onlyStatic {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcache.tagging = tagging.NewClient(\n\t\t\t\tc.logger,\n\t\t\t\tc.createTaggingClient(cache.awsConfig),\n\t\t\t\tc.createAutoScalingClient(cache.awsConfig),\n\t\t\t\tc.createAPIGatewayClient(cache.awsConfig),\n\t\t\t\tc.createAPIGatewayV2Client(cache.awsConfig),\n\t\t\t\tc.createEC2Client(cache.awsConfig),\n\t\t\t\tc.createDMSClient(cache.awsConfig),\n\t\t\t\tc.createPrometheusClient(cache.awsConfig),\n\t\t\t\tc.createStorageGatewayClient(cache.awsConfig),\n\t\t\t\tc.createShieldClient(cache.awsConfig),\n\t\t\t)\n\n\t\t\tcache.account = account.NewClient(c.logger, c.createStsClient(cache.awsConfig), c.createIAMClient(cache.awsConfig))\n\t\t}\n\t}\n\n\tc.refreshed.Store(true)\n\tc.cleared.Store(false)\n}\n\nfunc (c *CachingFactory) Clear() {\n\tif c.cleared.Load() {\n\t\treturn\n\t}\n\t// Prevent concurrent reads/write if clear is called during execution\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\t// Avoid double clear in the event Refresh() is called concurrently\n\tif c.cleared.Load() {\n\t\treturn\n\t}\n\n\tfor _, regions := range c.clients {\n\t\tfor _, cache := range regions {\n\t\t\tcache.cloudwatch = nil\n\t\t\tcache.account = nil\n\t\t\tcache.tagging = nil\n\t\t}\n\t}\n\n\tc.refreshed.Store(false)\n\tc.cleared.Store(true)\n}\n\n// GetAWSRegionalConfig returns the aws.Config for the given region and role. 
It implements the RegionalConfigProvider interface.\nfunc (c *CachingFactory) GetAWSRegionalConfig(region string, role model.Role) *aws.Config {\n\treturn c.clients[role][region].awsConfig\n}\n\nfunc (c *CachingFactory) createCloudwatchClient(regionConfig *aws.Config) *cloudwatch.Client {\n\treturn cloudwatch.NewFromConfig(*regionConfig, func(options *cloudwatch.Options) {\n\t\tif c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) {\n\t\t\toptions.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody\n\t\t}\n\t\tif c.endpointURLOverride != \"\" {\n\t\t\toptions.BaseEndpoint = aws.String(c.endpointURLOverride)\n\t\t}\n\n\t\t// Setting an explicit retryer will override the default settings on the config\n\t\toptions.Retryer = retry.NewStandard(func(options *retry.StandardOptions) {\n\t\t\toptions.MaxAttempts = 5\n\t\t\toptions.MaxBackoff = 3 * time.Second\n\t\t})\n\n\t\tif c.fipsEnabled {\n\t\t\toptions.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled\n\t\t}\n\t})\n}\n\nfunc (c *CachingFactory) createTaggingClient(regionConfig *aws.Config) *resourcegroupstaggingapi.Client {\n\treturn resourcegroupstaggingapi.NewFromConfig(*regionConfig, func(options *resourcegroupstaggingapi.Options) {\n\t\tif c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) {\n\t\t\toptions.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody\n\t\t}\n\t\tif c.endpointURLOverride != \"\" {\n\t\t\toptions.BaseEndpoint = aws.String(c.endpointURLOverride)\n\t\t}\n\t\t// The FIPS setting is ignored because FIPS is not available for resource groups tagging apis\n\t\t// If enabled the SDK will try to use non-existent FIPS URLs, https://github.com/aws/aws-sdk-go-v2/issues/2138#issuecomment-1570791988\n\t\t// AWS FIPS Reference: https://aws.amazon.com/compliance/fips/\n\t})\n}\n\nfunc (c *CachingFactory) createAutoScalingClient(assumedConfig *aws.Config) *autoscaling.Client {\n\treturn 
autoscaling.NewFromConfig(*assumedConfig, func(options *autoscaling.Options) {\n\t\tif c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) {\n\t\t\toptions.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody\n\t\t}\n\t\tif c.endpointURLOverride != \"\" {\n\t\t\toptions.BaseEndpoint = aws.String(c.endpointURLOverride)\n\t\t}\n\t\t// The FIPS setting is ignored because FIPS is not available for EC2 autoscaling apis\n\t\t// If enabled the SDK will try to use non-existent FIPS URLs, https://github.com/aws/aws-sdk-go-v2/issues/2138#issuecomment-1570791988\n\t\t// AWS FIPS Reference: https://aws.amazon.com/compliance/fips/\n\t\t// \tEC2 autoscaling has FIPS compliant URLs for govcloud, but they do not use any FIPS prefixing, and should work\n\t\t//\twith sdk v2s EndpointResolverV2\n\t})\n}\n\nfunc (c *CachingFactory) createAPIGatewayClient(assumedConfig *aws.Config) *apigateway.Client {\n\treturn apigateway.NewFromConfig(*assumedConfig, func(options *apigateway.Options) {\n\t\tif c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) {\n\t\t\toptions.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody\n\t\t}\n\t\tif c.endpointURLOverride != \"\" {\n\t\t\toptions.BaseEndpoint = aws.String(c.endpointURLOverride)\n\t\t}\n\t\tif c.fipsEnabled {\n\t\t\toptions.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled\n\t\t}\n\t})\n}\n\nfunc (c *CachingFactory) createAPIGatewayV2Client(assumedConfig *aws.Config) *apigatewayv2.Client {\n\treturn apigatewayv2.NewFromConfig(*assumedConfig, func(options *apigatewayv2.Options) {\n\t\tif c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) {\n\t\t\toptions.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody\n\t\t}\n\t\tif c.endpointURLOverride != \"\" {\n\t\t\toptions.BaseEndpoint = aws.String(c.endpointURLOverride)\n\t\t}\n\t\tif c.fipsEnabled {\n\t\t\toptions.EndpointOptions.UseFIPSEndpoint = 
aws.FIPSEndpointStateEnabled\n\t\t}\n\t})\n}\n\nfunc (c *CachingFactory) createEC2Client(assumedConfig *aws.Config) *ec2.Client {\n\treturn ec2.NewFromConfig(*assumedConfig, func(options *ec2.Options) {\n\t\tif c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) {\n\t\t\toptions.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody\n\t\t}\n\t\tif c.endpointURLOverride != \"\" {\n\t\t\toptions.BaseEndpoint = aws.String(c.endpointURLOverride)\n\t\t}\n\t\tif c.fipsEnabled {\n\t\t\toptions.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled\n\t\t}\n\t})\n}\n\nfunc (c *CachingFactory) createDMSClient(assumedConfig *aws.Config) *databasemigrationservice.Client {\n\treturn databasemigrationservice.NewFromConfig(*assumedConfig, func(options *databasemigrationservice.Options) {\n\t\tif c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) {\n\t\t\toptions.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody\n\t\t}\n\t\tif c.endpointURLOverride != \"\" {\n\t\t\toptions.BaseEndpoint = aws.String(c.endpointURLOverride)\n\t\t}\n\t\tif c.fipsEnabled {\n\t\t\toptions.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled\n\t\t}\n\t})\n}\n\nfunc (c *CachingFactory) createStorageGatewayClient(assumedConfig *aws.Config) *storagegateway.Client {\n\treturn storagegateway.NewFromConfig(*assumedConfig, func(options *storagegateway.Options) {\n\t\tif c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) {\n\t\t\toptions.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody\n\t\t}\n\t\tif c.endpointURLOverride != \"\" {\n\t\t\toptions.BaseEndpoint = aws.String(c.endpointURLOverride)\n\t\t}\n\t\tif c.fipsEnabled {\n\t\t\toptions.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled\n\t\t}\n\t})\n}\n\nfunc (c *CachingFactory) createPrometheusClient(assumedConfig *aws.Config) *amp.Client {\n\treturn amp.NewFromConfig(*assumedConfig, func(options *amp.Options) 
{\n\t\tif c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) {\n\t\t\toptions.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody\n\t\t}\n\t\tif c.endpointURLOverride != \"\" {\n\t\t\toptions.BaseEndpoint = aws.String(c.endpointURLOverride)\n\t\t}\n\t\t// The FIPS setting is ignored because FIPS is not available for amp apis\n\t\t// If enabled the SDK will try to use non-existent FIPS URLs, https://github.com/aws/aws-sdk-go-v2/issues/2138#issuecomment-1570791988\n\t\t// AWS FIPS Reference: https://aws.amazon.com/compliance/fips/\n\t})\n}\n\nfunc (c *CachingFactory) createStsClient(awsConfig *aws.Config) *sts.Client {\n\treturn sts.NewFromConfig(*awsConfig, c.stsOptions)\n}\n\nfunc (c *CachingFactory) createIAMClient(awsConfig *aws.Config) *iam.Client {\n\treturn iam.NewFromConfig(*awsConfig)\n}\n\nfunc (c *CachingFactory) createShieldClient(awsConfig *aws.Config) *shield.Client {\n\treturn shield.NewFromConfig(*awsConfig, func(options *shield.Options) {\n\t\tif c.logger != nil && c.logger.Enabled(context.Background(), slog.LevelDebug) {\n\t\t\toptions.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody\n\t\t}\n\t\tif c.endpointURLOverride != \"\" {\n\t\t\toptions.BaseEndpoint = aws.String(c.endpointURLOverride)\n\t\t}\n\t\tif c.fipsEnabled {\n\t\t\toptions.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled\n\t\t}\n\t})\n}\n\nfunc createStsOptions(stsRegion string, isDebugLoggingEnabled bool, endpointURLOverride string, fipsEnabled bool) func(*sts.Options) {\n\treturn func(options *sts.Options) {\n\t\tif stsRegion != \"\" {\n\t\t\toptions.Region = stsRegion\n\t\t}\n\t\tif isDebugLoggingEnabled {\n\t\t\toptions.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody\n\t\t}\n\t\tif endpointURLOverride != \"\" {\n\t\t\toptions.BaseEndpoint = aws.String(endpointURLOverride)\n\t\t}\n\t\tif fipsEnabled {\n\t\t\toptions.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled\n\t\t}\n\t}\n}\n\nvar 
defaultRole = model.Role{}\n\nfunc awsConfigForRegion(r model.Role, c *aws.Config, region awsRegion, stsOptions func(*sts.Options)) *aws.Config {\n\tregionalConfig := c.Copy()\n\tregionalConfig.Region = region\n\n\tif r == defaultRole {\n\t\treturn &regionalConfig\n\t}\n\n\t// based on https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials/stscreds#hdr-Assume_Role\n\t// found via https://github.com/aws/aws-sdk-go-v2/issues/1382\n\tregionalSts := sts.NewFromConfig(*c, stsOptions)\n\tcredentials := stscreds.NewAssumeRoleProvider(regionalSts, r.RoleArn, func(options *stscreds.AssumeRoleOptions) {\n\t\tif r.ExternalID != \"\" {\n\t\t\toptions.ExternalID = aws.String(r.ExternalID)\n\t\t}\n\t})\n\tregionalConfig.Credentials = aws.NewCredentialsCache(credentials)\n\n\treturn &regionalConfig\n}\n"
  },
  {
    "path": "pkg/clients/factory_test.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage clients\n\nimport (\n\t\"context\"\n\t\"log/slog\"\n\t\"reflect\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\t\"unsafe\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/amp\"\n\t\"github.com/aws/aws-sdk-go-v2/service/apigateway\"\n\t\"github.com/aws/aws-sdk-go-v2/service/apigatewayv2\"\n\t\"github.com/aws/aws-sdk-go-v2/service/autoscaling\"\n\t\"github.com/aws/aws-sdk-go-v2/service/databasemigrationservice\"\n\t\"github.com/aws/aws-sdk-go-v2/service/ec2\"\n\t\"github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi\"\n\t\"github.com/aws/aws-sdk-go-v2/service/storagegateway\"\n\t\"github.com/aws/aws-sdk-go-v2/service/sts\"\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"go.uber.org/atomic\"\n\n\tcloudwatch_client \"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar jobsCfgWithDefaultRoleAndRegion1 = model.JobsConfig{\n\tDiscoveryJobs: []model.DiscoveryJob{\n\t\t{\n\t\t\tRoles:   []model.Role{{}},\n\t\t\tRegions: []string{\"region1\"},\n\t\t},\n\t},\n}\n\nfunc TestNewFactory_initializes_clients(t *testing.T) {\n\trole1 := model.Role{\n\t\tRoleArn:    \"role1\",\n\t\tExternalID: 
\"external1\",\n\t}\n\trole2 := model.Role{\n\t\tRoleArn:    \"role2\",\n\t\tExternalID: \"external2\",\n\t}\n\trole3 := model.Role{\n\t\tRoleArn:    \"role3\",\n\t\tExternalID: \"external3\",\n\t}\n\n\tregion1 := \"region1\"\n\tregion2 := \"region2\"\n\tregion3 := \"region3\"\n\ttests := []struct {\n\t\tname       string\n\t\tjobsCfg    model.JobsConfig\n\t\tonlyStatic *bool\n\t}{\n\t\t{\n\t\t\tname: \"from discovery config\",\n\t\t\tjobsCfg: model.JobsConfig{\n\t\t\t\tDiscoveryJobs: []model.DiscoveryJob{{\n\t\t\t\t\tRegions: []string{region1, region2, region3},\n\t\t\t\t\tRoles:   []model.Role{defaultRole, role1, role2, role3},\n\t\t\t\t}},\n\t\t\t},\n\t\t\tonlyStatic: aws.Bool(false),\n\t\t},\n\t\t{\n\t\t\tname: \"from static config\",\n\t\t\tjobsCfg: model.JobsConfig{\n\t\t\t\tStaticJobs: []model.StaticJob{{\n\t\t\t\t\tRegions: []string{region1, region2, region3},\n\t\t\t\t\tRoles:   []model.Role{defaultRole, role1, role2, role3},\n\t\t\t\t}},\n\t\t\t},\n\t\t\tonlyStatic: aws.Bool(true),\n\t\t},\n\t\t{\n\t\t\tname: \"from custom config\",\n\t\t\tjobsCfg: model.JobsConfig{\n\t\t\t\tCustomNamespaceJobs: []model.CustomNamespaceJob{{\n\t\t\t\t\tRegions: []string{region1, region2, region3},\n\t\t\t\t\tRoles:   []model.Role{defaultRole, role1, role2, role3},\n\t\t\t\t}},\n\t\t\t},\n\t\t\tonlyStatic: aws.Bool(true),\n\t\t},\n\t\t{\n\t\t\tname: \"from all configs\",\n\t\t\tjobsCfg: model.JobsConfig{\n\t\t\t\tDiscoveryJobs: []model.DiscoveryJob{{\n\t\t\t\t\tRegions: []string{region1, region2},\n\t\t\t\t\tRoles:   []model.Role{defaultRole, role1, role2},\n\t\t\t\t}},\n\t\t\t\tStaticJobs: []model.StaticJob{{\n\t\t\t\t\tRegions: []string{region2, region3},\n\t\t\t\t\tRoles:   []model.Role{defaultRole, role2, role3},\n\t\t\t\t}},\n\t\t\t\tCustomNamespaceJobs: []model.CustomNamespaceJob{{\n\t\t\t\t\tRegions: []string{region1, region3},\n\t\t\t\t\tRoles:   []model.Role{defaultRole, role1, role3},\n\t\t\t\t}},\n\t\t\t},\n\t\t\tonlyStatic: nil,\n\t\t},\n\t}\n\n\tfor _, test := 
range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\toutput, err := NewFactory(promslog.NewNopLogger(), test.jobsCfg, false)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tassert.False(t, output.refreshed.Load())\n\t\t\tassert.False(t, output.cleared.Load())\n\n\t\t\trequire.Len(t, output.clients, 4)\n\t\t\tassert.Contains(t, output.clients, defaultRole)\n\t\t\tassert.Contains(t, output.clients, role1)\n\t\t\tassert.Contains(t, output.clients, role2)\n\t\t\tassert.Contains(t, output.clients, role3)\n\n\t\t\tfor role, regionalClients := range output.clients {\n\t\t\t\trequire.Len(t, regionalClients, 3)\n\n\t\t\t\tassert.Contains(t, regionalClients, region1)\n\t\t\t\tassert.Contains(t, regionalClients, region2)\n\t\t\t\tassert.Contains(t, regionalClients, region3)\n\n\t\t\t\tfor region, clients := range regionalClients {\n\t\t\t\t\tassert.NotNil(t, clients, \"role %s region %s had nil clients\", role, region)\n\t\t\t\t\tif test.onlyStatic != nil {\n\t\t\t\t\t\tassert.Equal(t, *test.onlyStatic, clients.onlyStatic, \"role %s region %s had unexpected onlyStatic value\", role, region)\n\t\t\t\t\t}\n\n\t\t\t\t\tassert.Equal(t, region, clients.awsConfig.Region)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNewFactory_respects_stsregion(t *testing.T) {\n\tstsRegion := \"custom-sts-region\"\n\tcfg := model.JobsConfig{\n\t\tStsRegion: stsRegion,\n\t\tDiscoveryJobs: []model.DiscoveryJob{{\n\t\t\tRegions: []string{\"region1\"},\n\t\t\tRoles:   []model.Role{defaultRole},\n\t\t}},\n\t}\n\n\toutput, err := NewFactory(promslog.NewNopLogger(), cfg, false)\n\trequire.NoError(t, err)\n\trequire.Len(t, output.clients, 1)\n\tstsOptions := sts.Options{}\n\toutput.stsOptions(&stsOptions)\n\tassert.Equal(t, stsRegion, stsOptions.Region)\n}\n\nfunc TestCachingFactory_Clear(t *testing.T) {\n\tcache := &CachingFactory{\n\t\tlogger: promslog.NewNopLogger(),\n\t\tclients: map[model.Role]map[awsRegion]*cachedClients{\n\t\t\tdefaultRole: {\n\t\t\t\t\"region1\": 
&cachedClients{\n\t\t\t\t\tawsConfig:  nil,\n\t\t\t\t\tcloudwatch: testClient{},\n\t\t\t\t\ttagging:    testClient{},\n\t\t\t\t\taccount:    testClient{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\trefreshed: atomic.NewBool(true),\n\t\tcleared:   atomic.NewBool(false),\n\t}\n\n\tcache.Clear()\n\tassert.True(t, cache.cleared.Load())\n\tassert.False(t, cache.refreshed.Load())\n\n\tclients := cache.clients[defaultRole][\"region1\"]\n\trequire.NotNil(t, clients)\n\tassert.Nil(t, clients.cloudwatch)\n\tassert.Nil(t, clients.account)\n\tassert.Nil(t, clients.tagging)\n}\n\nfunc TestCachingFactory_Refresh(t *testing.T) {\n\tt.Run(\"creates all clients when config contains only discovery jobs\", func(t *testing.T) {\n\t\toutput, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, false)\n\t\trequire.NoError(t, err)\n\n\t\toutput.Refresh()\n\t\tassert.False(t, output.cleared.Load())\n\t\tassert.True(t, output.refreshed.Load())\n\n\t\tclients := output.clients[defaultRole][\"region1\"]\n\t\trequire.NotNil(t, clients)\n\t\tassert.NotNil(t, clients.cloudwatch)\n\t\tassert.NotNil(t, clients.account)\n\t\tassert.NotNil(t, clients.tagging)\n\t})\n\n\tt.Run(\"creates only cloudwatch when config is only static jobs\", func(t *testing.T) {\n\t\tjobsCfg := model.JobsConfig{\n\t\t\tStaticJobs: []model.StaticJob{{\n\t\t\t\tRegions: []string{\"region1\"},\n\t\t\t\tRoles:   []model.Role{{}},\n\t\t\t}},\n\t\t\tCustomNamespaceJobs: []model.CustomNamespaceJob{{\n\t\t\t\tRegions: []string{\"region1\"},\n\t\t\t\tRoles:   []model.Role{{}},\n\t\t\t}},\n\t\t}\n\n\t\toutput, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false)\n\t\trequire.NoError(t, err)\n\n\t\toutput.Refresh()\n\t\tassert.False(t, output.cleared.Load())\n\t\tassert.True(t, output.refreshed.Load())\n\n\t\tclients := output.clients[defaultRole][\"region1\"]\n\t\trequire.NotNil(t, clients)\n\t\tassert.NotNil(t, clients.cloudwatch)\n\t\tassert.Nil(t, clients.account)\n\t\tassert.Nil(t, 
clients.tagging)\n\t})\n}\n\nfunc TestCachingFactory_GetAccountClient(t *testing.T) {\n\tt.Run(\"refreshed cache does not create new client\", func(t *testing.T) {\n\t\tjobsCfg := model.JobsConfig{\n\t\t\tDiscoveryJobs: []model.DiscoveryJob{{\n\t\t\t\tRoles:   []model.Role{{}},\n\t\t\t\tRegions: []string{\"region1\"},\n\t\t\t}},\n\t\t}\n\n\t\toutput, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false)\n\t\trequire.NoError(t, err)\n\n\t\toutput.Refresh()\n\n\t\tclients := output.clients[defaultRole][\"region1\"]\n\t\trequire.NotNil(t, clients)\n\t\tassert.Equal(t, clients.account, output.GetAccountClient(\"region1\", defaultRole))\n\t})\n\n\tt.Run(\"unrefreshed cache creates a new client\", func(t *testing.T) {\n\t\tjobsCfg := model.JobsConfig{\n\t\t\tDiscoveryJobs: []model.DiscoveryJob{{\n\t\t\t\tRoles:   []model.Role{{}},\n\t\t\t\tRegions: []string{\"region1\"},\n\t\t\t}},\n\t\t}\n\n\t\toutput, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false)\n\t\trequire.NoError(t, err)\n\n\t\tclients := output.clients[defaultRole][\"region1\"]\n\t\trequire.NotNil(t, clients)\n\t\trequire.Nil(t, clients.account)\n\n\t\tclient := output.GetAccountClient(\"region1\", defaultRole)\n\t\tassert.Equal(t, clients.account, client)\n\t})\n}\n\nfunc TestCachingFactory_GetCloudwatchClient(t *testing.T) {\n\tt.Run(\"refreshed cache does not create new client\", func(t *testing.T) {\n\t\tjobsCfg := model.JobsConfig{\n\t\t\tDiscoveryJobs: []model.DiscoveryJob{{\n\t\t\t\tRoles:   []model.Role{{}},\n\t\t\t\tRegions: []string{\"region1\"},\n\t\t\t}},\n\t\t}\n\n\t\toutput, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false)\n\t\trequire.NoError(t, err)\n\n\t\toutput.Refresh()\n\n\t\tclients := output.clients[defaultRole][\"region1\"]\n\t\trequire.NotNil(t, clients)\n\t\t// Can't do equality comparison due to concurrency limiter\n\t\tassert.NotNil(t, output.GetCloudwatchClient(\"region1\", defaultRole, cloudwatch_client.ConcurrencyConfig{SingleLimit: 
1}))\n\t})\n\n\tt.Run(\"unrefreshed cache creates a new client\", func(t *testing.T) {\n\t\tjobsCfg := model.JobsConfig{\n\t\t\tDiscoveryJobs: []model.DiscoveryJob{{\n\t\t\t\tRoles:   []model.Role{{}},\n\t\t\t\tRegions: []string{\"region1\"},\n\t\t\t}},\n\t\t}\n\n\t\toutput, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false)\n\t\trequire.NoError(t, err)\n\n\t\tclients := output.clients[defaultRole][\"region1\"]\n\t\trequire.NotNil(t, clients)\n\t\trequire.Nil(t, clients.cloudwatch)\n\n\t\toutput.GetCloudwatchClient(\"region1\", defaultRole, cloudwatch_client.ConcurrencyConfig{SingleLimit: 1})\n\t\tassert.NotNil(t, clients.cloudwatch)\n\t})\n}\n\nfunc TestCachingFactory_GetTaggingClient(t *testing.T) {\n\tt.Run(\"refreshed cache does not create new client\", func(t *testing.T) {\n\t\tjobsCfg := model.JobsConfig{\n\t\t\tDiscoveryJobs: []model.DiscoveryJob{{\n\t\t\t\tRoles:   []model.Role{{}},\n\t\t\t\tRegions: []string{\"region1\"},\n\t\t\t}},\n\t\t}\n\n\t\toutput, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false)\n\t\trequire.NoError(t, err)\n\n\t\toutput.Refresh()\n\n\t\tclients := output.clients[defaultRole][\"region1\"]\n\t\trequire.NotNil(t, clients)\n\t\t// Can't do equality comparison due to concurrency limiter\n\t\tassert.NotNil(t, output.GetTaggingClient(\"region1\", defaultRole, 1))\n\t})\n\n\tt.Run(\"unrefreshed cache creates a new client\", func(t *testing.T) {\n\t\tjobsCfg := model.JobsConfig{\n\t\t\tDiscoveryJobs: []model.DiscoveryJob{{\n\t\t\t\tRoles:   []model.Role{{}},\n\t\t\t\tRegions: []string{\"region1\"},\n\t\t\t}},\n\t\t}\n\n\t\toutput, err := NewFactory(promslog.NewNopLogger(), jobsCfg, false)\n\t\trequire.NoError(t, err)\n\n\t\tclients := output.clients[defaultRole][\"region1\"]\n\t\trequire.NotNil(t, clients)\n\t\trequire.Nil(t, clients.tagging)\n\n\t\toutput.GetTaggingClient(\"region1\", defaultRole, 1)\n\t\tassert.NotNil(t, clients.tagging)\n\t})\n}\n\nfunc TestCachingFactory_createTaggingClient_DoesNotEnableFIPS(t 
*testing.T) {\n\tfactory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true)\n\trequire.NoError(t, err)\n\n\tclient := factory.createTaggingClient(factory.clients[defaultRole][\"region1\"].awsConfig)\n\trequire.NotNil(t, client)\n\n\toptions := getOptions[resourcegroupstaggingapi.Client, resourcegroupstaggingapi.Options](client)\n\trequire.NotNil(t, options)\n\n\tassert.Equal(t, options.EndpointOptions.UseFIPSEndpoint, aws.FIPSEndpointStateUnset)\n}\n\nfunc TestCachingFactory_createAPIGatewayClient_EnablesFIPS(t *testing.T) {\n\tfactory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true)\n\trequire.NoError(t, err)\n\n\tclient := factory.createAPIGatewayClient(factory.clients[defaultRole][\"region1\"].awsConfig)\n\trequire.NotNil(t, client)\n\n\toptions := getOptions[apigateway.Client, apigateway.Options](client)\n\trequire.NotNil(t, options)\n\n\tassert.Equal(t, options.EndpointOptions.UseFIPSEndpoint, aws.FIPSEndpointStateEnabled)\n}\n\nfunc TestCachingFactory_createAPIGatewayV2Client_EnablesFIPS(t *testing.T) {\n\tfactory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true)\n\trequire.NoError(t, err)\n\n\tclient := factory.createAPIGatewayV2Client(factory.clients[defaultRole][\"region1\"].awsConfig)\n\trequire.NotNil(t, client)\n\n\toptions := getOptions[apigatewayv2.Client, apigatewayv2.Options](client)\n\trequire.NotNil(t, options)\n\n\tassert.Equal(t, options.EndpointOptions.UseFIPSEndpoint, aws.FIPSEndpointStateEnabled)\n}\n\nfunc TestCachingFactory_createAutoScalingClient_DoesNotEnableFIPS(t *testing.T) {\n\tfactory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true)\n\trequire.NoError(t, err)\n\n\tclient := factory.createAutoScalingClient(factory.clients[defaultRole][\"region1\"].awsConfig)\n\trequire.NotNil(t, client)\n\n\toptions := getOptions[autoscaling.Client, autoscaling.Options](client)\n\trequire.NotNil(t, 
options)\n\n\tassert.Equal(t, options.EndpointOptions.UseFIPSEndpoint, aws.FIPSEndpointStateUnset)\n}\n\nfunc TestCachingFactory_createEC2Client_EnablesFIPS(t *testing.T) {\n\tfactory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true)\n\trequire.NoError(t, err)\n\n\tclient := factory.createEC2Client(factory.clients[defaultRole][\"region1\"].awsConfig)\n\trequire.NotNil(t, client)\n\n\toptions := getOptions[ec2.Client, ec2.Options](client)\n\trequire.NotNil(t, options)\n\n\tassert.Equal(t, options.EndpointOptions.UseFIPSEndpoint, aws.FIPSEndpointStateEnabled)\n}\n\nfunc TestCachingFactory_createDMSClient_EnablesFIPS(t *testing.T) {\n\tfactory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true)\n\trequire.NoError(t, err)\n\n\tclient := factory.createDMSClient(factory.clients[defaultRole][\"region1\"].awsConfig)\n\trequire.NotNil(t, client)\n\n\toptions := getOptions[databasemigrationservice.Client, databasemigrationservice.Options](client)\n\trequire.NotNil(t, options)\n\n\tassert.Equal(t, options.EndpointOptions.UseFIPSEndpoint, aws.FIPSEndpointStateEnabled)\n}\n\nfunc TestCachingFactory_createStorageGatewayClient_EnablesFIPS(t *testing.T) {\n\tfactory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true)\n\trequire.NoError(t, err)\n\n\tclient := factory.createStorageGatewayClient(factory.clients[defaultRole][\"region1\"].awsConfig)\n\trequire.NotNil(t, client)\n\n\toptions := getOptions[storagegateway.Client, storagegateway.Options](client)\n\trequire.NotNil(t, options)\n\n\tassert.Equal(t, options.EndpointOptions.UseFIPSEndpoint, aws.FIPSEndpointStateEnabled)\n}\n\nfunc TestCachingFactory_createPrometheusClient_DoesNotEnableFIPS(t *testing.T) {\n\tfactory, err := NewFactory(promslog.NewNopLogger(), jobsCfgWithDefaultRoleAndRegion1, true)\n\trequire.NoError(t, err)\n\n\tclient := 
factory.createPrometheusClient(factory.clients[defaultRole][\"region1\"].awsConfig)\n\trequire.NotNil(t, client)\n\n\toptions := getOptions[amp.Client, amp.Options](client)\n\trequire.NotNil(t, options)\n\n\tassert.Equal(t, options.EndpointOptions.UseFIPSEndpoint, aws.FIPSEndpointStateUnset)\n}\n\nfunc TestRaceConditionRefreshClear(t *testing.T) {\n\t// Create a factory with the test config\n\tfactory, err := NewFactory(promslog.NewNopLogger(), model.JobsConfig{}, false)\n\trequire.NoError(t, err)\n\n\t// Number of concurrent operations to perform\n\titerations := 100\n\n\t// Use WaitGroup to synchronize goroutines\n\tvar wg sync.WaitGroup\n\twg.Add(iterations) // For both Refresh and Clear calls\n\n\t// Start function to run concurrent operations\n\tfor i := 0; i < iterations; i++ {\n\t\t// Launch goroutine to call Refresh\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfactory.Refresh()\n\t\t\tfactory.Clear()\n\t\t}()\n\t}\n\n\t// Create a channel to signal completion\n\tdone := make(chan struct{})\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\n\t// Wait for either completion or timeout\n\tselect {\n\tcase <-done:\n\t\t// Test completed successfully\n\tcase <-time.After(60 * time.Second):\n\t\trequire.Fail(t, \"Test timed out after 60 seconds\")\n\t}\n}\n\n// getOptions uses reflection to pull the unexported options field off of any AWS Client\n// the options of the client carries around a lot of info about how the client will behave and is helpful for\n// testing lower level sdk configuration\nfunc getOptions[T any, V any](awsClient *T) V {\n\tfield := reflect.ValueOf(awsClient).Elem().FieldByName(\"options\")\n\toptions := reflect.NewAt(field.Type(), unsafe.Pointer(field.UnsafeAddr())).Elem().Interface().(V)\n\treturn options\n}\n\ntype testClient struct{}\n\nfunc (t testClient) GetResources(_ context.Context, _ model.DiscoveryJob, _ string) ([]*model.TaggedResource, error) {\n\treturn nil, nil\n}\n\nfunc (t testClient) GetAccount(_ context.Context) 
(string, error) {\n\treturn \"\", nil\n}\n\nfunc (t testClient) GetAccountAlias(_ context.Context) (string, error) {\n\treturn \"\", nil\n}\n\nfunc (t testClient) ListMetrics(_ context.Context, _ string, _ *model.MetricConfig, _ bool, _ func(page []*model.Metric)) error {\n\treturn nil\n}\n\nfunc (t testClient) GetMetricData(_ context.Context, _ []*model.CloudwatchData, _ string, _ time.Time, _ time.Time) []cloudwatch_client.MetricDataResult {\n\treturn nil\n}\n\nfunc (t testClient) GetMetricStatistics(_ context.Context, _ *slog.Logger, _ []model.Dimension, _ string, _ *model.MetricConfig) []*model.MetricStatisticsResult {\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/clients/tagging/client.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage tagging\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log/slog\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/amp\"\n\t\"github.com/aws/aws-sdk-go-v2/service/apigateway\"\n\t\"github.com/aws/aws-sdk-go-v2/service/apigatewayv2\"\n\t\"github.com/aws/aws-sdk-go-v2/service/autoscaling\"\n\t\"github.com/aws/aws-sdk-go-v2/service/databasemigrationservice\"\n\t\"github.com/aws/aws-sdk-go-v2/service/ec2\"\n\t\"github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi\"\n\t\"github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi/types\"\n\t\"github.com/aws/aws-sdk-go-v2/service/shield\"\n\t\"github.com/aws/aws-sdk-go-v2/service/storagegateway\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/promutil\"\n)\n\ntype Client interface {\n\tGetResources(ctx context.Context, job model.DiscoveryJob, region string) ([]*model.TaggedResource, error)\n}\n\nvar ErrExpectedToFindResources = errors.New(\"expected to discover resources but none were found\")\n\ntype client struct {\n\tlogger            *slog.Logger\n\ttaggingAPI        *resourcegroupstaggingapi.Client\n\tautoscalingAPI    *autoscaling.Client\n\tapiGatewayAPI     
*apigateway.Client\n\tapiGatewayV2API   *apigatewayv2.Client\n\tec2API            *ec2.Client\n\tdmsAPI            *databasemigrationservice.Client\n\tprometheusSvcAPI  *amp.Client\n\tstorageGatewayAPI *storagegateway.Client\n\tshieldAPI         *shield.Client\n}\n\nfunc NewClient(\n\tlogger *slog.Logger,\n\ttaggingAPI *resourcegroupstaggingapi.Client,\n\tautoscalingAPI *autoscaling.Client,\n\tapiGatewayAPI *apigateway.Client,\n\tapiGatewayV2API *apigatewayv2.Client,\n\tec2API *ec2.Client,\n\tdmsClient *databasemigrationservice.Client,\n\tprometheusClient *amp.Client,\n\tstorageGatewayAPI *storagegateway.Client,\n\tshieldAPI *shield.Client,\n) Client {\n\treturn &client{\n\t\tlogger:            logger,\n\t\ttaggingAPI:        taggingAPI,\n\t\tautoscalingAPI:    autoscalingAPI,\n\t\tapiGatewayAPI:     apiGatewayAPI,\n\t\tapiGatewayV2API:   apiGatewayV2API,\n\t\tec2API:            ec2API,\n\t\tdmsAPI:            dmsClient,\n\t\tprometheusSvcAPI:  prometheusClient,\n\t\tstorageGatewayAPI: storageGatewayAPI,\n\t\tshieldAPI:         shieldAPI,\n\t}\n}\n\nfunc (c client) GetResources(ctx context.Context, job model.DiscoveryJob, region string) ([]*model.TaggedResource, error) {\n\tsvc := config.SupportedServices.GetService(job.Namespace)\n\tvar resources []*model.TaggedResource\n\tshouldHaveDiscoveredResources := false\n\n\tif len(svc.ResourceFilters) > 0 {\n\t\tshouldHaveDiscoveredResources = true\n\t\tfilters := make([]string, 0, len(svc.ResourceFilters))\n\t\tfor _, filter := range svc.ResourceFilters {\n\t\t\tfilters = append(filters, *filter)\n\t\t}\n\t\tvar tagFilters []types.TagFilter\n\t\tif len(job.SearchTags) > 0 {\n\t\t\tfor i := range job.SearchTags {\n\t\t\t\t// Because everything with the AWS APIs is pointers we need a pointer to the `Key` field from the SearchTag.\n\t\t\t\t// We can't take a pointer to any fields from loop variable or the pointer will always be the same and this logic will be broken.\n\t\t\t\tst := job.SearchTags[i]\n\n\t\t\t\t// AWS's 
GetResources has a TagFilter option which matches the semantics of our SearchTags where all filters must match\n\t\t\t\t// Their value matching implementation is different though so instead of mapping the Key and Value we only map the Keys.\n\t\t\t\t// Their API docs say, \"If you don't specify a value for a key, the response returns all resources that are tagged with that key, with any or no value.\"\n\t\t\t\t// which makes this a safe way to reduce the amount of data we need to filter out.\n\t\t\t\t// https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html#resourcegrouptagging-GetResources-request-TagFilters\n\t\t\t\ttagFilters = append(tagFilters, types.TagFilter{Key: &st.Key})\n\t\t\t}\n\t\t}\n\t\tinputparams := &resourcegroupstaggingapi.GetResourcesInput{\n\t\t\tResourceTypeFilters: filters,\n\t\t\tResourcesPerPage:    aws.Int32(int32(100)), // max allowed value according to API docs\n\t\t\tTagFilters:          tagFilters,\n\t\t}\n\n\t\tpaginator := resourcegroupstaggingapi.NewGetResourcesPaginator(c.taggingAPI, inputparams, func(options *resourcegroupstaggingapi.GetResourcesPaginatorOptions) {\n\t\t\toptions.StopOnDuplicateToken = true\n\t\t})\n\t\tfor paginator.HasMorePages() {\n\t\t\tpromutil.ResourceGroupTaggingAPICounter.Inc()\n\t\t\tpage, err := paginator.NextPage(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tfor _, resourceTagMapping := range page.ResourceTagMappingList {\n\t\t\t\tresource := model.TaggedResource{\n\t\t\t\t\tARN:       *resourceTagMapping.ResourceARN,\n\t\t\t\t\tNamespace: job.Namespace,\n\t\t\t\t\tRegion:    region,\n\t\t\t\t\tTags:      make([]model.Tag, 0, len(resourceTagMapping.Tags)),\n\t\t\t\t}\n\n\t\t\t\tfor _, t := range resourceTagMapping.Tags {\n\t\t\t\t\tresource.Tags = append(resource.Tags, model.Tag{Key: *t.Key, Value: *t.Value})\n\t\t\t\t}\n\n\t\t\t\tif resource.FilterThroughTags(job.SearchTags) {\n\t\t\t\t\tresources = append(resources, &resource)\n\t\t\t\t} 
else {\n\t\t\t\t\tc.logger.Debug(\"Skipping resource because search tags do not match\", \"arn\", resource.ARN)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tc.logger.Debug(\"GetResourcesPages finished\", \"total\", len(resources))\n\t}\n\n\tif ext, ok := ServiceFilters[svc.Namespace]; ok {\n\t\tif ext.ResourceFunc != nil {\n\t\t\tshouldHaveDiscoveredResources = true\n\t\t\tnewResources, err := ext.ResourceFunc(ctx, c, job, region)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to apply ResourceFunc for %s, %w\", svc.Namespace, err)\n\t\t\t}\n\t\t\tresources = append(resources, newResources...)\n\t\t\tc.logger.Debug(\"ResourceFunc finished\", \"total\", len(resources))\n\t\t}\n\n\t\tif ext.FilterFunc != nil {\n\t\t\tfilteredResources, err := ext.FilterFunc(ctx, c, resources)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to apply FilterFunc for %s, %w\", svc.Namespace, err)\n\t\t\t}\n\t\t\tresources = filteredResources\n\t\t\tc.logger.Debug(\"FilterFunc finished\", \"total\", len(resources))\n\t\t}\n\t}\n\n\tif shouldHaveDiscoveredResources && len(resources) == 0 {\n\t\treturn nil, ErrExpectedToFindResources\n\t}\n\n\treturn resources, nil\n}\n"
  },
  {
    "path": "pkg/clients/tagging/concurrency_client.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage tagging\n\nimport (\n\t\"context\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype limitedConcurrencyClient struct {\n\tclient Client\n\tsem    chan struct{}\n}\n\nfunc NewLimitedConcurrencyClient(client Client, maxConcurrency int) Client {\n\treturn &limitedConcurrencyClient{\n\t\tclient: client,\n\t\tsem:    make(chan struct{}, maxConcurrency),\n\t}\n}\n\nfunc (c limitedConcurrencyClient) GetResources(ctx context.Context, job model.DiscoveryJob, region string) ([]*model.TaggedResource, error) {\n\tc.sem <- struct{}{}\n\tres, err := c.client.GetResources(ctx, job, region)\n\t<-c.sem\n\treturn res, err\n}\n"
  },
  {
    "path": "pkg/clients/tagging/filters.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage tagging\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/aws/arn\"\n\t\"github.com/aws/aws-sdk-go-v2/service/amp\"\n\t\"github.com/aws/aws-sdk-go-v2/service/apigateway\"\n\t\"github.com/aws/aws-sdk-go-v2/service/apigatewayv2\"\n\t\"github.com/aws/aws-sdk-go-v2/service/autoscaling\"\n\t\"github.com/aws/aws-sdk-go-v2/service/databasemigrationservice\"\n\t\"github.com/aws/aws-sdk-go-v2/service/ec2\"\n\t\"github.com/aws/aws-sdk-go-v2/service/shield\"\n\t\"github.com/aws/aws-sdk-go-v2/service/storagegateway\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/promutil\"\n)\n\ntype ServiceFilter struct {\n\t// ResourceFunc can be used to fetch additional resources\n\tResourceFunc func(context.Context, client, model.DiscoveryJob, string) ([]*model.TaggedResource, error)\n\n\t// FilterFunc can be used to modify the input resources or to drop based on some condition\n\tFilterFunc func(context.Context, client, []*model.TaggedResource) ([]*model.TaggedResource, error)\n}\n\n// ServiceFilters maps a service namespace to (optional) ServiceFilter\nvar ServiceFilters = map[string]ServiceFilter{\n\t\"AWS/ApiGateway\": {\n\t\t// ApiGateway ARNs use the Id (for v1 REST APIs) and ApiId 
(for v2 APIs) instead of\n\t\t// the ApiName (display name). See https://docs.aws.amazon.com/apigateway/latest/developerguide/arn-format-reference.html\n\t\t// However, in metrics, the ApiId dimension uses the ApiName as value.\n\t\t//\n\t\t// Here we use the ApiGateway API to map resource correctly. For backward compatibility,\n\t\t// in v1 REST APIs we change the ARN to replace the ApiId with ApiName, while for v2 APIs\n\t\t// we leave the ARN as-is.\n\t\tFilterFunc: func(ctx context.Context, client client, inputResources []*model.TaggedResource) ([]*model.TaggedResource, error) {\n\t\t\tvar limit int32 = 500 // max number of results per page. default=25, max=500\n\t\t\tconst maxPages = 10\n\t\t\tinput := apigateway.GetRestApisInput{Limit: &limit}\n\t\t\toutput := apigateway.GetRestApisOutput{}\n\t\t\tvar pageNum int\n\n\t\t\tpaginator := apigateway.NewGetRestApisPaginator(client.apiGatewayAPI, &input, func(options *apigateway.GetRestApisPaginatorOptions) {\n\t\t\t\toptions.StopOnDuplicateToken = true\n\t\t\t})\n\t\t\tfor paginator.HasMorePages() && pageNum <= maxPages {\n\t\t\t\tpage, err := paginator.NextPage(ctx)\n\t\t\t\tpromutil.APIGatewayAPICounter.Inc()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error calling apiGatewayAPI.GetRestApis, %w\", err)\n\t\t\t\t}\n\t\t\t\tpageNum++\n\t\t\t\toutput.Items = append(output.Items, page.Items...)\n\t\t\t}\n\n\t\t\toutputV2, err := client.apiGatewayV2API.GetApis(ctx, &apigatewayv2.GetApisInput{})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"error calling apigatewayv2.GetApis, %w\", err)\n\t\t\t}\n\n\t\t\tvar outputResources []*model.TaggedResource\n\t\t\tfor _, resource := range inputResources {\n\t\t\t\tfor i, gw := range output.Items {\n\t\t\t\t\tif strings.HasSuffix(resource.ARN, \"/restapis/\"+*gw.Id) {\n\t\t\t\t\t\tr := resource\n\t\t\t\t\t\tr.ARN = strings.ReplaceAll(resource.ARN, *gw.Id, *gw.Name)\n\t\t\t\t\t\toutputResources = append(outputResources, r)\n\t\t\t\t\t\toutput.Items = 
append(output.Items[:i], output.Items[i+1:]...)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor i, gw := range outputV2.Items {\n\t\t\t\t\tif strings.HasSuffix(resource.ARN, \"/apis/\"+*gw.ApiId) {\n\t\t\t\t\t\toutputResources = append(outputResources, resource)\n\t\t\t\t\t\toutputV2.Items = append(outputV2.Items[:i], outputV2.Items[i+1:]...)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn outputResources, nil\n\t\t},\n\t},\n\t\"AWS/AutoScaling\": {\n\t\tResourceFunc: func(ctx context.Context, client client, job model.DiscoveryJob, region string) ([]*model.TaggedResource, error) {\n\t\t\tpageNum := 0\n\t\t\tvar resources []*model.TaggedResource\n\t\t\tpaginator := autoscaling.NewDescribeAutoScalingGroupsPaginator(client.autoscalingAPI, &autoscaling.DescribeAutoScalingGroupsInput{}, func(options *autoscaling.DescribeAutoScalingGroupsPaginatorOptions) {\n\t\t\t\toptions.StopOnDuplicateToken = true\n\t\t\t})\n\t\t\tfor paginator.HasMorePages() && pageNum < 100 {\n\t\t\t\tpage, err := paginator.NextPage(ctx)\n\t\t\t\tpromutil.AutoScalingAPICounter.Inc()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error calling autoscalingAPI.DescribeAutoScalingGroups, %w\", err)\n\t\t\t\t}\n\t\t\t\tpageNum++\n\n\t\t\t\tfor _, asg := range page.AutoScalingGroups {\n\t\t\t\t\tresource := model.TaggedResource{\n\t\t\t\t\t\tARN:       *asg.AutoScalingGroupARN,\n\t\t\t\t\t\tNamespace: job.Namespace,\n\t\t\t\t\t\tRegion:    region,\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, t := range asg.Tags {\n\t\t\t\t\t\tresource.Tags = append(resource.Tags, model.Tag{Key: *t.Key, Value: *t.Value})\n\t\t\t\t\t}\n\n\t\t\t\t\tif resource.FilterThroughTags(job.SearchTags) {\n\t\t\t\t\t\tresources = append(resources, &resource)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn resources, nil\n\t\t},\n\t},\n\t\"AWS/DMS\": {\n\t\t// Append the replication instance identifier to DMS task and instance ARNs\n\t\tFilterFunc: func(ctx context.Context, client client, inputResources 
[]*model.TaggedResource) ([]*model.TaggedResource, error) {\n\t\t\tif len(inputResources) == 0 {\n\t\t\t\treturn inputResources, nil\n\t\t\t}\n\n\t\t\treplicationInstanceIdentifiers := make(map[string]string)\n\n\t\t\tpageNum := 0\n\t\t\tinstancesPaginator := databasemigrationservice.NewDescribeReplicationInstancesPaginator(client.dmsAPI, &databasemigrationservice.DescribeReplicationInstancesInput{}, func(options *databasemigrationservice.DescribeReplicationInstancesPaginatorOptions) {\n\t\t\t\toptions.StopOnDuplicateToken = true\n\t\t\t})\n\t\t\tfor instancesPaginator.HasMorePages() && pageNum < 100 {\n\t\t\t\tpage, err := instancesPaginator.NextPage(ctx)\n\t\t\t\tpromutil.DmsAPICounter.Inc()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error calling dmsAPI.DescribeReplicationInstances, %w\", err)\n\t\t\t\t}\n\t\t\t\tpageNum++\n\n\t\t\t\tfor _, instance := range page.ReplicationInstances {\n\t\t\t\t\treplicationInstanceIdentifiers[*instance.ReplicationInstanceArn] = *instance.ReplicationInstanceIdentifier\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tpageNum = 0\n\t\t\ttasksPaginator := databasemigrationservice.NewDescribeReplicationTasksPaginator(client.dmsAPI, &databasemigrationservice.DescribeReplicationTasksInput{}, func(options *databasemigrationservice.DescribeReplicationTasksPaginatorOptions) {\n\t\t\t\toptions.StopOnDuplicateToken = true\n\t\t\t})\n\t\t\tfor tasksPaginator.HasMorePages() && pageNum < 100 {\n\t\t\t\tpage, err := tasksPaginator.NextPage(ctx)\n\t\t\t\tpromutil.DmsAPICounter.Inc()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error calling dmsAPI.DescribeReplicationTasks, %w\", err)\n\t\t\t\t}\n\t\t\t\tpageNum++\n\n\t\t\t\tfor _, task := range page.ReplicationTasks {\n\t\t\t\t\ttaskInstanceArn := *task.ReplicationInstanceArn\n\t\t\t\t\tif instanceIdentifier, ok := replicationInstanceIdentifiers[taskInstanceArn]; ok {\n\t\t\t\t\t\treplicationInstanceIdentifiers[*task.ReplicationTaskArn] = 
instanceIdentifier\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar outputResources []*model.TaggedResource\n\t\t\tfor _, resource := range inputResources {\n\t\t\t\tr := resource\n\t\t\t\t// Append the replication instance identifier to replication instance and task ARNs\n\t\t\t\tif instanceIdentifier, ok := replicationInstanceIdentifiers[r.ARN]; ok {\n\t\t\t\t\tr.ARN = fmt.Sprintf(\"%s/%s\", r.ARN, instanceIdentifier)\n\t\t\t\t}\n\t\t\t\toutputResources = append(outputResources, r)\n\t\t\t}\n\t\t\treturn outputResources, nil\n\t\t},\n\t},\n\t\"AWS/EC2Spot\": {\n\t\tResourceFunc: func(ctx context.Context, client client, job model.DiscoveryJob, region string) ([]*model.TaggedResource, error) {\n\t\t\tpageNum := 0\n\t\t\tvar resources []*model.TaggedResource\n\t\t\tpaginator := ec2.NewDescribeSpotFleetRequestsPaginator(client.ec2API, &ec2.DescribeSpotFleetRequestsInput{}, func(options *ec2.DescribeSpotFleetRequestsPaginatorOptions) {\n\t\t\t\toptions.StopOnDuplicateToken = true\n\t\t\t})\n\t\t\tfor paginator.HasMorePages() && pageNum < 100 {\n\t\t\t\tpage, err := paginator.NextPage(ctx)\n\t\t\t\tpromutil.Ec2APICounter.Inc()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error calling describing ec2API.DescribeSpotFleetRequests, %w\", err)\n\t\t\t\t}\n\t\t\t\tpageNum++\n\n\t\t\t\tfor _, ec2Spot := range page.SpotFleetRequestConfigs {\n\t\t\t\t\tresource := model.TaggedResource{\n\t\t\t\t\t\tARN:       *ec2Spot.SpotFleetRequestId,\n\t\t\t\t\t\tNamespace: job.Namespace,\n\t\t\t\t\t\tRegion:    region,\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, t := range ec2Spot.Tags {\n\t\t\t\t\t\tresource.Tags = append(resource.Tags, model.Tag{Key: *t.Key, Value: *t.Value})\n\t\t\t\t\t}\n\n\t\t\t\t\tif resource.FilterThroughTags(job.SearchTags) {\n\t\t\t\t\t\tresources = append(resources, &resource)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn resources, nil\n\t\t},\n\t},\n\t\"AWS/Prometheus\": {\n\t\tResourceFunc: func(ctx context.Context, client client, job model.DiscoveryJob, 
region string) ([]*model.TaggedResource, error) {\n\t\t\tpageNum := 0\n\t\t\tvar resources []*model.TaggedResource\n\t\t\tpaginator := amp.NewListWorkspacesPaginator(client.prometheusSvcAPI, &amp.ListWorkspacesInput{}, func(options *amp.ListWorkspacesPaginatorOptions) {\n\t\t\t\toptions.StopOnDuplicateToken = true\n\t\t\t})\n\t\t\tfor paginator.HasMorePages() && pageNum < 100 {\n\t\t\t\tpage, err := paginator.NextPage(ctx)\n\t\t\t\tpromutil.ManagedPrometheusAPICounter.Inc()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error while calling prometheusSvcAPI.ListWorkspaces, %w\", err)\n\t\t\t\t}\n\t\t\t\tpageNum++\n\n\t\t\t\tfor _, ws := range page.Workspaces {\n\t\t\t\t\tresource := model.TaggedResource{\n\t\t\t\t\t\tARN:       *ws.Arn,\n\t\t\t\t\t\tNamespace: job.Namespace,\n\t\t\t\t\t\tRegion:    region,\n\t\t\t\t\t}\n\n\t\t\t\t\tfor key, value := range ws.Tags {\n\t\t\t\t\t\tresource.Tags = append(resource.Tags, model.Tag{Key: key, Value: value})\n\t\t\t\t\t}\n\n\t\t\t\t\tif resource.FilterThroughTags(job.SearchTags) {\n\t\t\t\t\t\tresources = append(resources, &resource)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn resources, nil\n\t\t},\n\t},\n\t\"AWS/StorageGateway\": {\n\t\tResourceFunc: func(ctx context.Context, client client, job model.DiscoveryJob, region string) ([]*model.TaggedResource, error) {\n\t\t\tpageNum := 0\n\t\t\tvar resources []*model.TaggedResource\n\t\t\tpaginator := storagegateway.NewListGatewaysPaginator(client.storageGatewayAPI, &storagegateway.ListGatewaysInput{}, func(options *storagegateway.ListGatewaysPaginatorOptions) {\n\t\t\t\toptions.StopOnDuplicateToken = true\n\t\t\t})\n\t\t\tfor paginator.HasMorePages() && pageNum < 100 {\n\t\t\t\tpage, err := paginator.NextPage(ctx)\n\t\t\t\tpromutil.StoragegatewayAPICounter.Inc()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error calling storageGatewayAPI.ListGateways, %w\", err)\n\t\t\t\t}\n\t\t\t\tpageNum++\n\n\t\t\t\tfor _, gwa := range page.Gateways 
{\n\t\t\t\t\tresource := model.TaggedResource{\n\t\t\t\t\t\tARN:       fmt.Sprintf(\"%s/%s\", *gwa.GatewayId, *gwa.GatewayName),\n\t\t\t\t\t\tNamespace: job.Namespace,\n\t\t\t\t\t\tRegion:    region,\n\t\t\t\t\t}\n\n\t\t\t\t\ttagsRequest := &storagegateway.ListTagsForResourceInput{\n\t\t\t\t\t\tResourceARN: gwa.GatewayARN,\n\t\t\t\t\t}\n\t\t\t\t\ttagsResponse, _ := client.storageGatewayAPI.ListTagsForResource(ctx, tagsRequest)\n\t\t\t\t\tpromutil.StoragegatewayAPICounter.Inc()\n\n\t\t\t\t\tfor _, t := range tagsResponse.Tags {\n\t\t\t\t\t\tresource.Tags = append(resource.Tags, model.Tag{Key: *t.Key, Value: *t.Value})\n\t\t\t\t\t}\n\n\t\t\t\t\tif resource.FilterThroughTags(job.SearchTags) {\n\t\t\t\t\t\tresources = append(resources, &resource)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn resources, nil\n\t\t},\n\t},\n\t\"AWS/TransitGateway\": {\n\t\tResourceFunc: func(ctx context.Context, client client, job model.DiscoveryJob, region string) ([]*model.TaggedResource, error) {\n\t\t\tpageNum := 0\n\t\t\tvar resources []*model.TaggedResource\n\t\t\tpaginator := ec2.NewDescribeTransitGatewayAttachmentsPaginator(client.ec2API, &ec2.DescribeTransitGatewayAttachmentsInput{}, func(options *ec2.DescribeTransitGatewayAttachmentsPaginatorOptions) {\n\t\t\t\toptions.StopOnDuplicateToken = true\n\t\t\t})\n\t\t\tfor paginator.HasMorePages() && pageNum < 100 {\n\t\t\t\tpage, err := paginator.NextPage(ctx)\n\t\t\t\tpromutil.Ec2APICounter.Inc()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error calling ec2API.DescribeTransitGatewayAttachments, %w\", err)\n\t\t\t\t}\n\t\t\t\tpageNum++\n\n\t\t\t\tfor _, tgwa := range page.TransitGatewayAttachments {\n\t\t\t\t\tresource := model.TaggedResource{\n\t\t\t\t\t\tARN:       fmt.Sprintf(\"%s/%s\", *tgwa.TransitGatewayId, *tgwa.TransitGatewayAttachmentId),\n\t\t\t\t\t\tNamespace: job.Namespace,\n\t\t\t\t\t\tRegion:    region,\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, t := range tgwa.Tags {\n\t\t\t\t\t\tresource.Tags = 
append(resource.Tags, model.Tag{Key: *t.Key, Value: *t.Value})\n\t\t\t\t\t}\n\n\t\t\t\t\tif resource.FilterThroughTags(job.SearchTags) {\n\t\t\t\t\t\tresources = append(resources, &resource)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn resources, nil\n\t\t},\n\t},\n\t\"AWS/DDoSProtection\": {\n\t\t// Resource discovery only targets the protections, protections are global, so they will only be discoverable in us-east-1.\n\t\t// Outside us-east-1 no resources are going to be found. We use the shield.ListProtections API to get the protections +\n\t\t// protected resources to add to the tagged resources. This data is eventually usable for joining with metrics.\n\t\tResourceFunc: func(ctx context.Context, c client, job model.DiscoveryJob, region string) ([]*model.TaggedResource, error) {\n\t\t\tvar output []*model.TaggedResource\n\t\t\t// Default page size is only 20 which can easily lead to throttling\n\t\t\trequest := &shield.ListProtectionsInput{MaxResults: aws.Int32(1000)}\n\t\t\tpaginator := shield.NewListProtectionsPaginator(c.shieldAPI, request, func(options *shield.ListProtectionsPaginatorOptions) {\n\t\t\t\toptions.StopOnDuplicateToken = true\n\t\t\t})\n\t\t\tpageNum := 0\n\t\t\tfor paginator.HasMorePages() && pageNum < 100 {\n\t\t\t\tpromutil.ShieldAPICounter.Inc()\n\t\t\t\tpage, err := paginator.NextPage(ctx)\n\t\t\t\tpageNum++\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error calling shieldAPI.ListProtections, %w\", err)\n\t\t\t\t}\n\n\t\t\t\tfor _, protection := range page.Protections {\n\t\t\t\t\tprotectedResourceArn := *protection.ResourceArn\n\t\t\t\t\tprotectionArn := *protection.ProtectionArn\n\t\t\t\t\tprotectedResource, err := arn.Parse(protectedResourceArn)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"shieldAPI.ListProtections returned an invalid ProtectedResourceArn %s for Protection %s\", protectedResourceArn, protectionArn)\n\t\t\t\t\t}\n\n\t\t\t\t\t// Shield covers regional services,\n\t\t\t\t\t// \t\tEC2 
(arn:aws:ec2:<REGION>:<ACCOUNT_ID>:eip-allocation/*)\n\t\t\t\t\t// \t\tload balancers (arn:aws:elasticloadbalancing:<REGION>:<ACCOUNT_ID>:loadbalancer:*)\n\t\t\t\t\t// \twhere the region of the protectedResource ARN should match the region for the job to prevent\n\t\t\t\t\t// \tduplicating resources across all regions\n\t\t\t\t\t// Shield also covers other global services,\n\t\t\t\t\t// \t\tglobal accelerator (arn:aws:globalaccelerator::<ACCOUNT_ID>:accelerator/*)\n\t\t\t\t\t//\t\troute53 (arn:aws:route53:::hostedzone/*)\n\t\t\t\t\t//\twhere the protectedResource contains no region. Just like other global services the metrics for\n\t\t\t\t\t//\tthese land in us-east-1 so any protected resource without a region should be added when the job\n\t\t\t\t\t//\tis for us-east-1\n\t\t\t\t\tif protectedResource.Region == region || (protectedResource.Region == \"\" && region == \"us-east-1\") {\n\t\t\t\t\t\ttaggedResource := &model.TaggedResource{\n\t\t\t\t\t\t\tARN:       protectedResourceArn,\n\t\t\t\t\t\t\tNamespace: job.Namespace,\n\t\t\t\t\t\t\tRegion:    region,\n\t\t\t\t\t\t\tTags:      []model.Tag{{Key: \"ProtectionArn\", Value: protectionArn}},\n\t\t\t\t\t\t}\n\t\t\t\t\t\toutput = append(output, taggedResource)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn output, nil\n\t\t},\n\t},\n}\n"
  },
  {
    "path": "pkg/clients/tagging/filters_test.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage tagging\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/apigateway\"\n\tapigtypes \"github.com/aws/aws-sdk-go-v2/service/apigateway/types\"\n\t\"github.com/aws/aws-sdk-go-v2/service/apigatewayv2\"\n\tapigv2types \"github.com/aws/aws-sdk-go-v2/service/apigatewayv2/types\"\n\t\"github.com/aws/aws-sdk-go-v2/service/databasemigrationservice\"\n\tdmstypes \"github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types\"\n\t\"github.com/aws/smithy-go/middleware\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nfunc TestValidServiceFilterNames(t *testing.T) {\n\tfor svc, filter := range ServiceFilters {\n\t\tif config.SupportedServices.GetService(svc) == nil {\n\t\t\tt.Errorf(\"invalid service name '%s' in ServiceFilters\", svc)\n\t\t}\n\n\t\tif filter.FilterFunc == nil && filter.ResourceFunc == nil {\n\t\t\tt.Errorf(\"no filter functions defined for service name '%s'\", svc)\n\t\t}\n\t}\n}\n\n// mockAPIOption returns middleware that intercepts AWS SDK v2 API calls and returns\n// mock responses keyed by operation name, short-circuiting before the HTTP call.\nfunc mockAPIOption(responses 
map[string]interface{}) func(*middleware.Stack) error {\n\treturn func(stack *middleware.Stack) error {\n\t\treturn stack.Finalize.Add(\n\t\t\tmiddleware.FinalizeMiddlewareFunc(\"mock\",\n\t\t\t\tfunc(ctx context.Context, _ middleware.FinalizeInput, _ middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) {\n\t\t\t\t\topName := middleware.GetOperationName(ctx)\n\t\t\t\t\tif resp, ok := responses[opName]; ok {\n\t\t\t\t\t\treturn middleware.FinalizeOutput{Result: resp}, middleware.Metadata{}, nil\n\t\t\t\t\t}\n\t\t\t\t\treturn middleware.FinalizeOutput{}, middleware.Metadata{}, fmt.Errorf(\"unexpected operation: %s\", opName)\n\t\t\t\t},\n\t\t\t),\n\t\t\tmiddleware.Before,\n\t\t)\n\t}\n}\n\nfunc TestApiGatewayFilterFunc(t *testing.T) {\n\ttests := []struct {\n\t\tname            string\n\t\tapiGatewayAPI   *apigateway.Client\n\t\tapiGatewayV2API *apigatewayv2.Client\n\t\tinputResources  []*model.TaggedResource\n\t\toutputResources []*model.TaggedResource\n\t}{\n\t\t{\n\t\t\tname: \"API Gateway v1 REST API: stages are filtered and IDs replaced with names\",\n\t\t\tapiGatewayAPI: apigateway.New(apigateway.Options{\n\t\t\t\tRegion: \"us-east-1\",\n\t\t\t\tAPIOptions: []func(*middleware.Stack) error{\n\t\t\t\t\tmockAPIOption(map[string]interface{}{\n\t\t\t\t\t\t\"GetRestApis\": &apigateway.GetRestApisOutput{\n\t\t\t\t\t\t\tItems: []apigtypes.RestApi{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tId:   aws.String(\"gwid1234\"),\n\t\t\t\t\t\t\t\t\tName: aws.String(\"apiname\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t\t},\n\t\t\t}),\n\t\t\tapiGatewayV2API: apigatewayv2.New(apigatewayv2.Options{\n\t\t\t\tRegion: \"us-east-1\",\n\t\t\t\tAPIOptions: []func(*middleware.Stack) error{\n\t\t\t\t\tmockAPIOption(map[string]interface{}{\n\t\t\t\t\t\t\"GetApis\": &apigatewayv2.GetApisOutput{\n\t\t\t\t\t\t\tItems: []apigv2types.Api{},\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t\t},\n\t\t\t}),\n\t\t\tinputResources: 
[]*model.TaggedResource{\n\t\t\t\t{\n\t\t\t\t\tARN:       \"arn:aws:apigateway:us-east-1::/restapis/gwid1234/stages/main\",\n\t\t\t\t\tNamespace: \"apigateway\",\n\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\tTags:      []model.Tag{{Key: \"Test\", Value: \"Value\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tARN:       \"arn:aws:apigateway:us-east-1::/restapis/gwid1234\",\n\t\t\t\t\tNamespace: \"apigateway\",\n\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\tTags:      []model.Tag{{Key: \"Test\", Value: \"Value 2\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t\toutputResources: []*model.TaggedResource{\n\t\t\t\t{\n\t\t\t\t\tARN:       \"arn:aws:apigateway:us-east-1::/restapis/apiname\",\n\t\t\t\t\tNamespace: \"apigateway\",\n\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\tTags:      []model.Tag{{Key: \"Test\", Value: \"Value 2\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"API Gateway v2 REST API: stages are filtered\",\n\t\t\tapiGatewayAPI: apigateway.New(apigateway.Options{\n\t\t\t\tRegion: \"us-east-1\",\n\t\t\t\tAPIOptions: []func(*middleware.Stack) error{\n\t\t\t\t\tmockAPIOption(map[string]interface{}{\n\t\t\t\t\t\t\"GetRestApis\": &apigateway.GetRestApisOutput{\n\t\t\t\t\t\t\tItems: []apigtypes.RestApi{},\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t\t},\n\t\t\t}),\n\t\t\tapiGatewayV2API: apigatewayv2.New(apigatewayv2.Options{\n\t\t\t\tRegion: \"us-east-1\",\n\t\t\t\tAPIOptions: []func(*middleware.Stack) error{\n\t\t\t\t\tmockAPIOption(map[string]interface{}{\n\t\t\t\t\t\t\"GetApis\": &apigatewayv2.GetApisOutput{\n\t\t\t\t\t\t\tItems: []apigv2types.Api{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tApiId: aws.String(\"gwid9876\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t\t},\n\t\t\t}),\n\t\t\tinputResources: []*model.TaggedResource{\n\t\t\t\t{\n\t\t\t\t\tARN:       \"arn:aws:apigateway:us-east-1::/apis/gwid9876/stages/$default\",\n\t\t\t\t\tNamespace: \"apigateway\",\n\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\tTags:      []model.Tag{{Key: \"Test\", 
Value: \"Value\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tARN:       \"arn:aws:apigateway:us-east-1::/apis/gwid9876\",\n\t\t\t\t\tNamespace: \"apigateway\",\n\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\tTags:      []model.Tag{{Key: \"Test\", Value: \"Value 2\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t\toutputResources: []*model.TaggedResource{\n\t\t\t\t{\n\t\t\t\t\tARN:       \"arn:aws:apigateway:us-east-1::/apis/gwid9876\",\n\t\t\t\t\tNamespace: \"apigateway\",\n\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\tTags:      []model.Tag{{Key: \"Test\", Value: \"Value 2\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tc := client{\n\t\t\t\tapiGatewayAPI:   tc.apiGatewayAPI,\n\t\t\t\tapiGatewayV2API: tc.apiGatewayV2API,\n\t\t\t}\n\n\t\t\tfilter := ServiceFilters[\"AWS/ApiGateway\"]\n\t\t\trequire.NotNil(t, filter.FilterFunc)\n\n\t\t\toutputResources, err := filter.FilterFunc(context.Background(), c, tc.inputResources)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, tc.outputResources, outputResources)\n\t\t})\n\t}\n}\n\nfunc TestDMSFilterFunc(t *testing.T) {\n\ttests := []struct {\n\t\tname            string\n\t\tdmsAPI          *databasemigrationservice.Client\n\t\tinputResources  []*model.TaggedResource\n\t\toutputResources []*model.TaggedResource\n\t}{\n\t\t{\n\t\t\tname:            \"empty input resources\",\n\t\t\tinputResources:  []*model.TaggedResource{},\n\t\t\toutputResources: []*model.TaggedResource{},\n\t\t},\n\t\t{\n\t\t\tname: \"replication instance identifiers appended to task and instance ARNs\",\n\t\t\tdmsAPI: databasemigrationservice.New(databasemigrationservice.Options{\n\t\t\t\tRegion: \"us-east-1\",\n\t\t\t\tAPIOptions: []func(*middleware.Stack) error{\n\t\t\t\t\tmockAPIOption(map[string]interface{}{\n\t\t\t\t\t\t\"DescribeReplicationInstances\": &databasemigrationservice.DescribeReplicationInstancesOutput{\n\t\t\t\t\t\t\tReplicationInstances: 
[]dmstypes.ReplicationInstance{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tReplicationInstanceArn:        aws.String(\"arn:aws:dms:us-east-1:123123123123:rep:ABCDEFG1234567890\"),\n\t\t\t\t\t\t\t\t\tReplicationInstanceIdentifier: aws.String(\"repl-instance-identifier-1\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tReplicationInstanceArn:        aws.String(\"arn:aws:dms:us-east-1:123123123123:rep:ZZZZZZZZZZZZZZZZZ\"),\n\t\t\t\t\t\t\t\t\tReplicationInstanceIdentifier: aws.String(\"repl-instance-identifier-2\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tReplicationInstanceArn:        aws.String(\"arn:aws:dms:us-east-1:123123123123:rep:YYYYYYYYYYYYYYYYY\"),\n\t\t\t\t\t\t\t\t\tReplicationInstanceIdentifier: aws.String(\"repl-instance-identifier-3\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"DescribeReplicationTasks\": &databasemigrationservice.DescribeReplicationTasksOutput{\n\t\t\t\t\t\t\tReplicationTasks: []dmstypes.ReplicationTask{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tReplicationTaskArn:     aws.String(\"arn:aws:dms:us-east-1:123123123123:task:9999999999999999\"),\n\t\t\t\t\t\t\t\t\tReplicationInstanceArn: aws.String(\"arn:aws:dms:us-east-1:123123123123:rep:ZZZZZZZZZZZZZZZZZ\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tReplicationTaskArn:     aws.String(\"arn:aws:dms:us-east-1:123123123123:task:2222222222222222\"),\n\t\t\t\t\t\t\t\t\tReplicationInstanceArn: aws.String(\"arn:aws:dms:us-east-1:123123123123:rep:ZZZZZZZZZZZZZZZZZ\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tReplicationTaskArn:     aws.String(\"arn:aws:dms:us-east-1:123123123123:task:3333333333333333\"),\n\t\t\t\t\t\t\t\t\tReplicationInstanceArn: aws.String(\"arn:aws:dms:us-east-1:123123123123:rep:WWWWWWWWWWWWWWWWW\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t\t},\n\t\t\t}),\n\t\t\tinputResources: []*model.TaggedResource{\n\t\t\t\t{\n\t\t\t\t\tARN: 
\"arn:aws:dms:us-east-1:123123123123:rep:ABCDEFG1234567890\", Namespace: \"dms\", Region: \"us-east-1\",\n\t\t\t\t\tTags: []model.Tag{{Key: \"Test\", Value: \"Value\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tARN: \"arn:aws:dms:us-east-1:123123123123:rep:WXYZ987654321\", Namespace: \"dms\", Region: \"us-east-1\",\n\t\t\t\t\tTags: []model.Tag{{Key: \"Test\", Value: \"Value 2\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tARN: \"arn:aws:dms:us-east-1:123123123123:task:9999999999999999\", Namespace: \"dms\", Region: \"us-east-1\",\n\t\t\t\t\tTags: []model.Tag{{Key: \"Test\", Value: \"Value 3\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tARN: \"arn:aws:dms:us-east-1:123123123123:task:5555555555555555\", Namespace: \"dms\", Region: \"us-east-1\",\n\t\t\t\t\tTags: []model.Tag{{Key: \"Test\", Value: \"Value 4\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tARN: \"arn:aws:dms:us-east-1:123123123123:subgrp:demo-subgrp\", Namespace: \"dms\", Region: \"us-east-1\",\n\t\t\t\t\tTags: []model.Tag{{Key: \"Test\", Value: \"Value 5\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tARN: \"arn:aws:dms:us-east-1:123123123123:endpoint:1111111111111111\", Namespace: \"dms\", Region: \"us-east-1\",\n\t\t\t\t\tTags: []model.Tag{{Key: \"Test\", Value: \"Value 6\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t\toutputResources: []*model.TaggedResource{\n\t\t\t\t{\n\t\t\t\t\tARN: \"arn:aws:dms:us-east-1:123123123123:rep:ABCDEFG1234567890/repl-instance-identifier-1\", Namespace: \"dms\", Region: \"us-east-1\",\n\t\t\t\t\tTags: []model.Tag{{Key: \"Test\", Value: \"Value\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tARN: \"arn:aws:dms:us-east-1:123123123123:rep:WXYZ987654321\", Namespace: \"dms\", Region: \"us-east-1\",\n\t\t\t\t\tTags: []model.Tag{{Key: \"Test\", Value: \"Value 2\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tARN: \"arn:aws:dms:us-east-1:123123123123:task:9999999999999999/repl-instance-identifier-2\", Namespace: \"dms\", Region: \"us-east-1\",\n\t\t\t\t\tTags: []model.Tag{{Key: \"Test\", Value: \"Value 
3\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tARN: \"arn:aws:dms:us-east-1:123123123123:task:5555555555555555\", Namespace: \"dms\", Region: \"us-east-1\",\n\t\t\t\t\tTags: []model.Tag{{Key: \"Test\", Value: \"Value 4\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tARN: \"arn:aws:dms:us-east-1:123123123123:subgrp:demo-subgrp\", Namespace: \"dms\", Region: \"us-east-1\",\n\t\t\t\t\tTags: []model.Tag{{Key: \"Test\", Value: \"Value 5\"}},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tARN: \"arn:aws:dms:us-east-1:123123123123:endpoint:1111111111111111\", Namespace: \"dms\", Region: \"us-east-1\",\n\t\t\t\t\tTags: []model.Tag{{Key: \"Test\", Value: \"Value 6\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tc := client{\n\t\t\t\tdmsAPI: tc.dmsAPI,\n\t\t\t}\n\n\t\t\tfilter := ServiceFilters[\"AWS/DMS\"]\n\t\t\trequire.NotNil(t, filter.FilterFunc)\n\n\t\t\toutputResources, err := filter.FilterFunc(context.Background(), c, tc.inputResources)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, tc.outputResources, outputResources)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/config/config.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage config\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"os\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/grafana/regexp\"\n\t\"go.yaml.in/yaml/v2\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype ScrapeConf struct {\n\tAPIVersion      string             `yaml:\"apiVersion\"`\n\tStsRegion       string             `yaml:\"sts-region\"`\n\tDiscovery       Discovery          `yaml:\"discovery\"`\n\tStatic          []*Static          `yaml:\"static\"`\n\tCustomNamespace []*CustomNamespace `yaml:\"customNamespace\"`\n}\n\ntype Discovery struct {\n\tExportedTagsOnMetrics ExportedTagsOnMetrics `yaml:\"exportedTagsOnMetrics\"`\n\tJobs                  []*Job                `yaml:\"jobs\"`\n}\n\ntype ExportedTagsOnMetrics map[string][]string\n\ntype Tag struct {\n\tKey   string `yaml:\"key\"`\n\tValue string `yaml:\"value\"`\n}\n\ntype JobLevelMetricFields struct {\n\tStatistics             []string `yaml:\"statistics\"`\n\tPeriod                 int64    `yaml:\"period\"`\n\tLength                 int64    `yaml:\"length\"`\n\tDelay                  int64    `yaml:\"delay\"`\n\tNilToZero              *bool    `yaml:\"nilToZero\"`\n\tAddCloudwatchTimestamp *bool    
`yaml:\"addCloudwatchTimestamp\"`\n\tExportAllDataPoints    *bool    `yaml:\"exportAllDataPoints\"`\n}\n\ntype Job struct {\n\tRegions                     []string          `yaml:\"regions\"`\n\tType                        string            `yaml:\"type\"`\n\tRoles                       []Role            `yaml:\"roles\"`\n\tSearchTags                  []Tag             `yaml:\"searchTags\"`\n\tCustomTags                  []Tag             `yaml:\"customTags\"`\n\tDimensionNameRequirements   []string          `yaml:\"dimensionNameRequirements\"`\n\tMetrics                     []*Metric         `yaml:\"metrics\"`\n\tRoundingPeriod              *int64            `yaml:\"roundingPeriod\"`\n\tRecentlyActiveOnly          bool              `yaml:\"recentlyActiveOnly\"`\n\tIncludeContextOnInfoMetrics bool              `yaml:\"includeContextOnInfoMetrics\"`\n\tEnhancedMetrics             []*EnhancedMetric `yaml:\"enhancedMetrics\"`\n\tJobLevelMetricFields        `yaml:\",inline\"`\n}\n\ntype EnhancedMetric struct {\n\tName string `yaml:\"name\"`\n}\n\ntype Static struct {\n\tName       string      `yaml:\"name\"`\n\tRegions    []string    `yaml:\"regions\"`\n\tRoles      []Role      `yaml:\"roles\"`\n\tNamespace  string      `yaml:\"namespace\"`\n\tCustomTags []Tag       `yaml:\"customTags\"`\n\tDimensions []Dimension `yaml:\"dimensions\"`\n\tMetrics    []*Metric   `yaml:\"metrics\"`\n}\n\ntype CustomNamespace struct {\n\tRegions                   []string  `yaml:\"regions\"`\n\tName                      string    `yaml:\"name\"`\n\tNamespace                 string    `yaml:\"namespace\"`\n\tRecentlyActiveOnly        bool      `yaml:\"recentlyActiveOnly\"`\n\tRoles                     []Role    `yaml:\"roles\"`\n\tMetrics                   []*Metric `yaml:\"metrics\"`\n\tCustomTags                []Tag     `yaml:\"customTags\"`\n\tDimensionNameRequirements []string  `yaml:\"dimensionNameRequirements\"`\n\tRoundingPeriod            *int64    
`yaml:\"roundingPeriod\"`\n\tJobLevelMetricFields      `yaml:\",inline\"`\n}\n\ntype Metric struct {\n\tName                   string   `yaml:\"name\"`\n\tStatistics             []string `yaml:\"statistics\"`\n\tPeriod                 int64    `yaml:\"period\"`\n\tLength                 int64    `yaml:\"length\"`\n\tDelay                  int64    `yaml:\"delay\"`\n\tNilToZero              *bool    `yaml:\"nilToZero\"`\n\tAddCloudwatchTimestamp *bool    `yaml:\"addCloudwatchTimestamp\"`\n\tExportAllDataPoints    *bool    `yaml:\"exportAllDataPoints\"`\n}\n\ntype Dimension struct {\n\tName  string `yaml:\"name\"`\n\tValue string `yaml:\"value\"`\n}\n\ntype Role struct {\n\tRoleArn    string `yaml:\"roleArn\"`\n\tExternalID string `yaml:\"externalId\"`\n}\n\nfunc (r *Role) ValidateRole(roleIdx int, parent string) error {\n\tif r.RoleArn == \"\" && r.ExternalID != \"\" {\n\t\treturn fmt.Errorf(\"Role [%d] in %v: RoleArn should not be empty\", roleIdx, parent)\n\t}\n\n\treturn nil\n}\n\nfunc (c *ScrapeConf) Load(file string, logger *slog.Logger) (model.JobsConfig, error) {\n\tyamlFile, err := os.ReadFile(file)\n\tif err != nil {\n\t\treturn model.JobsConfig{}, err\n\t}\n\terr = yaml.Unmarshal(yamlFile, c)\n\tif err != nil {\n\t\treturn model.JobsConfig{}, err\n\t}\n\n\tlogConfigErrors(yamlFile, logger)\n\n\tfor _, job := range c.Discovery.Jobs {\n\t\tif len(job.Roles) == 0 {\n\t\t\tjob.Roles = []Role{{}} // use current IAM role\n\t\t}\n\t}\n\n\tfor _, job := range c.CustomNamespace {\n\t\tif len(job.Roles) == 0 {\n\t\t\tjob.Roles = []Role{{}} // use current IAM role\n\t\t}\n\t}\n\n\tfor _, job := range c.Static {\n\t\tif len(job.Roles) == 0 {\n\t\t\tjob.Roles = []Role{{}} // use current IAM role\n\t\t}\n\t}\n\n\treturn c.Validate(logger)\n}\n\nfunc (c *ScrapeConf) Validate(logger *slog.Logger) (model.JobsConfig, error) {\n\tif c.Discovery.Jobs == nil && c.Static == nil && c.CustomNamespace == nil {\n\t\treturn model.JobsConfig{}, fmt.Errorf(\"at least 1 Discovery job, 
1 Static or one CustomNamespace must be defined\")\n\t}\n\n\tif c.Discovery.Jobs != nil {\n\t\tfor idx, job := range c.Discovery.Jobs {\n\t\t\terr := job.validateDiscoveryJob(logger, idx)\n\t\t\tif err != nil {\n\t\t\t\treturn model.JobsConfig{}, err\n\t\t\t}\n\t\t}\n\n\t\tif len(c.Discovery.ExportedTagsOnMetrics) > 0 {\n\t\t\tfor ns := range c.Discovery.ExportedTagsOnMetrics {\n\t\t\t\tif svc := SupportedServices.GetService(ns); svc == nil {\n\t\t\t\t\tif svc = SupportedServices.getServiceByAlias(ns); svc != nil {\n\t\t\t\t\t\treturn model.JobsConfig{}, fmt.Errorf(\"Discovery jobs: Invalid key in 'exportedTagsOnMetrics', use namespace %q rather than alias %q\", svc.Namespace, svc.Alias)\n\t\t\t\t\t}\n\t\t\t\t\treturn model.JobsConfig{}, fmt.Errorf(\"Discovery jobs: 'exportedTagsOnMetrics' key is not a valid namespace: %s\", ns)\n\t\t\t\t}\n\n\t\t\t\tjobTypeMatch := false\n\t\t\t\tfor _, job := range c.Discovery.Jobs {\n\t\t\t\t\tif job.Type == ns {\n\t\t\t\t\t\tjobTypeMatch = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !jobTypeMatch {\n\t\t\t\t\treturn model.JobsConfig{}, fmt.Errorf(\"Discovery jobs: 'exportedTagsOnMetrics' key %q does not match with any discovery job type\", ns)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif c.CustomNamespace != nil {\n\t\tfor idx, job := range c.CustomNamespace {\n\t\t\terr := job.validateCustomNamespaceJob(logger, idx)\n\t\t\tif err != nil {\n\t\t\t\treturn model.JobsConfig{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\tif c.Static != nil {\n\t\tfor idx, job := range c.Static {\n\t\t\terr := job.validateStaticJob(logger, idx)\n\t\t\tif err != nil {\n\t\t\t\treturn model.JobsConfig{}, err\n\t\t\t}\n\t\t}\n\t}\n\tif c.APIVersion != \"\" && c.APIVersion != \"v1alpha1\" {\n\t\treturn model.JobsConfig{}, fmt.Errorf(\"unknown apiVersion value '%s'\", c.APIVersion)\n\t}\n\n\treturn c.toModelConfig(), nil\n}\n\nfunc (j *Job) validateDiscoveryJob(logger *slog.Logger, jobIdx int) error {\n\tif j.Type != \"\" {\n\t\tif svc := 
SupportedServices.GetService(j.Type); svc == nil {\n\t\t\tif svc = SupportedServices.getServiceByAlias(j.Type); svc != nil {\n\t\t\t\treturn fmt.Errorf(\"Discovery job [%d]: Invalid 'type' field, use namespace %q rather than alias %q\", jobIdx, svc.Namespace, svc.Alias)\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"Discovery job [%d]: Service is not in known list!: %s\", jobIdx, j.Type)\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"Discovery job [%d]: Type should not be empty\", jobIdx)\n\t}\n\tparent := fmt.Sprintf(\"Discovery job [%s/%d]\", j.Type, jobIdx)\n\tif len(j.Roles) > 0 {\n\t\tfor roleIdx, role := range j.Roles {\n\t\t\tif err := role.ValidateRole(roleIdx, parent); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"no IAM roles configured. If the current IAM role is desired, an empty Role should be configured\")\n\t}\n\tif len(j.Regions) == 0 {\n\t\treturn fmt.Errorf(\"Discovery job [%s/%d]: Regions should not be empty\", j.Type, jobIdx)\n\t}\n\tif len(j.Metrics) == 0 && len(j.EnhancedMetrics) == 0 {\n\t\treturn fmt.Errorf(\"Discovery job [%s/%d]: Metrics and EnhancedMetrics should not both be empty\", j.Type, jobIdx)\n\t}\n\tfor metricIdx, metric := range j.Metrics {\n\t\terr := metric.validateMetric(logger, metricIdx, parent, &j.JobLevelMetricFields)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, st := range j.SearchTags {\n\t\tif _, err := regexp.Compile(st.Value); err != nil {\n\t\t\treturn fmt.Errorf(\"Discovery job [%s/%d]: search tag value for %s has invalid regex value %s: %w\", j.Type, jobIdx, st.Key, st.Value, err)\n\t\t}\n\t}\n\n\tif j.RoundingPeriod != nil {\n\t\tlogger.Warn(fmt.Sprintf(\"Discovery job [%s/%d]: Setting a rounding period is deprecated. 
In a future release it will always be enabled and set to the value of the metric period.\", j.Type, jobIdx))\n\t}\n\n\tif len(j.EnhancedMetrics) > 0 {\n\t\tsvc, err := enhancedmetrics.DefaultEnhancedMetricServiceRegistry.GetEnhancedMetricsService(j.Type)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Discovery job [%s/%d]: enhanced metrics are not supported for this namespace: %w\", j.Type, jobIdx, err)\n\t\t}\n\n\t\tfor _, em := range j.EnhancedMetrics {\n\t\t\tif !svc.IsMetricSupported(em.Name) {\n\t\t\t\treturn fmt.Errorf(\"Discovery job [%s/%d]: enhanced metric %q is not supported for this namespace\", j.Type, jobIdx, em.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (j *CustomNamespace) validateCustomNamespaceJob(logger *slog.Logger, jobIdx int) error {\n\tif j.Name == \"\" {\n\t\treturn fmt.Errorf(\"CustomNamespace job [%v]: Name should not be empty\", jobIdx)\n\t}\n\tif j.Namespace == \"\" {\n\t\treturn fmt.Errorf(\"CustomNamespace job [%v]: Namespace should not be empty\", jobIdx)\n\t}\n\tparent := fmt.Sprintf(\"CustomNamespace job [%s/%d]\", j.Namespace, jobIdx)\n\tif len(j.Roles) > 0 {\n\t\tfor roleIdx, role := range j.Roles {\n\t\t\tif err := role.ValidateRole(roleIdx, parent); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"no IAM roles configured. If the current IAM role is desired, an empty Role should be configured\")\n\t}\n\tif len(j.Regions) == 0 {\n\t\treturn fmt.Errorf(\"CustomNamespace job [%s/%d]: Regions should not be empty\", j.Name, jobIdx)\n\t}\n\tif len(j.Metrics) == 0 {\n\t\treturn fmt.Errorf(\"CustomNamespace job [%s/%d]: Metrics should not be empty\", j.Name, jobIdx)\n\t}\n\tfor metricIdx, metric := range j.Metrics {\n\t\terr := metric.validateMetric(logger, metricIdx, parent, &j.JobLevelMetricFields)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif j.RoundingPeriod != nil {\n\t\tlogger.Warn(fmt.Sprintf(\"CustomNamespace job [%s/%d]: Setting a rounding period is deprecated. 
It is always enabled and set to the value of the metric period.\", j.Name, jobIdx))\n\t}\n\treturn nil\n}\n\nfunc (j *Static) validateStaticJob(logger *slog.Logger, jobIdx int) error {\n\tif j.Name == \"\" {\n\t\treturn fmt.Errorf(\"Static job [%v]: Name should not be empty\", jobIdx)\n\t}\n\tif j.Namespace == \"\" {\n\t\treturn fmt.Errorf(\"Static job [%s/%d]: Namespace should not be empty\", j.Name, jobIdx)\n\t}\n\tparent := fmt.Sprintf(\"Static job [%s/%d]\", j.Name, jobIdx)\n\tif len(j.Roles) > 0 {\n\t\tfor roleIdx, role := range j.Roles {\n\t\t\tif err := role.ValidateRole(roleIdx, parent); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn fmt.Errorf(\"no IAM roles configured. If the current IAM role is desired, an empty Role should be configured\")\n\t}\n\tif len(j.Regions) == 0 {\n\t\treturn fmt.Errorf(\"Static job [%s/%d]: Regions should not be empty\", j.Name, jobIdx)\n\t}\n\tfor metricIdx, metric := range j.Metrics {\n\t\terr := metric.validateMetric(logger, metricIdx, parent, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (m *Metric) validateMetric(logger *slog.Logger, metricIdx int, parent string, discovery *JobLevelMetricFields) error {\n\tif m.Name == \"\" {\n\t\treturn fmt.Errorf(\"Metric [%s/%d] in %v: Name should not be empty\", m.Name, metricIdx, parent)\n\t}\n\n\tmStatistics := m.Statistics\n\tif len(mStatistics) == 0 && discovery != nil {\n\t\tif len(discovery.Statistics) > 0 {\n\t\t\tmStatistics = discovery.Statistics\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"Metric [%s/%d] in %v: Statistics should not be empty\", m.Name, metricIdx, parent)\n\t\t}\n\t}\n\n\tmPeriod := m.Period\n\tif mPeriod == 0 {\n\t\tif discovery != nil && discovery.Period != 0 {\n\t\t\tmPeriod = discovery.Period\n\t\t} else {\n\t\t\tmPeriod = model.DefaultPeriodSeconds\n\t\t}\n\t}\n\tif mPeriod < 1 {\n\t\treturn fmt.Errorf(\"Metric [%s/%d] in %v: Period value should be a positive integer\", m.Name, metricIdx, 
parent)\n\t}\n\tmLength := m.Length\n\tif mLength == 0 {\n\t\tif discovery != nil && discovery.Length != 0 {\n\t\t\tmLength = discovery.Length\n\t\t} else {\n\t\t\tmLength = model.DefaultLengthSeconds\n\t\t}\n\t}\n\n\t// Delay at the metric level has been ignored for an incredibly long time. If we started respecting metric delay\n\t// now a lot of configurations would break on release. This logs a warning for now\n\tif m.Delay != 0 {\n\t\tlogger.Warn(fmt.Sprintf(\"Metric [%s/%d] in %v: Metric is configured with delay that has been being ignored. This behavior will change in the future, if your config works now remove this delay to prevent a future issue.\", m.Name, metricIdx, parent))\n\t}\n\tvar mDelay int64\n\tif discovery != nil && discovery.Delay != 0 {\n\t\tmDelay = discovery.Delay\n\t}\n\n\tmNilToZero := m.NilToZero\n\tif mNilToZero == nil {\n\t\tif discovery != nil && discovery.NilToZero != nil {\n\t\t\tmNilToZero = discovery.NilToZero\n\t\t} else {\n\t\t\tmNilToZero = aws.Bool(false)\n\t\t}\n\t}\n\n\tmAddCloudwatchTimestamp := m.AddCloudwatchTimestamp\n\tif mAddCloudwatchTimestamp == nil {\n\t\tif discovery != nil && discovery.AddCloudwatchTimestamp != nil {\n\t\t\tmAddCloudwatchTimestamp = discovery.AddCloudwatchTimestamp\n\t\t} else {\n\t\t\tmAddCloudwatchTimestamp = aws.Bool(false)\n\t\t}\n\t}\n\n\tmExportAllDataPoints := m.ExportAllDataPoints\n\tif mExportAllDataPoints == nil {\n\t\tif discovery != nil && discovery.ExportAllDataPoints != nil {\n\t\t\tmExportAllDataPoints = discovery.ExportAllDataPoints\n\t\t} else {\n\t\t\tmExportAllDataPoints = aws.Bool(false)\n\t\t}\n\t}\n\n\tif aws.ToBool(mExportAllDataPoints) && !aws.ToBool(mAddCloudwatchTimestamp) {\n\t\treturn fmt.Errorf(\"Metric [%s/%d] in %v: ExportAllDataPoints can only be enabled if AddCloudwatchTimestamp is enabled\", m.Name, metricIdx, parent)\n\t}\n\n\tif mLength < mPeriod {\n\t\treturn fmt.Errorf(\n\t\t\t\"Metric [%s/%d] in %v: length(%d) is smaller than period(%d). 
This can cause that the data requested is not ready and generate data gaps\",\n\t\t\tm.Name, metricIdx, parent, mLength, mPeriod,\n\t\t)\n\t}\n\tm.Length = mLength\n\tm.Period = mPeriod\n\tm.Delay = mDelay\n\tm.NilToZero = mNilToZero\n\tm.AddCloudwatchTimestamp = mAddCloudwatchTimestamp\n\tm.ExportAllDataPoints = mExportAllDataPoints\n\tm.Statistics = mStatistics\n\n\treturn nil\n}\n\nfunc (c *ScrapeConf) toModelConfig() model.JobsConfig {\n\tjobsCfg := model.JobsConfig{}\n\tjobsCfg.StsRegion = c.StsRegion\n\n\tfor _, discoveryJob := range c.Discovery.Jobs {\n\t\tsvc := SupportedServices.GetService(discoveryJob.Type)\n\n\t\tjob := model.DiscoveryJob{}\n\t\tjob.Regions = discoveryJob.Regions\n\t\tjob.Namespace = svc.Namespace\n\t\tjob.DimensionNameRequirements = discoveryJob.DimensionNameRequirements\n\t\tjob.RecentlyActiveOnly = discoveryJob.RecentlyActiveOnly\n\t\tjob.RoundingPeriod = discoveryJob.RoundingPeriod\n\t\tjob.Roles = toModelRoles(discoveryJob.Roles)\n\t\tjob.SearchTags = toModelSearchTags(discoveryJob.SearchTags)\n\t\tjob.CustomTags = toModelTags(discoveryJob.CustomTags)\n\t\tjob.Metrics = toModelMetricConfig(discoveryJob.Metrics)\n\t\tjob.IncludeContextOnInfoMetrics = discoveryJob.IncludeContextOnInfoMetrics\n\t\tjob.DimensionsRegexps = svc.ToModelDimensionsRegexp()\n\t\tjob.EnhancedMetrics = svc.toModelEnhancedMetricsConfig(discoveryJob.EnhancedMetrics)\n\n\t\tjob.ExportedTagsOnMetrics = []string{}\n\t\tif len(c.Discovery.ExportedTagsOnMetrics) > 0 {\n\t\t\tif exportedTags, ok := c.Discovery.ExportedTagsOnMetrics[svc.Namespace]; ok {\n\t\t\t\tjob.ExportedTagsOnMetrics = exportedTags\n\t\t\t}\n\t\t}\n\n\t\tjobsCfg.DiscoveryJobs = append(jobsCfg.DiscoveryJobs, job)\n\t}\n\n\tfor _, staticJob := range c.Static {\n\t\tjob := model.StaticJob{}\n\t\tjob.Name = staticJob.Name\n\t\tjob.Namespace = staticJob.Namespace\n\t\tjob.Regions = staticJob.Regions\n\t\tjob.Roles = toModelRoles(staticJob.Roles)\n\t\tjob.CustomTags = 
toModelTags(staticJob.CustomTags)\n\t\tjob.Dimensions = toModelDimensions(staticJob.Dimensions)\n\t\tjob.Metrics = toModelMetricConfig(staticJob.Metrics)\n\t\tjobsCfg.StaticJobs = append(jobsCfg.StaticJobs, job)\n\t}\n\n\tfor _, customNamespaceJob := range c.CustomNamespace {\n\t\tjob := model.CustomNamespaceJob{}\n\t\tjob.Regions = customNamespaceJob.Regions\n\t\tjob.Name = customNamespaceJob.Name\n\t\tjob.Namespace = customNamespaceJob.Namespace\n\t\tjob.DimensionNameRequirements = customNamespaceJob.DimensionNameRequirements\n\t\tjob.RoundingPeriod = customNamespaceJob.RoundingPeriod\n\t\tjob.RecentlyActiveOnly = customNamespaceJob.RecentlyActiveOnly\n\t\tjob.Roles = toModelRoles(customNamespaceJob.Roles)\n\t\tjob.CustomTags = toModelTags(customNamespaceJob.CustomTags)\n\t\tjob.Metrics = toModelMetricConfig(customNamespaceJob.Metrics)\n\t\tjobsCfg.CustomNamespaceJobs = append(jobsCfg.CustomNamespaceJobs, job)\n\t}\n\n\treturn jobsCfg\n}\n\nfunc toModelTags(tags []Tag) []model.Tag {\n\tret := make([]model.Tag, 0, len(tags))\n\tfor _, t := range tags {\n\t\tret = append(ret, model.Tag{\n\t\t\tKey:   t.Key,\n\t\t\tValue: t.Value,\n\t\t})\n\t}\n\treturn ret\n}\n\nfunc toModelSearchTags(tags []Tag) []model.SearchTag {\n\tret := make([]model.SearchTag, 0, len(tags))\n\tfor _, t := range tags {\n\t\t// This should never panic as long as regex validation continues to happen before model mapping\n\t\tr := regexp.MustCompile(t.Value)\n\t\tret = append(ret, model.SearchTag{\n\t\t\tKey:   t.Key,\n\t\t\tValue: r,\n\t\t})\n\t}\n\treturn ret\n}\n\nfunc toModelRoles(roles []Role) []model.Role {\n\tret := make([]model.Role, 0, len(roles))\n\tfor _, r := range roles {\n\t\tret = append(ret, model.Role{\n\t\t\tRoleArn:    r.RoleArn,\n\t\t\tExternalID: r.ExternalID,\n\t\t})\n\t}\n\treturn ret\n}\n\nfunc toModelDimensions(dimensions []Dimension) []model.Dimension {\n\tret := make([]model.Dimension, 0, len(dimensions))\n\tfor _, d := range dimensions {\n\t\tret = append(ret, 
model.Dimension{\n\t\t\tName:  d.Name,\n\t\t\tValue: d.Value,\n\t\t})\n\t}\n\treturn ret\n}\n\nfunc toModelMetricConfig(metrics []*Metric) []*model.MetricConfig {\n\tret := make([]*model.MetricConfig, 0, len(metrics))\n\tfor _, m := range metrics {\n\t\tret = append(ret, &model.MetricConfig{\n\t\t\tName:                   m.Name,\n\t\t\tStatistics:             m.Statistics,\n\t\t\tPeriod:                 m.Period,\n\t\t\tLength:                 m.Length,\n\t\t\tDelay:                  m.Delay,\n\t\t\tNilToZero:              aws.ToBool(m.NilToZero),\n\t\t\tAddCloudwatchTimestamp: aws.ToBool(m.AddCloudwatchTimestamp),\n\t\t\tExportAllDataPoints:    aws.ToBool(m.ExportAllDataPoints),\n\t\t})\n\t}\n\treturn ret\n}\n\n// logConfigErrors logs as warning any config unmarshalling error.\nfunc logConfigErrors(cfg []byte, logger *slog.Logger) {\n\tvar sc ScrapeConf\n\tvar errMsgs []string\n\tif err := yaml.UnmarshalStrict(cfg, &sc); err != nil {\n\t\tterr := &yaml.TypeError{}\n\t\tif errors.As(err, &terr) {\n\t\t\terrMsgs = append(errMsgs, terr.Errors...)\n\t\t} else {\n\t\t\terrMsgs = append(errMsgs, err.Error())\n\t\t}\n\t}\n\n\tif sc.APIVersion == \"\" {\n\t\terrMsgs = append(errMsgs, \"missing apiVersion\")\n\t}\n\n\tif len(errMsgs) > 0 {\n\t\tfor _, msg := range errMsgs {\n\t\t\tlogger.Warn(\"config file syntax error\", \"err\", msg)\n\t\t}\n\t\tlogger.Warn(`Config file error(s) detected: Yace might not work as expected. Future versions of Yace might fail to run with an invalid config file.`)\n\t}\n}\n"
  },
  {
    "path": "pkg/config/config_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage config\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestConfLoad(t *testing.T) {\n\ttestCases := []struct {\n\t\tconfigFile string\n\t}{\n\t\t{configFile: \"config_test.yml\"},\n\t\t{configFile: \"empty_rolearn.ok.yml\"},\n\t\t{configFile: \"sts_region.ok.yml\"},\n\t\t{configFile: \"multiple_roles.ok.yml\"},\n\t\t{configFile: \"custom_namespace.ok.yml\"},\n\t}\n\tfor _, tc := range testCases {\n\t\tconfig := ScrapeConf{}\n\t\tconfigFile := fmt.Sprintf(\"testdata/%s\", tc.configFile)\n\t\tif _, err := config.Load(configFile, promslog.NewNopLogger()); err != nil {\n\t\t\tt.Error(err)\n\t\t\tt.FailNow()\n\t\t}\n\t}\n}\n\nfunc TestBadConfigs(t *testing.T) {\n\ttestCases := []struct {\n\t\tconfigFile string\n\t\terrorMsg   string\n\t}{\n\t\t{\n\t\t\tconfigFile: \"externalid_without_rolearn.bad.yml\",\n\t\t\terrorMsg:   \"RoleArn should not be empty\",\n\t\t},\n\t\t{\n\t\t\tconfigFile: \"externalid_with_empty_rolearn.bad.yml\",\n\t\t\terrorMsg:   \"RoleArn should not be empty\",\n\t\t},\n\t\t{\n\t\t\tconfigFile: \"unknown_version.bad.yml\",\n\t\t\terrorMsg:   \"unknown apiVersion value 'invalidVersion'\",\n\t\t},\n\t\t{\n\t\t\tconfigFile: \"custom_namespace_without_name.bad.yml\",\n\t\t\terrorMsg:   \"Name should not be 
empty\",\n\t\t},\n\t\t{\n\t\t\tconfigFile: \"custom_namespace_without_namespace.bad.yml\",\n\t\t\terrorMsg:   \"Namespace should not be empty\",\n\t\t},\n\t\t{\n\t\t\tconfigFile: \"custom_namespace_without_region.bad.yml\",\n\t\t\terrorMsg:   \"Regions should not be empty\",\n\t\t},\n\t\t{\n\t\t\tconfigFile: \"discovery_job_type_unknown.bad.yml\",\n\t\t\terrorMsg:   \"Discovery job [0]: Service is not in known list!: AWS/FancyNewNamespace\",\n\t\t},\n\t\t{\n\t\t\tconfigFile: \"discovery_job_type_alias.bad.yml\",\n\t\t\terrorMsg:   \"Discovery job [0]: Invalid 'type' field, use namespace \\\"AWS/S3\\\" rather than alias \\\"s3\\\"\",\n\t\t},\n\t\t{\n\t\t\tconfigFile: \"discovery_job_exported_tags_alias.bad.yml\",\n\t\t\terrorMsg:   \"Discovery jobs: Invalid key in 'exportedTagsOnMetrics', use namespace \\\"AWS/S3\\\" rather than alias \\\"s3\\\"\",\n\t\t},\n\t\t{\n\t\t\tconfigFile: \"discovery_job_exported_tags_mismatch.bad.yml\",\n\t\t\terrorMsg:   \"Discovery jobs: 'exportedTagsOnMetrics' key \\\"AWS/RDS\\\" does not match with any discovery job type\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tconfig := ScrapeConf{}\n\t\tconfigFile := fmt.Sprintf(\"testdata/%s\", tc.configFile)\n\t\tif _, err := config.Load(configFile, promslog.NewNopLogger()); err != nil {\n\t\t\tif !strings.Contains(err.Error(), tc.errorMsg) {\n\t\t\t\tt.Errorf(\"expecter error for config file %q to contain %q but got: %s\", tc.configFile, tc.errorMsg, err)\n\t\t\t\tt.FailNow()\n\t\t\t}\n\t\t} else {\n\t\t\tt.Log(\"expected validation error\")\n\t\t\tt.FailNow()\n\t\t}\n\t}\n}\n\nfunc TestValidateConfigFailuresWhenUsingAsLibrary(t *testing.T) {\n\ttype testcase struct {\n\t\tconfig   ScrapeConf\n\t\terrorMsg string\n\t}\n\ttestCases := map[string]testcase{\n\t\t\"empty role should be configured when environment role is desired\": {\n\t\t\tconfig: ScrapeConf{\n\t\t\t\tAPIVersion: \"v1alpha1\",\n\t\t\t\tStsRegion:  \"us-east-2\",\n\t\t\t\tDiscovery: Discovery{\n\t\t\t\t\tJobs: 
[]*Job{{\n\t\t\t\t\t\tRegions: []string{\"us-east-2\"},\n\t\t\t\t\t\tType:    \"AWS/SQS\",\n\t\t\t\t\t\tMetrics: []*Metric{{\n\t\t\t\t\t\t\tName:       \"NumberOfMessagesSent\",\n\t\t\t\t\t\t\tStatistics: []string{\"Average\"},\n\t\t\t\t\t\t}},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrorMsg: \"no IAM roles configured. If the current IAM role is desired, an empty Role should be configured\",\n\t\t},\n\t\t\"enhanced metric are not supported for the namespace\": {\n\t\t\tconfig: ScrapeConf{\n\t\t\t\tDiscovery: Discovery{\n\t\t\t\t\tJobs: []*Job{{\n\t\t\t\t\t\tRegions: []string{\"us-east-2\"},\n\t\t\t\t\t\tType:    \"AWS/S3\",\n\t\t\t\t\t\tRoles:   []Role{{RoleArn: \"arn:aws:iam::123456789012:role/test\"}},\n\t\t\t\t\t\tMetrics: []*Metric{{\n\t\t\t\t\t\t\tName:       \"BucketSizeBytes\",\n\t\t\t\t\t\t\tStatistics: []string{\"Average\"},\n\t\t\t\t\t\t}},\n\t\t\t\t\t\tEnhancedMetrics: []*EnhancedMetric{{\n\t\t\t\t\t\t\tName: \"SomeEnhancedMetric\",\n\t\t\t\t\t\t}},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrorMsg: \"Discovery job [AWS/S3/0]: enhanced metrics are not supported for this namespace: enhanced metrics service for namespace AWS/S3 not found\",\n\t\t},\n\t\t\"enhanced metric are not supported for the enhanced mertrics service\": {\n\t\t\tconfig: ScrapeConf{\n\t\t\t\tDiscovery: Discovery{\n\t\t\t\t\tJobs: []*Job{{\n\t\t\t\t\t\tRegions: []string{\"us-east-2\"},\n\t\t\t\t\t\tType:    \"AWS/Lambda\",\n\t\t\t\t\t\tRoles:   []Role{{RoleArn: \"arn:aws:iam::123456789012:role/test\"}},\n\t\t\t\t\t\tMetrics: []*Metric{{\n\t\t\t\t\t\t\tName:       \"BucketSizeBytes\",\n\t\t\t\t\t\t\tStatistics: []string{\"Average\"},\n\t\t\t\t\t\t}},\n\t\t\t\t\t\tEnhancedMetrics: []*EnhancedMetric{{\n\t\t\t\t\t\t\tName: \"SomeEnhancedMetric\",\n\t\t\t\t\t\t}},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrorMsg: \"Discovery job [AWS/Lambda/0]: enhanced metric \\\"SomeEnhancedMetric\\\" is not supported for this namespace\",\n\t\t},\n\t}\n\n\tfor name, tc := range testCases 
{\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\t_, err := tc.config.Validate(promslog.NewNopLogger())\n\t\t\trequire.Error(t, err, \"Expected config validation to fail\")\n\t\t\trequire.Equal(t, tc.errorMsg, err.Error())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/config/feature_flags.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage config\n\nimport \"context\"\n\ntype flagsCtxKey struct{}\n\n// AlwaysReturnInfoMetrics is a feature flag used to enable the return of info metrics even when there are no corresponding CloudWatch metrics\nconst AlwaysReturnInfoMetrics = \"always-return-info-metrics\"\n\n// FeatureFlags is an interface all objects that can tell wether or not a feature flag is enabled can implement.\ntype FeatureFlags interface {\n\t// IsFeatureEnabled tells if the feature flag identified by flag is enabled.\n\tIsFeatureEnabled(flag string) bool\n}\n\n// CtxWithFlags injects a FeatureFlags inside a given context.Context, so that they are easily communicated through layers.\nfunc CtxWithFlags(ctx context.Context, ctrl FeatureFlags) context.Context {\n\treturn context.WithValue(ctx, flagsCtxKey{}, ctrl)\n}\n\n// FlagsFromCtx retrieves a FeatureFlags from a given context.Context, defaulting to one with all feature flags disabled if none is found.\nfunc FlagsFromCtx(ctx context.Context) FeatureFlags {\n\tif ctrl := ctx.Value(flagsCtxKey{}); ctrl != nil {\n\t\treturn ctrl.(FeatureFlags)\n\t}\n\treturn noFeatureFlags{}\n}\n\n// noFeatureFlags implements a no-op FeatureFlags\ntype noFeatureFlags struct{}\n\nfunc (nff noFeatureFlags) IsFeatureEnabled(_ string) bool {\n\treturn false\n}\n"
  },
  {
    "path": "pkg/config/feature_flags_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage config\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestFeatureFlagsInContext_DefaultsToNonEnabled(t *testing.T) {\n\tflags := FlagsFromCtx(context.Background())\n\trequire.False(t, flags.IsFeatureEnabled(\"some-feature\"))\n\trequire.False(t, flags.IsFeatureEnabled(\"some-other-feature\"))\n}\n\ntype flags struct{}\n\nfunc (f flags) IsFeatureEnabled(_ string) bool {\n\treturn true\n}\n\nfunc TestFeatureFlagsInContext_RetrievesFlagsFromContext(t *testing.T) {\n\tctx := CtxWithFlags(context.Background(), flags{})\n\trequire.True(t, FlagsFromCtx(ctx).IsFeatureEnabled(\"some-feature\"))\n\trequire.True(t, FlagsFromCtx(ctx).IsFeatureEnabled(\"some-other-feature\"))\n}\n"
  },
  {
    "path": "pkg/config/services.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage config\n\nimport (\n\t\"strings\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/grafana/regexp\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\n// ServiceConfig defines a namespace supported by discovery jobs.\ntype ServiceConfig struct {\n\t// Namespace is the formal AWS namespace identification string\n\tNamespace string\n\t// Alias is the formal AWS namespace alias\n\tAlias string\n\t// ResourceFilters is a list of strings used as filters in the\n\t// resourcegroupstaggingapi.GetResources request. It should always\n\t// be provided, except for those few namespaces where resources can't\n\t// be tagged.\n\tResourceFilters []*string\n\t// DimensionRegexps is an optional list of regexes that allow to\n\t// extract dimensions names from a resource ARN. 
The regex should\n\t// use named groups that correspond to AWS dimensions names.\n\t// In cases where the dimension name has a space, it should be\n\t// replaced with an underscore (`_`).\n\tDimensionRegexps []*regexp.Regexp\n}\n\nfunc (sc ServiceConfig) ToModelDimensionsRegexp() []model.DimensionsRegexp {\n\tdr := []model.DimensionsRegexp{}\n\n\tfor _, dimensionRegexp := range sc.DimensionRegexps {\n\t\tnames := dimensionRegexp.SubexpNames()\n\t\tdimensionNames := make([]string, 0, len(names)-1)\n\n\t\t// skip first name, it's always an empty string\n\t\tfor i := 1; i < len(names); i++ {\n\t\t\t// in the regex names we use underscores where AWS dimensions have spaces\n\t\t\tdimensionNames = append(dimensionNames, strings.ReplaceAll(names[i], \"_\", \" \"))\n\t\t}\n\n\t\tdr = append(dr, model.DimensionsRegexp{\n\t\t\tRegexp:          dimensionRegexp,\n\t\t\tDimensionsNames: dimensionNames,\n\t\t})\n\t}\n\n\treturn dr\n}\n\nfunc (sc ServiceConfig) toModelEnhancedMetricsConfig(ems []*EnhancedMetric) []*model.EnhancedMetricConfig {\n\temc := make([]*model.EnhancedMetricConfig, 0, len(ems))\n\n\tfor _, em := range ems {\n\t\temc = append(emc, &model.EnhancedMetricConfig{\n\t\t\tName: em.Name,\n\t\t})\n\t}\n\n\treturn emc\n}\n\ntype serviceConfigs []ServiceConfig\n\nfunc (sc serviceConfigs) GetService(serviceType string) *ServiceConfig {\n\tfor _, sf := range sc {\n\t\tif sf.Namespace == serviceType {\n\t\t\treturn &sf\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (sc serviceConfigs) getServiceByAlias(alias string) *ServiceConfig {\n\tfor _, sf := range sc {\n\t\tif sf.Alias == alias {\n\t\t\treturn &sf\n\t\t}\n\t}\n\treturn nil\n}\n\nvar SupportedServices = serviceConfigs{\n\t{\n\t\tNamespace: \"CWAgent\",\n\t\tAlias:     \"cwagent\",\n\t},\n\t{\n\t\tNamespace: \"AWS/Usage\",\n\t\tAlias:     \"usage\",\n\t},\n\t{\n\t\tNamespace: \"AWS/CertificateManager\",\n\t\tAlias:     \"acm\",\n\t\tResourceFilters: 
[]*string{\n\t\t\taws.String(\"acm:certificate\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/ACMPrivateCA\",\n\t\tAlias:     \"acm-pca\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"acm-pca:certificate-authority\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"(?P<PrivateCAArn>.*)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AmazonMWAA\",\n\t\tAlias:     \"airflow\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"airflow\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/MWAA\",\n\t\tAlias:     \"mwaa\",\n\t},\n\t{\n\t\tNamespace: \"AWS/ApplicationELB\",\n\t\tAlias:     \"alb\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"elasticloadbalancing:loadbalancer/app\"),\n\t\t\taws.String(\"elasticloadbalancing:targetgroup\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":(?P<TargetGroup>targetgroup/.+)\"),\n\t\t\tregexp.MustCompile(\":loadbalancer/(?P<LoadBalancer>.+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/AppStream\",\n\t\tAlias:     \"appstream\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"appstream\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":fleet/(?P<FleetName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Backup\",\n\t\tAlias:     \"backup\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"backup\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":backup-vault:(?P<BackupVaultName>[^:]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/ApiGateway\",\n\t\tAlias:     \"apigateway\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"apigateway\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\t// DimensionRegexps starting with 'restapis' are for APIGateway V1 gateways (REST API gateways)\n\t\t\tregexp.MustCompile(\"/restapis/(?P<ApiName>[^/]+)$\"),\n\t\t\tregexp.MustCompile(\"/restapis/(?P<ApiName>[^/]+)/stages/(?P<Stage>[^/]+)$\"),\n\t\t\t// DimensionRegexps starting 'apis' are for APIGateway 
V2 gateways (HTTP and Websocket gateways)\n\t\t\tregexp.MustCompile(\"/apis/(?P<ApiId>[^/]+)$\"),\n\t\t\tregexp.MustCompile(\"/apis/(?P<ApiId>[^/]+)/stages/(?P<Stage>[^/]+)$\"),\n\t\t\tregexp.MustCompile(\"/apis/(?P<ApiId>[^/]+)/routes/(?P<Route>[^/]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/AmazonMQ\",\n\t\tAlias:     \"mq\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"mq\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"broker:(?P<Broker>[^:]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/AppRunner\",\n\t\tAlias:     \"apprunner\",\n\t},\n\t{\n\t\tNamespace: \"AWS/AppSync\",\n\t\tAlias:     \"appsync\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"appsync\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"apis/(?P<GraphQLAPIId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Athena\",\n\t\tAlias:     \"athena\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"athena\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"workgroup/(?P<WorkGroup>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/AutoScaling\",\n\t\tAlias:     \"asg\",\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"autoScalingGroupName/(?P<AutoScalingGroupName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/ElasticBeanstalk\",\n\t\tAlias:     \"beanstalk\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"elasticbeanstalk:environment\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\t// arn uses /${ApplicationName}/${EnvironmentName}, but only EnvironmentName is a Metric Dimension\n\t\t\tregexp.MustCompile(\"environment/[^/]+/(?P<EnvironmentName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Billing\",\n\t\tAlias:     \"billing\",\n\t},\n\t{\n\t\tNamespace: \"AWS/Cassandra\",\n\t\tAlias:     \"cassandra\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"cassandra\"),\n\t\t},\n\t\tDimensionRegexps: 
[]*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"keyspace/(?P<Keyspace>[^/]+)/table/(?P<TableName>[^/]+)\"),\n\t\t\tregexp.MustCompile(\"keyspace/(?P<Keyspace>[^/]+)/\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/CloudFront\",\n\t\tAlias:     \"cloudfront\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"cloudfront:distribution\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"distribution/(?P<DistributionId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Cognito\",\n\t\tAlias:     \"cognito-idp\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"cognito-idp:userpool\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"userpool/(?P<UserPool>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/DataSync\",\n\t\tAlias:     \"datasync\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"datasync:task\"),\n\t\t\taws.String(\"datasync:agent\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":task/(?P<TaskId>[^/]+)\"),\n\t\t\tregexp.MustCompile(\":agent/(?P<AgentId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/DirectoryService\",\n\t\tAlias:     \"ds\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"ds:directory\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":directory/(?P<Directory_ID>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/DMS\",\n\t\tAlias:     \"dms\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"dms\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"rep:[^/]+/(?P<ReplicationInstanceIdentifier>[^/]+)\"),\n\t\t\tregexp.MustCompile(\"task:(?P<ReplicationTaskIdentifier>[^/]+)/(?P<ReplicationInstanceIdentifier>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/DDoSProtection\",\n\t\tAlias:     \"shield\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"shield:protection\"),\n\t\t},\n\t\tDimensionRegexps: 
[]*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"(?P<ResourceArn>.+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/DocDB\",\n\t\tAlias:     \"docdb\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"rds:db\"),\n\t\t\taws.String(\"rds:cluster\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"cluster:(?P<DBClusterIdentifier>[^/]+)\"),\n\t\t\tregexp.MustCompile(\"db:(?P<DBInstanceIdentifier>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/DX\",\n\t\tAlias:     \"dx\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"directconnect\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":dxcon/(?P<ConnectionId>[^/]+)\"),\n\t\t\tregexp.MustCompile(\":dxlag/(?P<LagId>[^/]+)\"),\n\t\t\tregexp.MustCompile(\":dxvif/(?P<VirtualInterfaceId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/DynamoDB\",\n\t\tAlias:     \"dynamodb\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"dynamodb:table\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":table/(?P<TableName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/EBS\",\n\t\tAlias:     \"ebs\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"ec2:volume\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"volume/(?P<VolumeId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/ElastiCache\",\n\t\tAlias:     \"ec\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"elasticache:cluster\"),\n\t\t\taws.String(\"elasticache:serverlesscache\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"cluster:(?P<CacheClusterId>[^/]+)\"),\n\t\t\tregexp.MustCompile(\"serverlesscache:(?P<clusterId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/MemoryDB\",\n\t\tAlias:     \"memorydb\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"memorydb:cluster\"),\n\t\t},\n\t\tDimensionRegexps: 
[]*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"cluster/(?P<ClusterName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/EC2\",\n\t\tAlias:     \"ec2\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"ec2:instance\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"instance/(?P<InstanceId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/EC2Spot\",\n\t\tAlias:     \"ec2Spot\",\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"(?P<FleetRequestId>.*)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/EC2CapacityReservations\",\n\t\tAlias:     \"ec2CapacityReservations\",\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":capacity-reservation/(?P<CapacityReservationId>)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/ECS\",\n\t\tAlias:     \"ecs-svc\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"ecs:cluster\"),\n\t\t\taws.String(\"ecs:service\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":cluster/(?P<ClusterName>[^/]+)$\"),\n\t\t\tregexp.MustCompile(\":service/(?P<ClusterName>[^/]+)/(?P<ServiceName>[^/]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"ECS/ContainerInsights\",\n\t\tAlias:     \"ecs-containerinsights\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"ecs:cluster\"),\n\t\t\taws.String(\"ecs:service\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\t// Use \"new\" long arns as per\n\t\t\t// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids\n\t\t\tregexp.MustCompile(\":cluster/(?P<ClusterName>[^/]+)$\"),\n\t\t\tregexp.MustCompile(\":service/(?P<ClusterName>[^/]+)/(?P<ServiceName>[^/]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"ContainerInsights\",\n\t\tAlias:     \"containerinsights\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"eks:cluster\"),\n\t\t},\n\t\tDimensionRegexps: 
[]*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":cluster/(?P<ClusterName>[^/]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/EFS\",\n\t\tAlias:     \"efs\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"elasticfilesystem:file-system\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"file-system/(?P<FileSystemId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/EKS\",\n\t\tAlias:     \"eks\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"eks:cluster\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":cluster/(?P<ClusterName>[^/]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/ELB\",\n\t\tAlias:     \"elb\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"elasticloadbalancing:loadbalancer\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":loadbalancer/(?P<LoadBalancerName>.+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/ElasticMapReduce\",\n\t\tAlias:     \"emr\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"elasticmapreduce:cluster\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"cluster/(?P<JobFlowId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/EMRServerless\",\n\t\tAlias:     \"emr-serverless\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"emr-serverless:applications\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"applications/(?P<ApplicationId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/ES\",\n\t\tAlias:     \"es\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"es:domain\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":domain/(?P<DomainName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Firehose\",\n\t\tAlias:     \"firehose\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"firehose\"),\n\t\t},\n\t\tDimensionRegexps: 
[]*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":deliverystream/(?P<DeliveryStreamName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/FSx\",\n\t\tAlias:     \"fsx\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"fsx:file-system\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"file-system/(?P<FileSystemId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/GameLift\",\n\t\tAlias:     \"gamelift\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"gamelift\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":fleet/(?P<FleetId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/GatewayELB\",\n\t\tAlias:     \"gwlb\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"elasticloadbalancing:loadbalancer\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":(?P<TargetGroup>targetgroup/.+)\"),\n\t\t\tregexp.MustCompile(\":loadbalancer/(?P<LoadBalancer>.+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/GlobalAccelerator\",\n\t\tAlias:     \"ga\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"globalaccelerator\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"accelerator/(?P<Accelerator>[^/]+)$\"),\n\t\t\tregexp.MustCompile(\"accelerator/(?P<Accelerator>[^/]+)/listener/(?P<Listener>[^/]+)$\"),\n\t\t\tregexp.MustCompile(\"accelerator/(?P<Accelerator>[^/]+)/listener/(?P<Listener>[^/]+)/endpoint-group/(?P<EndpointGroup>[^/]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"Glue\",\n\t\tAlias:     \"glue\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"glue:job\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":job/(?P<JobName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/IoT\",\n\t\tAlias:     \"iot\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"iot:rule\"),\n\t\t\taws.String(\"iot:provisioningtemplate\"),\n\t\t},\n\t\tDimensionRegexps: 
[]*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":rule/(?P<RuleName>[^/]+)\"),\n\t\t\tregexp.MustCompile(\":provisioningtemplate/(?P<TemplateName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Kafka\",\n\t\tAlias:     \"kafka\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"kafka:cluster\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":cluster/(?P<Cluster_Name>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/KafkaConnect\",\n\t\tAlias:     \"kafkaconnect\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"kafka:cluster\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":connector/(?P<Connector_Name>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Kinesis\",\n\t\tAlias:     \"kinesis\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"kinesis:stream\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":stream/(?P<StreamName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/KinesisAnalytics\",\n\t\tAlias:     \"kinesis-analytics\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"kinesisanalytics:application\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":application/(?P<Application>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/KMS\",\n\t\tAlias:     \"kms\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"kms:key\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":key/(?P<KeyId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Lambda\",\n\t\tAlias:     \"lambda\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"lambda:function\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":function:(?P<FunctionName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Logs\",\n\t\tAlias:     \"logs\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"logs:log-group\"),\n\t\t},\n\t\tDimensionRegexps: 
[]*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":log-group:(?P<LogGroupName>.+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/MediaConnect\",\n\t\tAlias:     \"mediaconnect\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"mediaconnect:flow\"),\n\t\t\taws.String(\"mediaconnect:source\"),\n\t\t\taws.String(\"mediaconnect:output\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"^(?P<FlowARN>.*:flow:.*)$\"),\n\t\t\tregexp.MustCompile(\"^(?P<SourceARN>.*:source:.*)$\"),\n\t\t\tregexp.MustCompile(\"^(?P<OutputARN>.*:output:.*)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/MediaConvert\",\n\t\tAlias:     \"mediaconvert\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"mediaconvert\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"(?P<Queue>.*:.*:mediaconvert:.*:queues/.*)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/MediaPackage\",\n\t\tAlias:     \"mediapackage\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"mediapackage\"),\n\t\t\taws.String(\"mediapackagev2\"),\n\t\t\taws.String(\"mediapackage-vod\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":channels/(?P<IngestEndpoint>.+)$\"),\n\t\t\tregexp.MustCompile(\":packaging-configurations/(?P<PackagingConfiguration>.+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/MediaLive\",\n\t\tAlias:     \"medialive\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"medialive:channel\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":channel:(?P<ChannelId>.+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/MediaTailor\",\n\t\tAlias:     \"mediatailor\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"mediatailor:playbackConfiguration\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"playbackConfiguration/(?P<ConfigurationName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Neptune\",\n\t\tAlias:     \"neptune\",\n\t\tResourceFilters: 
[]*string{\n\t\t\taws.String(\"rds:db\"),\n\t\t\taws.String(\"rds:cluster\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":cluster:(?P<DBClusterIdentifier>[^/]+)\"),\n\t\t\tregexp.MustCompile(\":db:(?P<DBInstanceIdentifier>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/NetworkFirewall\",\n\t\tAlias:     \"nfw\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"network-firewall:firewall\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"firewall/(?P<FirewallName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/NATGateway\",\n\t\tAlias:     \"ngw\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"ec2:natgateway\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"natgateway/(?P<NatGatewayId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/NetworkELB\",\n\t\tAlias:     \"nlb\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"elasticloadbalancing:loadbalancer/net\"),\n\t\t\taws.String(\"elasticloadbalancing:targetgroup\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":(?P<TargetGroup>targetgroup/.+)\"),\n\t\t\tregexp.MustCompile(\":loadbalancer/(?P<LoadBalancer>.+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/PrivateLinkEndpoints\",\n\t\tAlias:     \"vpc-endpoint\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"ec2:vpc-endpoint\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":vpc-endpoint/(?P<VPC_Endpoint_Id>.+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/PrivateLinkServices\",\n\t\tAlias:     \"vpc-endpoint-service\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"ec2:vpc-endpoint-service\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":vpc-endpoint-service/(?P<Service_Id>.+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Prometheus\",\n\t\tAlias:     \"amp\",\n\t},\n\t{\n\t\tNamespace: \"AWS/QLDB\",\n\t\tAlias:     
\"qldb\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"qldb\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":ledger/(?P<LedgerName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/QuickSight\",\n\t\tAlias:     \"quicksight\",\n\t},\n\t{\n\t\tNamespace: \"AWS/RDS\",\n\t\tAlias:     \"rds\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"rds:db\"),\n\t\t\taws.String(\"rds:cluster\"),\n\t\t\taws.String(\"rds:db-proxy\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":cluster:(?P<DBClusterIdentifier>[^/]+)\"),\n\t\t\tregexp.MustCompile(\":db:(?P<DBInstanceIdentifier>[^/]+)\"),\n\t\t\tregexp.MustCompile(\":db-proxy:(?P<ProxyIdentifier>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Redshift\",\n\t\tAlias:     \"redshift\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"redshift:cluster\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":cluster:(?P<ClusterIdentifier>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Redshift-Serverless\",\n\t\tAlias:     \"redshift-serverless\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"redshift-serverless:workgroup\"),\n\t\t\taws.String(\"redshift-serverless:namespace\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Route53Resolver\",\n\t\tAlias:     \"route53-resolver\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"route53resolver\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":resolver-endpoint/(?P<EndpointId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Route53\",\n\t\tAlias:     \"route53\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"route53\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":healthcheck/(?P<HealthCheckId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/RUM\",\n\t\tAlias:     \"rum\",\n\t},\n\t{\n\t\tNamespace: \"AWS/S3\",\n\t\tAlias:     \"s3\",\n\t\tResourceFilters: 
[]*string{\n\t\t\taws.String(\"s3\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"(?P<BucketName>[^:]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Scheduler\",\n\t\tAlias:     \"scheduler\",\n\t},\n\t{\n\t\tNamespace: \"AWS/ECR\",\n\t\tAlias:     \"ecr\",\n\t},\n\t{\n\t\tNamespace: \"AWS/Timestream\",\n\t\tAlias:     \"timestream\",\n\t},\n\t{\n\t\tNamespace: \"AWS/SecretsManager\",\n\t\tAlias:     \"secretsmanager\",\n\t},\n\t{\n\t\tNamespace: \"AWS/SES\",\n\t\tAlias:     \"ses\",\n\t},\n\t{\n\t\tNamespace: \"AWS/States\",\n\t\tAlias:     \"sfn\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"states\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"(?P<StateMachineArn>.*)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/SNS\",\n\t\tAlias:     \"sns\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"sns\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"(?P<TopicName>[^:]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/SQS\",\n\t\tAlias:     \"sqs\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"sqs\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"(?P<QueueName>[^:]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/StorageGateway\",\n\t\tAlias:     \"storagegateway\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"storagegateway\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":gateway/(?P<GatewayId>[^:]+)$\"),\n\t\t\tregexp.MustCompile(\":share/(?P<ShareId>[^:]+)$\"),\n\t\t\tregexp.MustCompile(\"^(?P<GatewayId>[^:/]+)/(?P<GatewayName>[^:]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Transfer\",\n\t\tAlias:     \"transfer\",\n\t},\n\t{\n\t\tNamespace: \"AWS/TransitGateway\",\n\t\tAlias:     \"tgw\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"ec2:transit-gateway\"),\n\t\t},\n\t\tDimensionRegexps: 
[]*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":transit-gateway/(?P<TransitGateway>[^/]+)\"),\n\t\t\tregexp.MustCompile(\"(?P<TransitGateway>[^/]+)/(?P<TransitGatewayAttachment>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/TrustedAdvisor\",\n\t\tAlias:     \"trustedadvisor\",\n\t},\n\t{\n\t\tNamespace: \"AWS/VPN\",\n\t\tAlias:     \"vpn\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"ec2:vpn-connection\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":vpn-connection/(?P<VpnId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/ClientVPN\",\n\t\tAlias:     \"clientvpn\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"ec2:client-vpn-endpoint\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":client-vpn-endpoint/(?P<Endpoint>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/WAFV2\",\n\t\tAlias:     \"wafv2\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"wafv2\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"/webacl/(?P<WebACL>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/WorkSpaces\",\n\t\tAlias:     \"workspaces\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"workspaces:workspace\"),\n\t\t\taws.String(\"workspaces:directory\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":workspace/(?P<WorkspaceId>[^/]+)$\"),\n\t\t\tregexp.MustCompile(\":directory/(?P<DirectoryId>[^/]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/AOSS\",\n\t\tAlias:     \"aoss\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"aoss:collection\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":collection/(?P<CollectionId>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/SageMaker\",\n\t\tAlias:     \"sagemaker\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"sagemaker:endpoint\"),\n\t\t\taws.String(\"sagemaker:inference-component\"),\n\t\t},\n\t\tDimensionRegexps: 
[]*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":endpoint/(?P<EndpointName>[^/]+)$\"),\n\t\t\tregexp.MustCompile(\":inference-component/(?P<InferenceComponentName>[^/]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"/aws/sagemaker/Endpoints\",\n\t\tAlias:     \"sagemaker-endpoints\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"sagemaker:endpoint\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":endpoint/(?P<EndpointName>[^/]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"/aws/sagemaker/InferenceComponents\",\n\t\tAlias:     \"sagemaker-inference-components\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"sagemaker:inference-component\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":inference-component/(?P<InferenceComponentName>[^/]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"/aws/sagemaker/TrainingJobs\",\n\t\tAlias:     \"sagemaker-training\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"sagemaker:training-job\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"/aws/sagemaker/ProcessingJobs\",\n\t\tAlias:     \"sagemaker-processing\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"sagemaker:processing-job\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"/aws/sagemaker/TransformJobs\",\n\t\tAlias:     \"sagemaker-transform\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"sagemaker:transform-job\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"/aws/sagemaker/InferenceRecommendationsJobs\",\n\t\tAlias:     \"sagemaker-inf-rec\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"sagemaker:inference-recommendations-job\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":inference-recommendations-job/(?P<JobName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Sagemaker/ModelBuildingPipeline\",\n\t\tAlias:     \"sagemaker-model-building-pipeline\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"sagemaker:pipeline\"),\n\t\t},\n\t\tDimensionRegexps: 
[]*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":pipeline/(?P<PipelineName>[^/]+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/IPAM\",\n\t\tAlias:     \"ipam\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"ec2:ipam-pool\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":ipam-pool/(?P<IpamPoolId>[^/]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Bedrock\",\n\t\tAlias:     \"bedrock\",\n\t},\n\t{\n\t\tNamespace: \"AWS/Bedrock/Agents\",\n\t\tAlias:     \"bedrock-agents\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"bedrock:agent-alias\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"(?P<AgentAliasArn>.+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Bedrock/Guardrails\",\n\t\tAlias:     \"bedrock-guardrails\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"bedrock:guardrail\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\"(?P<GuardrailArn>.+)\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Events\",\n\t\tAlias:     \"event-rule\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"events\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":rule/(?P<EventBusName>[^/]+)/(?P<RuleName>[^/]+)$\"),\n\t\t\tregexp.MustCompile(\":rule/aws.partner/(?P<EventBusName>.+)/(?P<RuleName>[^/]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/VpcLattice\",\n\t\tAlias:     \"vpc-lattice\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"vpc-lattice:service\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":service/(?P<Service>[^/]+)$\"),\n\t\t},\n\t},\n\t{\n\t\tNamespace: \"AWS/Network Manager\",\n\t\tAlias:     \"networkmanager\",\n\t\tResourceFilters: []*string{\n\t\t\taws.String(\"networkmanager:core-network\"),\n\t\t},\n\t\tDimensionRegexps: []*regexp.Regexp{\n\t\t\tregexp.MustCompile(\":core-network/(?P<CoreNetwork>[^/]+)$\"),\n\t\t},\n\t},\n}\n"
  },
  {
    "path": "pkg/config/services_test.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage config\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestSupportedServices(t *testing.T) {\n\tfor i, svc := range SupportedServices {\n\t\trequire.NotNil(t, svc.Namespace, fmt.Sprintf(\"Nil Namespace for service at index '%d'\", i))\n\t\trequire.NotNil(t, svc.Alias, fmt.Sprintf(\"Nil Alias for service '%s' at index '%d'\", svc.Namespace, i))\n\n\t\tif svc.ResourceFilters != nil {\n\t\t\trequire.NotEmpty(t, svc.ResourceFilters)\n\n\t\t\tfor _, filter := range svc.ResourceFilters {\n\t\t\t\trequire.NotEmpty(t, aws.ToString(filter))\n\t\t\t}\n\t\t}\n\n\t\tif svc.DimensionRegexps != nil {\n\t\t\trequire.NotEmpty(t, svc.DimensionRegexps)\n\n\t\t\tfor _, regex := range svc.DimensionRegexps {\n\t\t\t\trequire.NotEmpty(t, regex.String())\n\t\t\t\trequire.Positive(t, regex.NumSubexp())\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/config/testdata/config_test.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  exportedTagsOnMetrics:\n    AWS/EBS:\n      - VolumeId\n    AWS/Kafka:\n      - Name\n  jobs:\n    - type: AWS/Billing\n      regions:\n        - us-east-1\n      metrics:\n        - name: EstimatedCharges\n          statistics:\n            - Sum\n          period: 3600\n          length: 87600\n    - type: AWS/ES\n      regions:\n        - eu-west-1\n      searchTags:\n        - key: Environment\n          value: qa\n      metrics:\n        - name: FreeStorageSpace\n          statistics:\n            - Sum\n          period: 60\n          length: 600\n        - name: ClusterStatus.green\n          statistics:\n            - Minimum\n          period: 60\n          length: 600\n        - name: ClusterStatus.yellow\n          statistics:\n            - Maximum\n          period: 60\n          length: 600\n        - name: ClusterStatus.red\n          statistics:\n            - Maximum\n          period: 60\n          length: 600\n    - type: AWS/ELB\n      regions:\n        - eu-west-1\n      length: 900\n      delay: 120\n      statistics:\n        - Minimum\n        - Maximum\n        - Sum\n      searchTags:\n        - key: KubernetesCluster\n          value: production-19\n      metrics:\n        - name: HealthyHostCount\n          statistics:\n            - Minimum\n          period: 600\n          length: 600 #(this will be ignored)\n        - name: HTTPCode_Backend_4XX\n          statistics:\n            - Sum\n          period: 60\n          length: 900 #(this will be ignored)\n          delay: 300 #(this will be ignored)\n          nilToZero: true\n        - name: HTTPCode_Backend_5XX\n          period: 60\n    - type: AWS/ApplicationELB\n      regions:\n        - eu-west-1\n      searchTags:\n        - key: kubernetes.io/service-name\n          value: .*\n      metrics:\n        - name: UnHealthyHostCount\n          statistics:\n            - Maximum\n          period: 60\n          length: 600\n    - 
type: AWS/VPN\n      regions:\n        - eu-west-1\n      searchTags:\n        - key: kubernetes.io/service-name\n          value: .*\n      metrics:\n        - name: TunnelState\n          statistics:\n            - p90\n          period: 60\n          length: 300\n    - type: AWS/Kinesis\n      regions:\n        - eu-west-1\n      metrics:\n        - name: PutRecords.Success\n          statistics:\n            - Sum\n          period: 60\n          length: 300\n    - type: AWS/KMS\n      regions:\n        - eu-west-1\n      metrics:\n        - name: SecondsUntilKeyMaterialExpiration\n          statistics:\n            - Minimum\n          period: 60\n          length: 300\n    - type: AWS/S3\n      regions:\n        - eu-west-1\n      searchTags:\n        - key: type\n          value: public\n      metrics:\n        - name: NumberOfObjects\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n        - name: BucketSizeBytes\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n    - type: AWS/EBS\n      regions:\n        - eu-west-1\n      searchTags:\n        - key: type\n          value: public\n      metrics:\n        - name: BurstBalance\n          statistics:\n            - Minimum\n          period: 600\n          length: 600\n          addCloudwatchTimestamp: true\n    - type: AWS/Kafka\n      regions:\n        - eu-west-1\n      searchTags:\n        - key: env\n          value: dev\n      metrics:\n        - name: BytesOutPerSec\n          statistics:\n            - Average\n          period: 600\n          length: 600\nstatic:\n  - namespace: AWS/AutoScaling\n    name: must_be_set\n    regions:\n      - eu-west-1\n    dimensions:\n      - name: AutoScalingGroupName\n        value: Test\n    customTags:\n      - key: CustomTag\n        value: CustomValue\n    metrics:\n      - name: GroupInServiceInstances\n        statistics:\n          - Minimum\n        period: 60\n 
       length: 300\n"
  },
  {
    "path": "pkg/config/testdata/custom_namespace.ok.yml",
    "content": "apiVersion: v1alpha1\nsts-region: eu-west-1\ncustomNamespace:\n  - name: customMetrics\n    namespace: CustomEC2Metrics\n    regions:\n      - us-east-1\n    metrics:\n      - name: cpu_usage_idle\n        statistics:\n          - Average\n        period: 300\n        length: 300\n        nilToZero: true\n      - name: disk_free\n        statistics:\n          - Average\n        period: 300\n        length: 300\n        nilToZero: true\n"
  },
  {
    "path": "pkg/config/testdata/custom_namespace_without_name.bad.yml",
    "content": "apiVersion: v1alpha1\nsts-region: eu-west-1\ncustomNamespace:\n  - namespace: CustomEC2Metrics\n    regions:\n      - us-east-1\n    metrics:\n      - name: cpu_usage_idle\n        statistics:\n          - Average\n        period: 300\n        length: 300\n        nilToZero: true\n      - name: disk_free\n        statistics:\n          - Average\n        period: 300\n        length: 300\n        nilToZero: true\n"
  },
  {
    "path": "pkg/config/testdata/custom_namespace_without_namespace.bad.yml",
    "content": "apiVersion: v1alpha1\nsts-region: eu-west-1\ncustomNamespace:\n  - name: customMetrics\n    regions:\n      - us-east-1\n    metrics:\n      - name: cpu_usage_idle\n        statistics:\n          - Average\n        period: 300\n        length: 300\n        nilToZero: true\n      - name: disk_free\n        statistics:\n          - Average\n        period: 300\n        length: 300\n        nilToZero: true\n"
  },
  {
    "path": "pkg/config/testdata/custom_namespace_without_region.bad.yml",
    "content": "apiVersion: v1alpha1\nsts-region: eu-west-1\ncustomNamespace:\n  - name: customMetrics\n    namespace: customMetrics\n    metrics:\n      - name: cpu_usage_idle\n        statistics:\n          - Average\n        period: 300\n        length: 300\n        nilToZero: true\n      - name: disk_free\n        statistics:\n          - Average\n        period: 300\n        length: 300\n        nilToZero: true\n"
  },
  {
    "path": "pkg/config/testdata/discovery_job_exported_tags_alias.bad.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  exportedTagsOnMetrics:\n    s3:\n      - BucketName\n  jobs:\n    - type: AWS/S3\n      regions:\n        - eu-west-1\n      metrics:\n        - name: NumberOfObjects\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n        - name: BucketSizeBytes\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n"
  },
  {
    "path": "pkg/config/testdata/discovery_job_exported_tags_mismatch.bad.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  exportedTagsOnMetrics:\n    AWS/RDS:\n      - ClusterName\n  jobs:\n    - type: AWS/S3\n      regions:\n        - eu-west-1\n      metrics:\n        - name: NumberOfObjects\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n        - name: BucketSizeBytes\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n"
  },
  {
    "path": "pkg/config/testdata/discovery_job_type_alias.bad.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: s3\n      regions:\n        - eu-west-1\n      metrics:\n        - name: NumberOfObjects\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n        - name: BucketSizeBytes\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n"
  },
  {
    "path": "pkg/config/testdata/discovery_job_type_unknown.bad.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/FancyNewNamespace\n      regions:\n        - eu-west-1\n      metrics:\n        - name: SomeMetric\n          statistics:\n            - Average\n"
  },
  {
    "path": "pkg/config/testdata/empty_rolearn.ok.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/S3\n      regions:\n        - eu-west-1\n      roles:\n        - roleArn:\n      metrics:\n        - name: NumberOfObjects\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n        - name: BucketSizeBytes\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n"
  },
  {
    "path": "pkg/config/testdata/externalid_with_empty_rolearn.bad.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/S3\n      regions:\n        - eu-west-1\n      roles:\n        - externalId: something\n          roleArn:\n      metrics:\n        - name: NumberOfObjects\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n        - name: BucketSizeBytes\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n"
  },
  {
    "path": "pkg/config/testdata/externalid_without_rolearn.bad.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/S3\n      regions:\n        - eu-west-1\n      roles:\n        - externalId: something\n      metrics:\n        - name: NumberOfObjects\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n        - name: BucketSizeBytes\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n"
  },
  {
    "path": "pkg/config/testdata/multiple_roles.ok.yml",
    "content": "apiVersion: v1alpha1\ndiscovery:\n  jobs:\n    - type: AWS/S3\n      regions:\n        - eu-west-1\n      roles:\n        - roleArn: something\n          externalId: something\n        - roleArn: something\n      metrics:\n        - name: NumberOfObjects\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n        - name: BucketSizeBytes\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n"
  },
  {
    "path": "pkg/config/testdata/sts_region.ok.yml",
    "content": "apiVersion: v1alpha1\nsts-region: eu-west-1\ndiscovery:\n  jobs:\n    - type: AWS/S3\n      regions:\n        - eu-west-1\n      roles:\n        - externalId: something\n          roleArn: something\n      metrics:\n        - name: NumberOfObjects\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n"
  },
  {
    "path": "pkg/config/testdata/unknown_version.bad.yml",
    "content": "apiVersion: invalidVersion\ndiscovery:\n  exportedTagsOnMetrics:\n    AWS/EBS:\n      - VolumeId\n    AWS/Kafka:\n      - Name\n  jobs:\n    - type: AWS/Billing\n      regions:\n        - us-east-1\n      metrics:\n        - name: EstimatedCharges\n          statistics:\n            - Sum\n          period: 3600\n          length: 87600\n    - type: AWS/ES\n      regions:\n        - eu-west-1\n      searchTags:\n        - key: Environment\n          value: qa\n      metrics:\n        - name: FreeStorageSpace\n          statistics:\n            - Sum\n          period: 60\n          length: 600\n        - name: ClusterStatus.green\n          statistics:\n            - Minimum\n          period: 60\n          length: 600\n        - name: ClusterStatus.yellow\n          statistics:\n            - Maximum\n          period: 60\n          length: 600\n        - name: ClusterStatus.red\n          statistics:\n            - Maximum\n          period: 60\n          length: 600\n    - type: AWS/ELB\n      regions:\n        - eu-west-1\n      length: 900\n      delay: 120\n      searchTags:\n        - key: KubernetesCluster\n          value: production-19\n      metrics:\n        - name: HealthyHostCount\n          statistics:\n            - Minimum\n          period: 600\n          length: 600 #(this will be ignored)\n        - name: HTTPCode_Backend_4XX\n          statistics:\n            - Sum\n          period: 60\n          length: 900 #(this will be ignored)\n          delay: 300 #(this will be ignored)\n          nilToZero: true\n    - type: AWS/ApplicationELB\n      regions:\n        - eu-west-1\n      searchTags:\n        - key: kubernetes.io/service-name\n          value: .*\n      metrics:\n        - name: UnHealthyHostCount\n          statistics:\n            - Maximum\n          period: 60\n          length: 600\n    - type: AWS/VPN\n      regions:\n        - eu-west-1\n      searchTags:\n        - key: kubernetes.io/service-name\n          
value: .*\n      metrics:\n        - name: TunnelState\n          statistics:\n            - p90\n          period: 60\n          length: 300\n    - type: AWS/Kinesis\n      regions:\n        - eu-west-1\n      metrics:\n        - name: PutRecords.Success\n          statistics:\n            - Sum\n          period: 60\n          length: 300\n    - type: AWS/S3\n      regions:\n        - eu-west-1\n      searchTags:\n        - key: type\n          value: public\n      metrics:\n        - name: NumberOfObjects\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n        - name: BucketSizeBytes\n          statistics:\n            - Average\n          period: 86400\n          length: 172800\n    - type: AWS/EBS\n      regions:\n        - eu-west-1\n      searchTags:\n        - key: type\n          value: public\n      metrics:\n        - name: BurstBalance\n          statistics:\n            - Minimum\n          period: 600\n          length: 600\n          addCloudwatchTimestamp: true\n    - type: AWS/Kafka\n      regions:\n        - eu-west-1\n      searchTags:\n        - key: env\n          value: dev\n      metrics:\n        - name: BytesOutPerSec\n          statistics:\n            - Average\n          period: 600\n          length: 600\nstatic:\n  - namespace: AWS/AutoScaling\n    name: must_be_set\n    regions:\n      - eu-west-1\n    dimensions:\n      - name: AutoScalingGroupName\n        value: Test\n    customTags:\n      - key: CustomTag\n        value: CustomValue\n    metrics:\n      - name: GroupInServiceInstances\n        statistics:\n          - Minimum\n        period: 60\n        length: 300\n"
  },
  {
    "path": "pkg/exporter.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage exporter\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tprom \"github.com/prometheus/common/model\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/promutil\"\n)\n\n// Metrics is a slice of prometheus metrics specific to the scraping process such API call counters\nvar Metrics = []prometheus.Collector{\n\tpromutil.CloudwatchAPIErrorCounter,\n\tpromutil.CloudwatchAPICounter,\n\tpromutil.CloudwatchGetMetricDataAPICounter,\n\tpromutil.CloudwatchGetMetricDataAPIMetricsCounter,\n\tpromutil.CloudwatchGetMetricStatisticsAPICounter,\n\tpromutil.ResourceGroupTaggingAPICounter,\n\tpromutil.AutoScalingAPICounter,\n\tpromutil.TargetGroupsAPICounter,\n\tpromutil.APIGatewayAPICounter,\n\tpromutil.Ec2APICounter,\n\tpromutil.DmsAPICounter,\n\tpromutil.StoragegatewayAPICounter,\n\tpromutil.DuplicateMetricsFilteredCounter,\n}\n\nconst 
(\n\tDefaultMetricsPerQuery       = 500\n\tDefaultLabelsSnakeCase       = false\n\tDefaultTaggingAPIConcurrency = 5\n)\n\nvar DefaultCloudwatchConcurrency = cloudwatch.ConcurrencyConfig{\n\tSingleLimit:        5,\n\tPerAPILimitEnabled: false,\n\n\t// If PerAPILimitEnabled is enabled, then use the same limit as the single limit by default.\n\tListMetrics:         5,\n\tGetMetricData:       5,\n\tGetMetricStatistics: 5,\n}\n\n// featureFlagsMap is a map that contains the enabled feature flags. If a key is not present, it means the feature flag\n// is disabled.\ntype featureFlagsMap map[string]struct{}\n\ntype options struct {\n\tmetricsPerQuery       int\n\tlabelsSnakeCase       bool\n\ttaggingAPIConcurrency int\n\tfeatureFlags          featureFlagsMap\n\tcloudwatchConcurrency cloudwatch.ConcurrencyConfig\n}\n\n// IsFeatureEnabled implements the FeatureFlags interface, allowing us to inject the options-configured feature flags in the rest of the code.\nfunc (ff featureFlagsMap) IsFeatureEnabled(flag string) bool {\n\t_, ok := ff[flag]\n\treturn ok\n}\n\ntype OptionsFunc func(*options) error\n\nfunc MetricsPerQuery(metricsPerQuery int) OptionsFunc {\n\treturn func(o *options) error {\n\t\tif metricsPerQuery <= 0 {\n\t\t\treturn fmt.Errorf(\"MetricsPerQuery must be a positive value\")\n\t\t}\n\n\t\to.metricsPerQuery = metricsPerQuery\n\t\treturn nil\n\t}\n}\n\nfunc LabelsSnakeCase(labelsSnakeCase bool) OptionsFunc {\n\treturn func(o *options) error {\n\t\to.labelsSnakeCase = labelsSnakeCase\n\t\treturn nil\n\t}\n}\n\nfunc CloudWatchAPIConcurrency(maxConcurrency int) OptionsFunc {\n\treturn func(o *options) error {\n\t\tif maxConcurrency <= 0 {\n\t\t\treturn fmt.Errorf(\"CloudWatchAPIConcurrency must be a positive value\")\n\t\t}\n\n\t\to.cloudwatchConcurrency.SingleLimit = maxConcurrency\n\t\treturn nil\n\t}\n}\n\nfunc CloudWatchPerAPILimitConcurrency(listMetrics, getMetricData, getMetricStatistics int) OptionsFunc {\n\treturn func(o *options) error {\n\t\tif 
listMetrics <= 0 {\n\t\t\treturn fmt.Errorf(\"ListMetrics concurrency limit must be a positive value\")\n\t\t}\n\t\tif getMetricData <= 0 {\n\t\t\treturn fmt.Errorf(\"GetMetricData concurrency limit must be a positive value\")\n\t\t}\n\t\tif getMetricStatistics <= 0 {\n\t\t\treturn fmt.Errorf(\"GetMetricStatistics concurrency limit must be a positive value\")\n\t\t}\n\n\t\to.cloudwatchConcurrency.PerAPILimitEnabled = true\n\t\to.cloudwatchConcurrency.ListMetrics = listMetrics\n\t\to.cloudwatchConcurrency.GetMetricData = getMetricData\n\t\to.cloudwatchConcurrency.GetMetricStatistics = getMetricStatistics\n\t\treturn nil\n\t}\n}\n\nfunc TaggingAPIConcurrency(maxConcurrency int) OptionsFunc {\n\treturn func(o *options) error {\n\t\tif maxConcurrency <= 0 {\n\t\t\treturn fmt.Errorf(\"TaggingAPIConcurrency must be a positive value\")\n\t\t}\n\n\t\to.taggingAPIConcurrency = maxConcurrency\n\t\treturn nil\n\t}\n}\n\n// EnableFeatureFlag is an option that enables a feature flag on the YACE's entrypoint.\nfunc EnableFeatureFlag(flags ...string) OptionsFunc {\n\treturn func(o *options) error {\n\t\tfor _, flag := range flags {\n\t\t\to.featureFlags[flag] = struct{}{}\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc defaultOptions() options {\n\treturn options{\n\t\tmetricsPerQuery:       DefaultMetricsPerQuery,\n\t\tlabelsSnakeCase:       DefaultLabelsSnakeCase,\n\t\ttaggingAPIConcurrency: DefaultTaggingAPIConcurrency,\n\t\tfeatureFlags:          make(featureFlagsMap),\n\t\tcloudwatchConcurrency: DefaultCloudwatchConcurrency,\n\t}\n}\n\n// UpdateMetrics is the entrypoint to scrape metrics from AWS on demand.\n//\n// Parameters are:\n// - `ctx`: a context for the request\n// - `config`: this is the struct representation of the configuration defined in top-level configuration\n// - `logger`: an *slog.Logger\n// - `registry`: any prometheus compatible registry where scraped AWS metrics will be written\n// - `factory`: any implementation of the `clients.Factory` interface\n// - `optFuncs`: 
(optional) any number of options funcs\n//\n// You can pre-register any of the default metrics from `Metrics` with the provided `registry` if you want them\n// included in the AWS scrape results. If you are using multiple instances of `registry` it\n// might make more sense to register these metrics in the application using YACE as a library to better\n// track them over the lifetime of the application.\nfunc UpdateMetrics(\n\tctx context.Context,\n\tlogger *slog.Logger,\n\tjobsCfg model.JobsConfig,\n\tregistry *prometheus.Registry,\n\tfactory clients.Factory,\n\toptFuncs ...OptionsFunc,\n) error {\n\t// Use legacy validation as that's the behaviour of former releases.\n\tprom.NameValidationScheme = prom.LegacyValidation //nolint:staticcheck\n\n\toptions := defaultOptions()\n\tfor _, f := range optFuncs {\n\t\tif err := f(&options); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// add feature flags to context passed down to all other layers\n\tctx = config.CtxWithFlags(ctx, options.featureFlags)\n\n\ttagsData, cloudwatchData := job.ScrapeAwsData(\n\t\tctx,\n\t\tlogger,\n\t\tjobsCfg,\n\t\tfactory,\n\t\toptions.metricsPerQuery,\n\t\toptions.cloudwatchConcurrency,\n\t\toptions.taggingAPIConcurrency,\n\t)\n\n\tmetrics, observedMetricLabels, err := promutil.BuildMetrics(cloudwatchData, options.labelsSnakeCase, logger)\n\tif err != nil {\n\t\tlogger.Error(\"Error migrating cloudwatch metrics to prometheus metrics\", \"err\", err)\n\t\treturn nil\n\t}\n\tmetrics, observedMetricLabels = promutil.BuildNamespaceInfoMetrics(tagsData, metrics, observedMetricLabels, options.labelsSnakeCase, logger)\n\tmetrics = promutil.EnsureLabelConsistencyAndRemoveDuplicates(metrics, observedMetricLabels)\n\n\tregistry.MustRegister(promutil.NewPrometheusCollector(metrics))\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/exporter_enhancedmetrics_test.go",
    "content": "package exporter\n\nimport (\n\t\"context\"\n\t\"log/slog\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\tdynamodbTypes \"github.com/aws/aws-sdk-go-v2/service/dynamodb/types\"\n\telasticacheTypes \"github.com/aws/aws-sdk-go-v2/service/elasticache/types\"\n\tlambdaTypes \"github.com/aws/aws-sdk-go-v2/service/lambda/types\"\n\t\"github.com/aws/aws-sdk-go-v2/service/rds/types\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/testutil\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/account\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/tagging\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics\"\n\tenhancedmetricsDynamoDBService \"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service/dynamodb\"\n\tenhancedmetricsElastiCacheService \"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service/elasticache\"\n\tenhancedmetricsLambdaService \"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service/lambda\"\n\tenhancedmetricsService \"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service/rds\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar (\n\t_ account.Client    = &mockAccountClient{}\n\t_ cloudwatch.Client = &mockCloudwatchClient{}\n\t_ tagging.Client    = &mockTaggingClient{}\n)\n\n// mockFactory is a local mock that implements both clients.Factory and config.RegionalConfigProvider\ntype mockFactoryForEnhancedMetrics struct {\n\taccountClient    account.Client\n\tcloudwatchClient 
cloudwatch.Client\n\ttaggingClient    tagging.Client\n\tawsConfig        *aws.Config\n}\n\n// GetAccountClient implements clients.Factory\nfunc (m *mockFactoryForEnhancedMetrics) GetAccountClient(string, model.Role) account.Client {\n\treturn m.accountClient\n}\n\n// GetCloudwatchClient implements clients.Factory\nfunc (m *mockFactoryForEnhancedMetrics) GetCloudwatchClient(string, model.Role, cloudwatch.ConcurrencyConfig) cloudwatch.Client {\n\treturn m.cloudwatchClient\n}\n\n// GetTaggingClient implements clients.Factory\nfunc (m *mockFactoryForEnhancedMetrics) GetTaggingClient(string, model.Role, int) tagging.Client {\n\treturn m.taggingClient\n}\n\n// GetAWSRegionalConfig implements config.RegionalConfigProvider\nfunc (m *mockFactoryForEnhancedMetrics) GetAWSRegionalConfig(string, model.Role) *aws.Config {\n\treturn m.awsConfig\n}\n\n// mockRDSClient implements the RDS Client interface for testing\ntype mockRDSClient struct {\n\tinstances []types.DBInstance\n\terr       error\n}\n\nfunc (m *mockRDSClient) DescribeDBInstances(context.Context, *slog.Logger, []string) ([]types.DBInstance, error) {\n\tif m.err != nil {\n\t\treturn nil, m.err\n\t}\n\treturn m.instances, nil\n}\n\n// mockLambdaClient implements the Lambda Client interface for testing\ntype mockLambdaClient struct {\n\tfunctions []lambdaTypes.FunctionConfiguration\n\terr       error\n}\n\nfunc (m *mockLambdaClient) ListAllFunctions(context.Context, *slog.Logger) ([]lambdaTypes.FunctionConfiguration, error) {\n\tif m.err != nil {\n\t\treturn nil, m.err\n\t}\n\treturn m.functions, nil\n}\n\n// mockElastiCacheClient implements the ElastiCache Client interface for testing\ntype mockElastiCacheClient struct {\n\tclusters []elasticacheTypes.CacheCluster\n\terr      error\n}\n\nfunc (m *mockElastiCacheClient) DescribeAllCacheClusters(context.Context, *slog.Logger) ([]elasticacheTypes.CacheCluster, error) {\n\tif m.err != nil {\n\t\treturn nil, m.err\n\t}\n\treturn m.clusters, nil\n}\n\n// mockDynamoDBClient 
implements the DynamoDB Client interface for testing\ntype mockDynamoDBClient struct {\n\ttables []dynamodbTypes.TableDescription\n\terr    error\n}\n\nfunc (m *mockDynamoDBClient) DescribeTables(context.Context, *slog.Logger, []string) ([]dynamodbTypes.TableDescription, error) {\n\tif m.err != nil {\n\t\treturn nil, m.err\n\t}\n\treturn m.tables, nil\n}\n\nfunc TestUpdateMetrics_WithEnhancedMetrics_RDS(t *testing.T) {\n\tdefer enhancedmetrics.DefaultEnhancedMetricServiceRegistry.Register(\n\t\tenhancedmetricsService.NewRDSService(nil),\n\t)\n\tctx := context.Background()\n\tlogger := slog.New(slog.DiscardHandler)\n\n\t// Create a test AWS config\n\ttestAWSConfig := &aws.Config{\n\t\tRegion: \"us-east-1\",\n\t}\n\n\t// Create mock clients\n\tmockAcctClient := &mockAccountClient{\n\t\taccountID:    \"123456789012\",\n\t\taccountAlias: \"test-account\",\n\t}\n\n\tmockCWClient := &mockCloudwatchClient{\n\t\tmetrics:           []*model.Metric{},\n\t\tmetricDataResults: []cloudwatch.MetricDataResult{},\n\t}\n\n\tmockTagClient := &mockTaggingClient{\n\t\tresources: []*model.TaggedResource{\n\t\t\t{\n\t\t\t\tARN:       \"arn:aws:rds:us-east-1:123456789012:db:test-db\",\n\t\t\t\tNamespace: \"AWS/RDS\",\n\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t{Key: \"Name\", Value: \"test-db\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Create a mock RDS client builder function for testing\n\tmockRDSClientBuilder := func(_ aws.Config) enhancedmetricsService.Client {\n\t\treturn &mockRDSClient{\n\t\t\tinstances: []types.DBInstance{\n\t\t\t\t{\n\t\t\t\t\tDBInstanceArn:        aws.String(\"arn:aws:rds:us-east-1:123456789012:db:test-db\"),\n\t\t\t\t\tDBInstanceIdentifier: aws.String(\"test-db\"),\n\t\t\t\t\tAllocatedStorage:     aws.Int32(100),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\t// Register the RDS service with the mock builder in the default 
registry\n\tenhancedmetrics.DefaultEnhancedMetricServiceRegistry.Register(\n\t\tenhancedmetricsService.NewRDSService(mockRDSClientBuilder),\n\t)\n\n\tfactory := &mockFactoryForEnhancedMetrics{\n\t\taccountClient:    mockAcctClient,\n\t\tcloudwatchClient: mockCWClient,\n\t\ttaggingClient:    mockTagClient,\n\t\tawsConfig:        testAWSConfig,\n\t}\n\n\t// Create a test job config with enhanced metrics\n\tjobsCfg := model.JobsConfig{\n\t\tDiscoveryJobs: []model.DiscoveryJob{\n\t\t\t{\n\t\t\t\tRegions:   []string{\"us-east-1\"},\n\t\t\t\tNamespace: \"AWS/RDS\",\n\t\t\t\tRoles:     []model.Role{{RoleArn: \"arn:aws:iam::123456789012:role/test-role\"}},\n\t\t\t\tEnhancedMetrics: []*model.EnhancedMetricConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"AllocatedStorage\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExportedTagsOnMetrics: []string{\"Name\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tregistry := prometheus.NewRegistry()\n\n\terr := UpdateMetrics(ctx, logger, jobsCfg, registry, factory)\n\trequire.NoError(t, err)\n\n\tmetrics, err := registry.Gather()\n\trequire.NoError(t, err)\n\trequire.NotNil(t, metrics)\n\trequire.Len(t, metrics, 2)\n\n\texpectedMetric := `\n\t\t# HELP aws_rds_info Help is not implemented yet.\n\t\t# TYPE aws_rds_info gauge\n\t\taws_rds_info{name=\"arn:aws:rds:us-east-1:123456789012:db:test-db\",tag_Name=\"test-db\"} 0\n\t\t# HELP aws_rds_allocated_storage Help is not implemented yet.\n\t\t# TYPE aws_rds_allocated_storage gauge\n\t\taws_rds_allocated_storage{account_alias=\"test-account\",account_id=\"123456789012\",dimension_DBInstanceIdentifier=\"test-db\",name=\"arn:aws:rds:us-east-1:123456789012:db:test-db\",region=\"us-east-1\",tag_Name=\"test-db\"} 1.073741824e+11\n`\n\n\terr = testutil.GatherAndCompare(registry, strings.NewReader(expectedMetric))\n\trequire.NoError(t, err)\n}\n\nfunc TestUpdateMetrics_WithEnhancedMetrics_Lambda(t *testing.T) {\n\tdefer 
enhancedmetrics.DefaultEnhancedMetricServiceRegistry.Register(\n\t\tenhancedmetricsLambdaService.NewLambdaService(nil),\n\t)\n\n\tctx := context.Background()\n\tlogger := slog.New(slog.DiscardHandler)\n\n\t// Create a test AWS config\n\ttestAWSConfig := &aws.Config{\n\t\tRegion: \"us-east-1\",\n\t}\n\n\t// Create mock clients\n\tmockAcctClient := &mockAccountClient{\n\t\taccountID:    \"123456789012\",\n\t\taccountAlias: \"test-account\",\n\t}\n\n\tmockCWClient := &mockCloudwatchClient{\n\t\tmetrics:           []*model.Metric{},\n\t\tmetricDataResults: []cloudwatch.MetricDataResult{},\n\t}\n\n\tmockTagClient := &mockTaggingClient{\n\t\tresources: []*model.TaggedResource{\n\t\t\t{\n\t\t\t\tARN:       \"arn:aws:lambda:us-east-1:123456789012:function:test-function\",\n\t\t\t\tNamespace: \"AWS/Lambda\",\n\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t{Key: \"Name\", Value: \"test-function\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Create a mock Lambda client builder function for testing\n\tmockLambdaClientBuilder := func(_ aws.Config) enhancedmetricsLambdaService.Client {\n\t\treturn &mockLambdaClient{\n\t\t\tfunctions: []lambdaTypes.FunctionConfiguration{\n\t\t\t\t{\n\t\t\t\t\tFunctionArn:  aws.String(\"arn:aws:lambda:us-east-1:123456789012:function:test-function\"),\n\t\t\t\t\tFunctionName: aws.String(\"test-function\"),\n\t\t\t\t\tTimeout:      aws.Int32(300),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\t// Register the Lambda service with the mock builder in the default registry\n\tenhancedmetrics.DefaultEnhancedMetricServiceRegistry.Register(\n\t\tenhancedmetricsLambdaService.NewLambdaService(mockLambdaClientBuilder),\n\t)\n\n\tfactory := &mockFactoryForEnhancedMetrics{\n\t\taccountClient:    mockAcctClient,\n\t\tcloudwatchClient: mockCWClient,\n\t\ttaggingClient:    mockTagClient,\n\t\tawsConfig:        testAWSConfig,\n\t}\n\n\t// Create a test job config with enhanced metrics\n\tjobsCfg := model.JobsConfig{\n\t\tDiscoveryJobs: 
[]model.DiscoveryJob{\n\t\t\t{\n\t\t\t\tRegions:   []string{\"us-east-1\"},\n\t\t\t\tNamespace: \"AWS/Lambda\",\n\t\t\t\tRoles:     []model.Role{{RoleArn: \"arn:aws:iam::123456789012:role/test-role\"}},\n\t\t\t\tEnhancedMetrics: []*model.EnhancedMetricConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"Timeout\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExportedTagsOnMetrics: []string{\"Name\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tregistry := prometheus.NewRegistry()\n\n\terr := UpdateMetrics(ctx, logger, jobsCfg, registry, factory)\n\trequire.NoError(t, err)\n\n\tmetrics, err := registry.Gather()\n\n\trequire.NoError(t, err)\n\trequire.NotNil(t, metrics)\n\trequire.Len(t, metrics, 2)\n\n\texpectedMetric := `\n\t\t# HELP aws_lambda_info Help is not implemented yet.\n\t\t# TYPE aws_lambda_info gauge\n\t\taws_lambda_info{name=\"arn:aws:lambda:us-east-1:123456789012:function:test-function\",tag_Name=\"test-function\"} 0\n\t\t# HELP aws_lambda_timeout Help is not implemented yet.\n\t\t# TYPE aws_lambda_timeout gauge\n\t\taws_lambda_timeout{account_alias=\"test-account\",account_id=\"123456789012\",dimension_FunctionName=\"test-function\",name=\"arn:aws:lambda:us-east-1:123456789012:function:test-function\",region=\"us-east-1\",tag_Name=\"test-function\"} 300\n`\n\terr = testutil.GatherAndCompare(registry, strings.NewReader(expectedMetric))\n\trequire.NoError(t, err)\n}\n\nfunc TestUpdateMetrics_WithEnhancedMetrics_ElastiCache(t *testing.T) {\n\tdefer enhancedmetrics.DefaultEnhancedMetricServiceRegistry.Register(\n\t\tenhancedmetricsElastiCacheService.NewElastiCacheService(nil),\n\t)\n\n\tctx := context.Background()\n\tlogger := slog.New(slog.DiscardHandler)\n\n\t// Create a test AWS config\n\ttestAWSConfig := &aws.Config{\n\t\tRegion: \"us-east-1\",\n\t}\n\n\t// Create mock clients\n\tmockAcctClient := &mockAccountClient{\n\t\taccountID:    \"123456789012\",\n\t\taccountAlias: \"test-account\",\n\t}\n\n\tmockCWClient := &mockCloudwatchClient{\n\t\tmetrics:           
[]*model.Metric{},\n\t\tmetricDataResults: []cloudwatch.MetricDataResult{},\n\t}\n\n\tmockTagClient := &mockTaggingClient{\n\t\tresources: []*model.TaggedResource{\n\t\t\t{\n\t\t\t\tARN:       \"arn:aws:elasticache:us-east-1:123456789012:cluster:test-cluster\",\n\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t{Key: \"Name\", Value: \"test-cluster\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Create a mock ElastiCache client builder function for testing\n\tmockElastiCacheClientBuilder := func(_ aws.Config) enhancedmetricsElastiCacheService.Client {\n\t\treturn &mockElastiCacheClient{\n\t\t\tclusters: []elasticacheTypes.CacheCluster{\n\t\t\t\t{\n\t\t\t\t\tARN:            aws.String(\"arn:aws:elasticache:us-east-1:123456789012:cluster:test-cluster\"),\n\t\t\t\t\tCacheClusterId: aws.String(\"test-cluster\"),\n\t\t\t\t\tNumCacheNodes:  aws.Int32(3),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\t// Register the ElastiCache service with the mock builder in the default registry\n\tenhancedmetrics.DefaultEnhancedMetricServiceRegistry.Register(\n\t\tenhancedmetricsElastiCacheService.NewElastiCacheService(mockElastiCacheClientBuilder),\n\t)\n\n\tfactory := &mockFactoryForEnhancedMetrics{\n\t\taccountClient:    mockAcctClient,\n\t\tcloudwatchClient: mockCWClient,\n\t\ttaggingClient:    mockTagClient,\n\t\tawsConfig:        testAWSConfig,\n\t}\n\n\t// Create a test job config with enhanced metrics\n\tjobsCfg := model.JobsConfig{\n\t\tDiscoveryJobs: []model.DiscoveryJob{\n\t\t\t{\n\t\t\t\tRegions:   []string{\"us-east-1\"},\n\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\tRoles:     []model.Role{{RoleArn: \"arn:aws:iam::123456789012:role/test-role\"}},\n\t\t\t\tEnhancedMetrics: []*model.EnhancedMetricConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"NumCacheNodes\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExportedTagsOnMetrics: []string{\"Name\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tregistry := prometheus.NewRegistry()\n\n\terr := 
UpdateMetrics(ctx, logger, jobsCfg, registry, factory)\n\trequire.NoError(t, err)\n\n\tmetrics, err := registry.Gather()\n\trequire.NoError(t, err)\n\trequire.NotNil(t, metrics)\n\trequire.Len(t, metrics, 2)\n\n\texpectedMetric := `\n\t\t# HELP aws_elasticache_info Help is not implemented yet.\n\t\t# TYPE aws_elasticache_info gauge\n\t\taws_elasticache_info{name=\"arn:aws:elasticache:us-east-1:123456789012:cluster:test-cluster\",tag_Name=\"test-cluster\"} 0\n\t\t# HELP aws_elasticache_num_cache_nodes Help is not implemented yet.\n\t\t# TYPE aws_elasticache_num_cache_nodes gauge\n\t\taws_elasticache_num_cache_nodes{account_alias=\"test-account\",account_id=\"123456789012\",dimension_CacheClusterId=\"test-cluster\",name=\"arn:aws:elasticache:us-east-1:123456789012:cluster:test-cluster\",region=\"us-east-1\",tag_Name=\"test-cluster\"} 3\n`\n\n\terr = testutil.GatherAndCompare(registry, strings.NewReader(expectedMetric))\n\trequire.NoError(t, err)\n}\n\nfunc TestUpdateMetrics_WithEnhancedMetrics_DynamoDB(t *testing.T) {\n\tdefer enhancedmetrics.DefaultEnhancedMetricServiceRegistry.Register(\n\t\tenhancedmetricsDynamoDBService.NewDynamoDBService(nil),\n\t)\n\n\tctx := context.Background()\n\tlogger := slog.New(slog.DiscardHandler)\n\n\t// Create a test AWS config\n\ttestAWSConfig := &aws.Config{\n\t\tRegion: \"us-east-1\",\n\t}\n\n\t// Create mock clients\n\tmockAcctClient := &mockAccountClient{\n\t\taccountID:    \"123456789012\",\n\t\taccountAlias: \"test-account\",\n\t}\n\n\tmockCWClient := &mockCloudwatchClient{\n\t\tmetrics:           []*model.Metric{},\n\t\tmetricDataResults: []cloudwatch.MetricDataResult{},\n\t}\n\n\tmockTagClient := &mockTaggingClient{\n\t\tresources: []*model.TaggedResource{\n\t\t\t{\n\t\t\t\tARN:       \"arn:aws:dynamodb:us-east-1:123456789012:table/test-table\",\n\t\t\t\tNamespace: \"AWS/DynamoDB\",\n\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t{Key: \"Name\", Value: 
\"test-table\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Create a mock DynamoDB client builder function for testing\n\tmockDynamoDBClientBuilder := func(_ aws.Config) enhancedmetricsDynamoDBService.Client {\n\t\treturn &mockDynamoDBClient{\n\t\t\ttables: []dynamodbTypes.TableDescription{\n\t\t\t\t{\n\t\t\t\t\tTableArn:  aws.String(\"arn:aws:dynamodb:us-east-1:123456789012:table/test-table\"),\n\t\t\t\t\tTableName: aws.String(\"test-table\"),\n\t\t\t\t\tItemCount: aws.Int64(1000),\n\t\t\t\t\tGlobalSecondaryIndexes: []dynamodbTypes.GlobalSecondaryIndexDescription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIndexName: aws.String(\"GSI1\"),\n\t\t\t\t\t\t\tItemCount: aws.Int64(500),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIndexName: aws.String(\"GSI2\"),\n\t\t\t\t\t\t\tItemCount: aws.Int64(300),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\t// Register the DynamoDB service with the mock builder in the default registry\n\tenhancedmetrics.DefaultEnhancedMetricServiceRegistry.Register(\n\t\tenhancedmetricsDynamoDBService.NewDynamoDBService(mockDynamoDBClientBuilder),\n\t)\n\n\tfactory := &mockFactoryForEnhancedMetrics{\n\t\taccountClient:    mockAcctClient,\n\t\tcloudwatchClient: mockCWClient,\n\t\ttaggingClient:    mockTagClient,\n\t\tawsConfig:        testAWSConfig,\n\t}\n\n\t// Create a test job config with enhanced metrics\n\tjobsCfg := model.JobsConfig{\n\t\tDiscoveryJobs: []model.DiscoveryJob{\n\t\t\t{\n\t\t\t\tRegions:   []string{\"us-east-1\"},\n\t\t\t\tNamespace: \"AWS/DynamoDB\",\n\t\t\t\tRoles:     []model.Role{{RoleArn: \"arn:aws:iam::123456789012:role/test-role\"}},\n\t\t\t\tEnhancedMetrics: []*model.EnhancedMetricConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"ItemCount\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExportedTagsOnMetrics: []string{\"Name\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tregistry := prometheus.NewRegistry()\n\n\terr := UpdateMetrics(ctx, logger, jobsCfg, registry, factory)\n\trequire.NoError(t, err)\n\n\tmetrics, err := 
registry.Gather()\n\trequire.NoError(t, err)\n\trequire.NotNil(t, metrics)\n\trequire.Len(t, metrics, 2)\n\n\texpectedMetric := `\n\t\t# HELP aws_dynamodb_info Help is not implemented yet.\n\t\t# TYPE aws_dynamodb_info gauge\n\t\taws_dynamodb_info{name=\"arn:aws:dynamodb:us-east-1:123456789012:table/test-table\",tag_Name=\"test-table\"} 0\n\t\t# HELP aws_dynamodb_item_count Help is not implemented yet.\n\t\t# TYPE aws_dynamodb_item_count gauge\n\t\taws_dynamodb_item_count{account_alias=\"test-account\",account_id=\"123456789012\",dimension_GlobalSecondaryIndexName=\"\",dimension_TableName=\"test-table\",name=\"arn:aws:dynamodb:us-east-1:123456789012:table/test-table\",region=\"us-east-1\",tag_Name=\"test-table\"} 1000\n\t\taws_dynamodb_item_count{account_alias=\"test-account\",account_id=\"123456789012\",dimension_GlobalSecondaryIndexName=\"GSI1\",dimension_TableName=\"test-table\",name=\"arn:aws:dynamodb:us-east-1:123456789012:table/test-table\",region=\"us-east-1\",tag_Name=\"test-table\"} 500\n\t\taws_dynamodb_item_count{account_alias=\"test-account\",account_id=\"123456789012\",dimension_GlobalSecondaryIndexName=\"GSI2\",dimension_TableName=\"test-table\",name=\"arn:aws:dynamodb:us-east-1:123456789012:table/test-table\",region=\"us-east-1\",tag_Name=\"test-table\"} 300\n`\n\n\terr = testutil.GatherAndCompare(registry, strings.NewReader(expectedMetric))\n\trequire.NoError(t, err)\n}\n"
  },
  {
    "path": "pkg/exporter_test.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage exporter\n\nimport (\n\t\"context\"\n\t\"log/slog\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/grafana/regexp\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/testutil\"\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/account\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/tagging\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\n// mockFactory implements the clients.Factory interface for testing\ntype mockFactory struct {\n\tcloudwatchClient mockCloudwatchClient\n\ttaggingClient    mockTaggingClient\n\taccountClient    mockAccountClient\n}\n\nfunc (f *mockFactory) GetCloudwatchClient(_ string, _ model.Role, _ cloudwatch.ConcurrencyConfig) cloudwatch.Client {\n\treturn &f.cloudwatchClient\n}\n\nfunc (f *mockFactory) GetTaggingClient(_ string, _ model.Role, _ int) tagging.Client {\n\treturn f.taggingClient\n}\n\nfunc (f *mockFactory) GetAccountClient(_ string, _ 
model.Role) account.Client {\n\treturn f.accountClient\n}\n\n// mockAccountClient implements the account.Client interface\ntype mockAccountClient struct {\n\taccountID    string\n\taccountAlias string\n\terr          error\n}\n\nfunc (m mockAccountClient) GetAccount(_ context.Context) (string, error) {\n\tif m.err != nil {\n\t\treturn \"\", m.err\n\t}\n\treturn m.accountID, nil\n}\n\nfunc (m mockAccountClient) GetAccountAlias(_ context.Context) (string, error) {\n\tif m.err != nil {\n\t\treturn \"\", m.err\n\t}\n\treturn m.accountAlias, nil\n}\n\n// mockTaggingClient implements the tagging.Client interface\ntype mockTaggingClient struct {\n\tresources []*model.TaggedResource\n\terr       error\n}\n\nfunc (m mockTaggingClient) GetResources(_ context.Context, _ model.DiscoveryJob, _ string) ([]*model.TaggedResource, error) {\n\tif m.err != nil {\n\t\treturn nil, m.err\n\t}\n\treturn m.resources, nil\n}\n\n// mockCloudwatchClient implements the cloudwatch.Client interface\ntype mockCloudwatchClient struct {\n\tmetrics           []*model.Metric\n\tmetricDataResults []cloudwatch.MetricDataResult\n\terr               error\n}\n\nfunc (m *mockCloudwatchClient) ListMetrics(_ context.Context, _ string, _ *model.MetricConfig, _ bool, fn func(page []*model.Metric)) error {\n\tif m.err != nil {\n\t\treturn m.err\n\t}\n\tif len(m.metrics) > 0 {\n\t\tfn(m.metrics)\n\t}\n\treturn nil\n}\n\nfunc (m *mockCloudwatchClient) GetMetricData(_ context.Context, _ []*model.CloudwatchData, _ string, _ time.Time, _ time.Time) []cloudwatch.MetricDataResult {\n\treturn m.metricDataResults\n}\n\nfunc (m *mockCloudwatchClient) GetMetricStatistics(_ context.Context, _ *slog.Logger, _ []model.Dimension, _ string, _ *model.MetricConfig) []*model.MetricStatisticsResult {\n\t// Return a simple metric statistics result for testing\n\tnow := time.Now()\n\tavg := 42.0\n\treturn []*model.MetricStatisticsResult{\n\t\t{\n\t\t\tTimestamp: &now,\n\t\t\tAverage:   &avg,\n\t\t},\n\t}\n}\n\nfunc 
TestUpdateMetrics_StaticJob(t *testing.T) {\n\tctx := context.Background()\n\tlogger := promslog.NewNopLogger()\n\n\t// Create a simple static job configuration\n\tjobsCfg := model.JobsConfig{\n\t\tStaticJobs: []model.StaticJob{\n\t\t\t{\n\t\t\t\tName:      \"test-static-job\",\n\t\t\t\tRegions:   []string{\"us-east-1\"},\n\t\t\t\tRoles:     []model.Role{{}},\n\t\t\t\tNamespace: \"AWS/EC2\",\n\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t{Name: \"InstanceId\", Value: \"i-1234567890abcdef0\"},\n\t\t\t\t},\n\t\t\t\tMetrics: []*model.MetricConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:       \"CPUUtilization\",\n\t\t\t\t\t\tStatistics: []string{\"Average\"},\n\t\t\t\t\t\tPeriod:     300,\n\t\t\t\t\t\tLength:     300,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfactory := &mockFactory{\n\t\taccountClient: mockAccountClient{\n\t\t\taccountID:    \"123456789012\",\n\t\t\taccountAlias: \"test-account\",\n\t\t},\n\t\tcloudwatchClient: mockCloudwatchClient{},\n\t}\n\n\tregistry := prometheus.NewRegistry()\n\n\terr := UpdateMetrics(ctx, logger, jobsCfg, registry, factory)\n\trequire.NoError(t, err)\n\n\t// Verify the expected metric exists using testutil\n\texpectedMetric := `\n\t\t# HELP aws_ec2_cpuutilization_average Help is not implemented yet.\n\t\t# TYPE aws_ec2_cpuutilization_average gauge\n\t\taws_ec2_cpuutilization_average{account_alias=\"test-account\",account_id=\"123456789012\",dimension_InstanceId=\"i-1234567890abcdef0\",name=\"test-static-job\",region=\"us-east-1\"} 42\n\t`\n\n\terr = testutil.GatherAndCompare(registry, strings.NewReader(expectedMetric))\n\trequire.NoError(t, err, \"Metric aws_ec2_cpuutilization_average should match expected output\")\n}\n\nfunc TestUpdateMetrics_DiscoveryJob(t *testing.T) {\n\tctx := context.Background()\n\tlogger := promslog.NewNopLogger()\n\n\t// Create a discovery job configuration\n\tsvc := config.SupportedServices.GetService(\"AWS/EC2\")\n\tjobsCfg := model.JobsConfig{\n\t\tDiscoveryJobs: 
[]model.DiscoveryJob{\n\t\t\t{\n\t\t\t\tNamespace: \"AWS/EC2\",\n\t\t\t\tRegions:   []string{\"us-east-1\"},\n\t\t\t\tRoles:     []model.Role{{}},\n\t\t\t\tSearchTags: []model.SearchTag{\n\t\t\t\t\t{Key: \"Environment\", Value: regexp.MustCompile(\".*\")},\n\t\t\t\t},\n\t\t\t\tMetrics: []*model.MetricConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:       \"CPUUtilization\",\n\t\t\t\t\t\tStatistics: []string{\"Average\"},\n\t\t\t\t\t\tPeriod:     300,\n\t\t\t\t\t\tLength:     300,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tDimensionsRegexps: svc.ToModelDimensionsRegexp(),\n\t\t\t},\n\t\t},\n\t}\n\n\tfactory := &mockFactory{\n\t\taccountClient: mockAccountClient{\n\t\t\taccountID:    \"123456789012\",\n\t\t\taccountAlias: \"test-account\",\n\t\t},\n\t\ttaggingClient: mockTaggingClient{\n\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t{\n\t\t\t\t\tARN:       \"arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890abcdef0\",\n\t\t\t\t\tNamespace: \"AWS/EC2\",\n\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t{Key: \"Environment\", Value: \"production\"},\n\t\t\t\t\t\t{Key: \"Name\", Value: \"test-instance\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tcloudwatchClient: mockCloudwatchClient{\n\t\t\tmetrics: []*model.Metric{\n\t\t\t\t{\n\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\tNamespace:  \"AWS/EC2\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"InstanceId\", Value: \"i-1234567890abcdef0\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetricDataResults: []cloudwatch.MetricDataResult{\n\t\t\t\t{\n\t\t\t\t\tID: \"id_0\",\n\t\t\t\t\tDataPoints: []cloudwatch.DataPoint{\n\t\t\t\t\t\t{Value: aws.Float64(42.5), Timestamp: time.Now()},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tregistry := prometheus.NewRegistry()\n\n\terr := UpdateMetrics(ctx, logger, jobsCfg, registry, factory)\n\trequire.NoError(t, err)\n\n\texpectedMetric := `\n\t\t# HELP aws_ec2_cpuutilization_average Help is not implemented yet.\n\t\t# TYPE 
aws_ec2_cpuutilization_average gauge\n\t\taws_ec2_cpuutilization_average{account_alias=\"test-account\",account_id=\"123456789012\",dimension_InstanceId=\"i-1234567890abcdef0\",name=\"arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890abcdef0\",region=\"us-east-1\"} 42.5\n\t\t# HELP aws_ec2_info Help is not implemented yet.\n\t\t# TYPE aws_ec2_info gauge\n\t\taws_ec2_info{name=\"arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890abcdef0\",tag_Environment=\"production\",tag_Name=\"test-instance\"} 0\n\t`\n\terr = testutil.GatherAndCompare(registry, strings.NewReader(expectedMetric))\n\trequire.NoError(t, err)\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/config/provider.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage config\n\nimport (\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\n// RegionalConfigProvider is an interface for providing AWS regional configurations based on region and role.\n// Factory interface implementations should implement this interface in order to support enhanced metrics.\ntype RegionalConfigProvider interface {\n\t// GetAWSRegionalConfig returns the AWS configuration for a given region and role.\n\t// It will be used to create AWS service clients for enhanced metrics processing.\n\tGetAWSRegionalConfig(region string, role model.Role) *aws.Config\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/registry.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage enhancedmetrics\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service/dynamodb\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service/elasticache\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service/lambda\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service/rds\"\n)\n\n// DefaultEnhancedMetricServiceRegistry is the default registry containing all built-in enhanced metrics services\n// It allows registering additional services if needed, or replacing existing ones, e.g. 
for testing purposes.\n//\n// Note: In the future, it can be removed in favor of being injected via dependency injection.\n// However, it will require changes in the YACE's API.\nvar DefaultEnhancedMetricServiceRegistry = (&Registry{}).\n\tRegister(rds.NewRDSService(nil)).\n\tRegister(lambda.NewLambdaService(nil)).\n\tRegister(dynamodb.NewDynamoDBService(nil)).\n\tRegister(elasticache.NewElastiCacheService(nil))\n\n// MetricsService represents an enhanced metrics service with methods to get its instance and namespace.\n// Services implementing this interface can be registered in the Registry.\ntype MetricsService interface {\n\tInstance() service.EnhancedMetricsService\n\tGetNamespace() string\n}\n\n// Registry maintains a mapping of enhanced metrics services by their namespaces.\ntype Registry struct {\n\tm sync.RWMutex\n\n\tservices map[string]func() service.EnhancedMetricsService\n}\n\n// Register adds a new enhanced metrics service to the registry or replaces an existing one with the same namespace.\nfunc (receiver *Registry) Register(t MetricsService) *Registry {\n\treceiver.m.Lock()\n\tdefer receiver.m.Unlock()\n\n\tif receiver.services == nil {\n\t\treceiver.services = map[string]func() service.EnhancedMetricsService{}\n\t}\n\treceiver.services[t.GetNamespace()] = t.Instance\n\n\treturn receiver\n}\n\n// GetEnhancedMetricsService retrieves an enhanced metrics service by its namespace.\nfunc (receiver *Registry) GetEnhancedMetricsService(namespace string) (service.EnhancedMetricsService, error) {\n\treceiver.m.RLock()\n\tdefer receiver.m.RUnlock()\n\n\tif constructor, exists := receiver.services[namespace]; exists {\n\t\treturn constructor(), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"enhanced metrics service for namespace %s not found\", namespace)\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/registry_test.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage enhancedmetrics\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service/rds\"\n)\n\n// registryMockMetricsServiceWrapper wraps the mock service to implement MetricsService interface\ntype registryMockMetricsServiceWrapper struct {\n\tnamespace    string\n\tinstanceFunc func() service.EnhancedMetricsService\n}\n\nfunc (m *registryMockMetricsServiceWrapper) GetNamespace() string {\n\treturn m.namespace\n}\n\nfunc (m *registryMockMetricsServiceWrapper) Instance() service.EnhancedMetricsService {\n\tif m.instanceFunc != nil {\n\t\treturn m.instanceFunc()\n\t}\n\treturn nil\n}\n\nfunc TestRegistry_Register(t *testing.T) {\n\ttests := []struct {\n\t\tname       string\n\t\tsetup      func() *Registry\n\t\tservices   []string\n\t\tassertions func(t *testing.T, registry *Registry)\n\t}{\n\t\t{\n\t\t\tname:     \"register single service\",\n\t\t\tsetup:    func() *Registry { return &Registry{} },\n\t\t\tservices: []string{\"AWS/Test\"},\n\t\t\tassertions: func(t *testing.T, registry *Registry) {\n\t\t\t\tassert.NotNil(t, registry.services)\n\t\t\t\tassert.Contains(t, 
registry.services, \"AWS/Test\")\n\t\t\t\tassert.Len(t, registry.services, 1)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:     \"register multiple services\",\n\t\t\tsetup:    func() *Registry { return &Registry{} },\n\t\t\tservices: []string{\"AWS/Test1\", \"AWS/Test2\"},\n\t\t\tassertions: func(t *testing.T, registry *Registry) {\n\t\t\t\tassert.Len(t, registry.services, 2)\n\t\t\t\tassert.Contains(t, registry.services, \"AWS/Test1\")\n\t\t\t\tassert.Contains(t, registry.services, \"AWS/Test2\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:     \"replace existing service\",\n\t\t\tsetup:    func() *Registry { return &Registry{} },\n\t\t\tservices: []string{\"AWS/Test\", \"AWS/Test\"},\n\t\t\tassertions: func(t *testing.T, registry *Registry) {\n\t\t\t\tassert.Len(t, registry.services, 1)\n\t\t\t\t_, err := registry.GetEnhancedMetricsService(\"AWS/Test\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:     \"register on nil services map\",\n\t\t\tsetup:    func() *Registry { return &Registry{} },\n\t\t\tservices: []string{\"AWS/Test\"},\n\t\t\tassertions: func(t *testing.T, registry *Registry) {\n\t\t\t\tassert.NotNil(t, registry.services)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tregistry := tt.setup()\n\n\t\t\tvar result *Registry\n\t\t\tfor _, ns := range tt.services {\n\t\t\t\tmockSvc := &registryMockMetricsServiceWrapper{\n\t\t\t\t\tnamespace: ns,\n\t\t\t\t}\n\t\t\t\tresult = registry.Register(mockSvc)\n\t\t\t}\n\n\t\t\tassert.NotNil(t, result)\n\t\t\tassert.Equal(t, registry, result, \"Register should return the registry for chaining\")\n\t\t\ttt.assertions(t, registry)\n\t\t})\n\t}\n}\n\nfunc TestRegistry_GetEnhancedMetricsService(t *testing.T) {\n\ttests := []struct {\n\t\tname        string\n\t\tsetup       func() *Registry\n\t\tnamespace   string\n\t\texpectError bool\n\t\terror       string\n\t}{\n\t\t{\n\t\t\tname: \"get existing service\",\n\t\t\tsetup: func() *Registry 
{\n\t\t\t\tregistry := &Registry{}\n\t\t\t\tregistry.Register(rds.NewRDSService(nil))\n\t\t\t\treturn registry\n\t\t\t},\n\t\t\tnamespace:   \"AWS/RDS\",\n\t\t\texpectError: false,\n\t\t},\n\t\t{\n\t\t\tname: \"get non-existent service\",\n\t\t\tsetup: func() *Registry {\n\t\t\t\tregistry := &Registry{}\n\t\t\t\tregistry.Register(rds.NewRDSService(nil))\n\t\t\t\treturn registry\n\t\t\t},\n\t\t\tnamespace:   \"AWS/NonExistent\",\n\t\t\texpectError: true,\n\t\t\terror:       \"enhanced metrics service for namespace AWS/NonExistent not found\",\n\t\t},\n\t\t{\n\t\t\tname: \"get service from empty registry\",\n\t\t\tsetup: func() *Registry {\n\t\t\t\treturn &Registry{}\n\t\t\t},\n\t\t\tnamespace:   \"AWS/Test\",\n\t\t\terror:       \"enhanced metrics service for namespace AWS/Test not found\",\n\t\t\texpectError: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tregistry := tt.setup()\n\t\t\tsvc, err := registry.GetEnhancedMetricsService(tt.namespace)\n\n\t\t\tif tt.expectError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Equal(t, err.Error(), tt.error)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.NotNil(t, svc)\n\t\t\t}\n\t\t})\n\t}\n\n\tt.Run(\"service instance is independent\", func(t *testing.T) {\n\t\tregistry := &Registry{}\n\t\tregistry.Register(rds.NewRDSService(nil))\n\t\tsvc1, err1 := registry.GetEnhancedMetricsService(\"AWS/RDS\")\n\t\tsvc2, err2 := registry.GetEnhancedMetricsService(\"AWS/RDS\")\n\n\t\trequire.NoError(t, err1)\n\t\trequire.NoError(t, err2)\n\t\tassert.NotNil(t, svc1)\n\t\tassert.NotNil(t, svc2)\n\n\t\t// Each call to Instance() should return a new instance\n\t\t// This test verifies that the constructor function is being called\n\n\t\t// copy the pointer addresses to compare\n\t\tassert.NotSame(t, svc1, svc2, \"Each call to GetEnhancedMetricsService should return a new instance\")\n\t})\n}\n\nfunc TestRegistry_ConcurrentAccess(t *testing.T) {\n\tt.Run(\"concurrent 
registration\", func(t *testing.T) {\n\t\tregistry := &Registry{}\n\t\tvar wg sync.WaitGroup\n\n\t\t// Register multiple services concurrently\n\t\tfor i := 0; i < 10; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tmockSvc := &registryMockMetricsServiceWrapper{\n\t\t\t\t\tnamespace: \"AWS/Test\" + string(rune('0'+idx)),\n\t\t\t\t}\n\t\t\t\tregistry.Register(mockSvc)\n\t\t\t}(i)\n\t\t}\n\n\t\twg.Wait()\n\t\tassert.Len(t, registry.services, 10)\n\t})\n\n\tt.Run(\"concurrent read and write\", func(t *testing.T) {\n\t\tregistry := &Registry{}\n\t\tmockSvc := &registryMockMetricsServiceWrapper{\n\t\t\tnamespace: \"AWS/Test\",\n\t\t}\n\t\tregistry.Register(mockSvc)\n\n\t\tvar wg sync.WaitGroup\n\t\terrors := make(chan error, 20)\n\n\t\t// Concurrent reads\n\t\tfor i := 0; i < 10; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, err := registry.GetEnhancedMetricsService(\"AWS/Test\")\n\t\t\t\tif err != nil {\n\t\t\t\t\terrors <- err\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t\t// Concurrent writes\n\t\tfor i := 0; i < 10; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func(idx int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tmockSvc := &registryMockMetricsServiceWrapper{\n\t\t\t\t\tnamespace: \"AWS/NewTest\" + string(rune('0'+idx)),\n\t\t\t\t}\n\t\t\t\tregistry.Register(mockSvc)\n\t\t\t}(i)\n\t\t}\n\n\t\twg.Wait()\n\t\tclose(errors)\n\n\t\tfor err := range errors {\n\t\t\tassert.NoError(t, err)\n\t\t}\n\t})\n}\n\nfunc TestDefaultRegistry(t *testing.T) {\n\ttests := []struct {\n\t\tname        string\n\t\tnamespace   string\n\t\texpectError bool\n\t}{\n\t\t{\n\t\t\tname:        \"AWS/RDS is registered\",\n\t\t\tnamespace:   \"AWS/RDS\",\n\t\t\texpectError: false,\n\t\t},\n\t\t{\n\t\t\tname:        \"AWS/Lambda is registered\",\n\t\t\tnamespace:   \"AWS/Lambda\",\n\t\t\texpectError: false,\n\t\t},\n\t\t{\n\t\t\tname:        \"AWS/DynamoDB is registered\",\n\t\t\tnamespace:   \"AWS/DynamoDB\",\n\t\t\texpectError: 
false,\n\t\t},\n\t\t{\n\t\t\tname:        \"AWS/ElastiCache is registered\",\n\t\t\tnamespace:   \"AWS/ElastiCache\",\n\t\t\texpectError: false,\n\t\t},\n\t\t{\n\t\t\tname:        \"unknown namespace returns error\",\n\t\t\tnamespace:   \"AWS/Unknown\",\n\t\t\texpectError: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tsvc, err := DefaultEnhancedMetricServiceRegistry.GetEnhancedMetricsService(tt.namespace)\n\n\t\t\tassert.Len(t, DefaultEnhancedMetricServiceRegistry.services, 4, \"Expected 4 services to be registered in the default registry\")\n\t\t\tif tt.expectError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Nil(t, svc)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err, \"Expected namespace %s to be registered\", tt.namespace)\n\t\t\t\tassert.NotNil(t, svc, \"Expected service for namespace %s to be non-nil\", tt.namespace)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestRegistry_ChainedRegistration(t *testing.T) {\n\tt.Run(\"chained registration\", func(t *testing.T) {\n\t\tregistry := (&Registry{}).\n\t\t\tRegister(&registryMockMetricsServiceWrapper{\n\t\t\t\tnamespace: \"AWS/Test1\",\n\t\t\t}).\n\t\t\tRegister(&registryMockMetricsServiceWrapper{\n\t\t\t\tnamespace: \"AWS/Test2\",\n\t\t\t}).\n\t\t\tRegister(&registryMockMetricsServiceWrapper{\n\t\t\t\tnamespace: \"AWS/Test3\",\n\t\t\t})\n\n\t\tassert.Len(t, registry.services, 3)\n\n\t\tfor i := 1; i <= 3; i++ {\n\t\t\tnamespace := \"AWS/Test\" + string(rune('0'+i))\n\t\t\t_, err := registry.GetEnhancedMetricsService(namespace)\n\t\t\trequire.NoError(t, err)\n\t\t}\n\t})\n}\n\nfunc TestRegistry_ServiceFactory(t *testing.T) {\n\tt.Run(\"service factory is called on each get\", func(t *testing.T) {\n\t\tregistry := &Registry{}\n\t\tcallCount := 0\n\n\t\tregistry.services = map[string]func() service.EnhancedMetricsService{\n\t\t\t\"AWS/Test\": func() service.EnhancedMetricsService {\n\t\t\t\tcallCount++\n\t\t\t\treturn nil\n\t\t\t},\n\t\t}\n\n\t\t// Call multiple 
times\n\t\tfor i := 0; i < 3; i++ {\n\t\t\t_, _ = registry.GetEnhancedMetricsService(\"AWS/Test\")\n\t\t}\n\n\t\tassert.Equal(t, 3, callCount, \"Factory should be called for each Get\")\n\t})\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/dynamodb/client.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage dynamodb\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/dynamodb\"\n\t\"github.com/aws/aws-sdk-go-v2/service/dynamodb/types\"\n)\n\ntype awsClient interface {\n\tDescribeTable(ctx context.Context, params *dynamodb.DescribeTableInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DescribeTableOutput, error)\n}\n\n// AWSDynamoDBClient wraps the AWS DynamoDB client\ntype AWSDynamoDBClient struct {\n\tclient awsClient\n}\n\n// NewDynamoDBClientWithConfig creates a new DynamoDB client with custom AWS configuration\nfunc NewDynamoDBClientWithConfig(cfg aws.Config) Client {\n\treturn &AWSDynamoDBClient{\n\t\tclient: dynamodb.NewFromConfig(cfg),\n\t}\n}\n\n// describeTable retrieves detailed information about a DynamoDB table\nfunc (c *AWSDynamoDBClient) describeTable(ctx context.Context, tableARN string) (*types.TableDescription, error) {\n\tresult, err := c.client.DescribeTable(ctx, &dynamodb.DescribeTableInput{\n\t\t// TableName can be either the table name or ARN\n\t\tTableName: aws.String(tableARN),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to describe table %s: %w\", tableARN, err)\n\t}\n\n\treturn result.Table, nil\n}\n\n// DescribeTables retrieves DynamoDB tables with their descriptions\nfunc (c *AWSDynamoDBClient) DescribeTables(ctx 
context.Context, logger *slog.Logger, tablesARNs []string) ([]types.TableDescription, error) {\n\tlogger.Debug(\"Describing DynamoDB tables\", \"count\", len(tablesARNs))\n\n\tvar tables []types.TableDescription\n\n\tfor _, arn := range tablesARNs {\n\t\ttableDesc, err := c.describeTable(ctx, arn)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Failed to describe table\", \"error\", err.Error(), \"arn\", arn)\n\t\t\tcontinue\n\t\t}\n\n\t\ttables = append(tables, *tableDesc)\n\t}\n\n\tlogger.Debug(\"Describing DynamoDB tables completed\", \"total_tables\", len(tables))\n\treturn tables, nil\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/dynamodb/client_test.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage dynamodb\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/dynamodb\"\n\t\"github.com/aws/aws-sdk-go-v2/service/dynamodb/types\"\n)\n\nfunc TestAWSDynamoDBClient_DescribeAllTables(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tclient  awsClient\n\t\twant    []types.TableDescription\n\t\twantErr bool\n\t\ttables  []string\n\t}{\n\t\t{\n\t\t\tname:   \"success - single page\",\n\t\t\ttables: []string{\"table-1\"},\n\t\t\tclient: &mockDynamoDBClient{\n\t\t\t\tdescribeTableFunc: func(_ context.Context, params *dynamodb.DescribeTableInput, _ ...func(*dynamodb.Options)) (*dynamodb.DescribeTableOutput, error) {\n\t\t\t\t\tif *params.TableName != \"table-1\" {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected table name: %s\", *params.TableName)\n\t\t\t\t\t}\n\n\t\t\t\t\treturn &dynamodb.DescribeTableOutput{\n\t\t\t\t\t\tTable: &types.TableDescription{\n\t\t\t\t\t\t\tTableName: aws.String(\"table-1\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.TableDescription{\n\t\t\t\t{TableName: aws.String(\"table-1\")},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:   \"describeTable failure\",\n\t\t\ttables: []string{\"table-1\", \"table-2\"},\n\t\t\tclient: 
&mockDynamoDBClient{\n\t\t\t\tdescribeTableFunc: func(_ context.Context, params *dynamodb.DescribeTableInput, _ ...func(*dynamodb.Options)) (*dynamodb.DescribeTableOutput, error) {\n\t\t\t\t\tif *params.TableName == \"table-1\" {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"describe error\")\n\t\t\t\t\t}\n\n\t\t\t\t\tif *params.TableName != \"table-2\" {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected table name: %s\", *params.TableName)\n\t\t\t\t\t}\n\n\t\t\t\t\treturn &dynamodb.DescribeTableOutput{\n\t\t\t\t\t\tTable: &types.TableDescription{\n\t\t\t\t\t\t\tTableName: params.TableName,\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.TableDescription{\n\t\t\t\t{TableName: aws.String(\"table-2\")},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tc := &AWSDynamoDBClient{\n\t\t\t\tclient: tt.client,\n\t\t\t}\n\t\t\tgot, err := c.DescribeTables(context.Background(), slog.New(slog.DiscardHandler), tt.tables)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"DescribeTables() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"DescribeTables() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// mockDynamoDBClient is a mock implementation of sdk AWS DynamoDB Client\ntype mockDynamoDBClient struct {\n\tdescribeTableFunc func(ctx context.Context, params *dynamodb.DescribeTableInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DescribeTableOutput, error)\n}\n\nfunc (m *mockDynamoDBClient) DescribeTable(ctx context.Context, params *dynamodb.DescribeTableInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DescribeTableOutput, error) {\n\treturn m.describeTableFunc(ctx, params, optFns...)\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/dynamodb/service.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage dynamodb\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/dynamodb/types\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nconst awsDynamoDBNamespace = \"AWS/DynamoDB\"\n\ntype Client interface {\n\t// DescribeTables retrieves DynamoDB tables with their descriptions. 
tables is a list of table ARNs or table names.\n\tDescribeTables(ctx context.Context, logger *slog.Logger, tables []string) ([]types.TableDescription, error)\n}\n\ntype buildCloudwatchDataFunc func(*model.TaggedResource, *types.TableDescription, []string) ([]*model.CloudwatchData, error)\n\ntype supportedMetric struct {\n\tname                    string\n\tbuildCloudwatchDataFunc buildCloudwatchDataFunc\n\trequiredPermissions     []string\n}\n\nfunc (sm *supportedMetric) buildCloudwatchData(resource *model.TaggedResource, table *types.TableDescription, metrics []string) ([]*model.CloudwatchData, error) {\n\treturn sm.buildCloudwatchDataFunc(resource, table, metrics)\n}\n\ntype DynamoDB struct {\n\tsupportedMetrics map[string]supportedMetric\n\tbuildClientFunc  func(cfg aws.Config) Client\n}\n\nfunc NewDynamoDBService(buildClientFunc func(cfg aws.Config) Client) *DynamoDB {\n\tif buildClientFunc == nil {\n\t\tbuildClientFunc = NewDynamoDBClientWithConfig\n\t}\n\tsvc := &DynamoDB{\n\t\tbuildClientFunc: buildClientFunc,\n\t}\n\n\t// The count of items in the table, updated approximately every six hours; may not reflect recent changes.\n\titemCountMetric := supportedMetric{\n\t\tname:                    \"ItemCount\",\n\t\tbuildCloudwatchDataFunc: buildItemCountMetric,\n\t\trequiredPermissions: []string{\n\t\t\t\"dynamodb:DescribeTable\",\n\t\t},\n\t}\n\n\tsvc.supportedMetrics = map[string]supportedMetric{\n\t\titemCountMetric.name: itemCountMetric,\n\t}\n\n\treturn svc\n}\n\nfunc (s *DynamoDB) GetNamespace() string {\n\treturn awsDynamoDBNamespace\n}\n\nfunc (s *DynamoDB) loadMetricsMetadata(\n\tctx context.Context,\n\tlogger *slog.Logger,\n\tregion string,\n\trole model.Role,\n\tconfigProvider config.RegionalConfigProvider,\n\ttablesARNs []string,\n) (map[string]*types.TableDescription, error) {\n\tclient := s.buildClientFunc(*configProvider.GetAWSRegionalConfig(region, role))\n\n\ttables, err := client.DescribeTables(ctx, logger, tablesARNs)\n\tif err != nil 
{\n\t\treturn nil, fmt.Errorf(\"error listing DynamoDB tables in region %s: %w\", region, err)\n\t}\n\n\tregionalData := make(map[string]*types.TableDescription, len(tables))\n\n\tfor _, table := range tables {\n\t\tregionalData[*table.TableArn] = &table\n\t}\n\n\treturn regionalData, nil\n}\n\nfunc (s *DynamoDB) IsMetricSupported(metricName string) bool {\n\t_, exists := s.supportedMetrics[metricName]\n\treturn exists\n}\n\nfunc (s *DynamoDB) GetMetrics(ctx context.Context, logger *slog.Logger, resources []*model.TaggedResource, enhancedMetricConfigs []*model.EnhancedMetricConfig, exportedTagOnMetrics []string, region string, role model.Role, regionalConfigProvider config.RegionalConfigProvider) ([]*model.CloudwatchData, error) {\n\tif len(resources) == 0 || len(enhancedMetricConfigs) == 0 {\n\t\treturn nil, nil\n\t}\n\n\ttablesARNs := make([]string, 0, len(resources))\n\tfor _, resource := range resources {\n\t\ttablesARNs = append(tablesARNs, resource.ARN)\n\t}\n\n\tdata, err := s.loadMetricsMetadata(\n\t\tctx,\n\t\tlogger,\n\t\tregion,\n\t\trole,\n\t\tregionalConfigProvider,\n\t\ttablesARNs,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error loading DynamoDB metrics metadata: %w\", err)\n\t}\n\n\tvar result []*model.CloudwatchData\n\n\tfor _, resource := range resources {\n\t\tif resource.Namespace != s.GetNamespace() {\n\t\t\tlogger.Warn(\"Resource namespace does not match DynamoDB namespace, skipping\", \"arn\", resource.ARN, \"namespace\", resource.Namespace)\n\t\t\tcontinue\n\t\t}\n\n\t\ttable, exists := data[resource.ARN]\n\t\tif !exists {\n\t\t\tlogger.Warn(\"DynamoDB table not found in data\", \"arn\", resource.ARN)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, enhancedMetric := range enhancedMetricConfigs {\n\t\t\tsupportedMetric, ok := s.supportedMetrics[enhancedMetric.Name]\n\t\t\tif !ok {\n\t\t\t\tlogger.Warn(\"Unsupported DynamoDB enhanced metric, skipping\", \"metric\", enhancedMetric.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tem, err := 
supportedMetric.buildCloudwatchData(resource, table, exportedTagOnMetrics)\n\t\t\tif err != nil || em == nil {\n\t\t\t\tlogger.Warn(\"Error building DynamoDB enhanced metric\", \"metric\", enhancedMetric.Name, \"error\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tresult = append(result, em...)\n\t\t}\n\t}\n\n\treturn result, nil\n}\n\nfunc (s *DynamoDB) ListRequiredPermissions() map[string][]string {\n\tpermissions := make(map[string][]string, len(s.supportedMetrics))\n\tfor _, metric := range s.supportedMetrics {\n\t\tpermissions[metric.name] = metric.requiredPermissions\n\t}\n\treturn permissions\n}\n\nfunc (s *DynamoDB) ListSupportedEnhancedMetrics() []string {\n\tvar metrics []string\n\tfor metric := range s.supportedMetrics {\n\t\tmetrics = append(metrics, metric)\n\t}\n\treturn metrics\n}\n\nfunc (s *DynamoDB) Instance() service.EnhancedMetricsService {\n\t// do not use NewDynamoDBService to avoid extra map allocation\n\treturn &DynamoDB{\n\t\tsupportedMetrics: s.supportedMetrics,\n\t\tbuildClientFunc:  s.buildClientFunc,\n\t}\n}\n\nfunc buildItemCountMetric(resource *model.TaggedResource, table *types.TableDescription, exportedTags []string) ([]*model.CloudwatchData, error) {\n\tif table.ItemCount == nil {\n\t\treturn nil, fmt.Errorf(\"ItemCount is nil for DynamoDB table %s\", resource.ARN)\n\t}\n\n\tvar dimensions []model.Dimension\n\n\tif table.TableName != nil {\n\t\tdimensions = []model.Dimension{\n\t\t\t{Name: \"TableName\", Value: *table.TableName},\n\t\t}\n\t}\n\n\tvalue := float64(*table.ItemCount)\n\tresult := []*model.CloudwatchData{{\n\t\tMetricName:   \"ItemCount\",\n\t\tResourceName: resource.ARN,\n\t\tNamespace:    \"AWS/DynamoDB\",\n\t\tDimensions:   dimensions,\n\t\tTags:         resource.MetricTags(exportedTags),\n\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\tDataPoints: []model.DataPoint{\n\t\t\t\t{\n\t\t\t\t\tValue:     &value,\n\t\t\t\t\tTimestamp: time.Now(),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}}\n\n\tif 
len(table.GlobalSecondaryIndexes) > 0 {\n\t\tfor _, globalSecondaryIndex := range table.GlobalSecondaryIndexes {\n\t\t\tif globalSecondaryIndex.ItemCount == nil || globalSecondaryIndex.IndexName == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar secondaryIndexesDimensions []model.Dimension\n\t\t\tglobalSecondaryIndexesItemsCount := float64(*globalSecondaryIndex.ItemCount)\n\n\t\t\tif table.TableName != nil {\n\t\t\t\tsecondaryIndexesDimensions = append(secondaryIndexesDimensions, model.Dimension{\n\t\t\t\t\tName:  \"TableName\",\n\t\t\t\t\tValue: *table.TableName,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tif globalSecondaryIndex.IndexName != nil {\n\t\t\t\tsecondaryIndexesDimensions = append(secondaryIndexesDimensions, model.Dimension{\n\t\t\t\t\tName:  \"GlobalSecondaryIndexName\",\n\t\t\t\t\tValue: *globalSecondaryIndex.IndexName,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tresult = append(result, &model.CloudwatchData{\n\t\t\t\tMetricName:   \"ItemCount\",\n\t\t\t\tResourceName: resource.ARN,\n\t\t\t\tNamespace:    \"AWS/DynamoDB\",\n\t\t\t\tDimensions:   secondaryIndexesDimensions,\n\t\t\t\tTags:         resource.MetricTags(exportedTags),\n\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\tDataPoints: []model.DataPoint{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tValue:     &globalSecondaryIndexesItemsCount,\n\t\t\t\t\t\t\tTimestamp: time.Now(),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\n\treturn result, nil\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/dynamodb/service_test.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage dynamodb\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/dynamodb/types\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nfunc TestNewDynamoDBService(t *testing.T) {\n\ttests := []struct {\n\t\tname            string\n\t\tbuildClientFunc func(cfg aws.Config) Client\n\t}{\n\t\t{\n\t\t\tname:            \"with nil buildClientFunc\",\n\t\t\tbuildClientFunc: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"with custom buildClientFunc\",\n\t\t\tbuildClientFunc: func(_ aws.Config) Client {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := NewDynamoDBService(tt.buildClientFunc)\n\t\t\trequire.NotNil(t, got)\n\t\t\trequire.Len(t, got.supportedMetrics, 1)\n\t\t\trequire.NotNil(t, got.supportedMetrics[\"ItemCount\"])\n\t\t})\n\t}\n}\n\nfunc TestDynamoDB_GetNamespace(t *testing.T) {\n\tservice := NewDynamoDBService(nil)\n\texpectedNamespace := awsDynamoDBNamespace\n\trequire.Equal(t, expectedNamespace, service.GetNamespace())\n}\n\nfunc TestDynamoDB_ListRequiredPermissions(t *testing.T) {\n\tservice := NewDynamoDBService(nil)\n\texpectedPermissions := map[string][]string{\n\t\t\"ItemCount\": 
{\n\t\t\t\"dynamodb:DescribeTable\",\n\t\t},\n\t}\n\trequire.Equal(t, expectedPermissions, service.ListRequiredPermissions())\n}\n\nfunc TestDynamoDB_ListSupportedEnhancedMetrics(t *testing.T) {\n\tservice := NewDynamoDBService(nil)\n\texpectedMetrics := []string{\n\t\t\"ItemCount\",\n\t}\n\trequire.Equal(t, expectedMetrics, service.ListSupportedEnhancedMetrics())\n}\n\nfunc TestDynamoDB_GetMetrics(t *testing.T) {\n\tdefaultTables := []types.TableDescription{\n\t\t{\n\t\t\tTableArn:  aws.String(\"arn:aws:dynamodb:us-east-1:123456789012:table/test-table\"),\n\t\t\tTableName: aws.String(\"test-table\"),\n\t\t\tItemCount: aws.Int64(1000),\n\t\t},\n\t}\n\n\ttests := []struct {\n\t\tname                 string\n\t\tresources            []*model.TaggedResource\n\t\tenhancedMetrics      []*model.EnhancedMetricConfig\n\t\texportedTagOnMetrics []string\n\t\ttables               []types.TableDescription\n\t\tdescribeErr          bool\n\t\twantErr              bool\n\t\twantResultCount      int\n\t}{\n\t\t{\n\t\t\tname:            \"empty resources\",\n\t\t\tresources:       []*model.TaggedResource{},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"ItemCount\"}},\n\t\t\ttables:          defaultTables,\n\t\t\twantErr:         false,\n\t\t\twantResultCount: 0,\n\t\t},\n\t\t{\n\t\t\tname:            \"empty enhanced metrics\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: \"arn:aws:dynamodb:us-east-1:123456789012:table/test\"}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{},\n\t\t\ttables:          defaultTables,\n\t\t\twantErr:         false,\n\t\t\twantResultCount: 0,\n\t\t},\n\t\t{\n\t\t\tname:            \"wrong namespace\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: \"arn:aws:dynamodb:us-east-1:123456789012:table/test\", Namespace: awsDynamoDBNamespace}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"ItemCount\"}},\n\t\t\ttables:          defaultTables,\n\t\t\twantErr:         false,\n\t\t\twantResultCount: 
0,\n\t\t},\n\t\t{\n\t\t\tname:            \"metadata not loaded\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: \"arn:aws:dynamodb:us-east-1:123456789012:table/test\"}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"ItemCount\"}},\n\t\t\tdescribeErr:     true,\n\t\t\twantErr:         true,\n\t\t\twantResultCount: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"successfully received metric\",\n\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t{ARN: \"arn:aws:dynamodb:us-east-1:123456789012:table/test-table\", Namespace: awsDynamoDBNamespace},\n\t\t\t},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"ItemCount\"}},\n\t\t\ttables:          defaultTables,\n\t\t\twantErr:         false,\n\t\t\twantResultCount: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"successfully received metric with global secondary indexes\",\n\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t{ARN: \"arn:aws:dynamodb:us-east-1:123456789012:table/test-table-with-gsi\", Namespace: awsDynamoDBNamespace},\n\t\t\t},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"ItemCount\"}},\n\t\t\ttables: []types.TableDescription{\n\t\t\t\t{\n\t\t\t\t\tTableArn:  aws.String(\"arn:aws:dynamodb:us-east-1:123456789012:table/test-table-with-gsi\"),\n\t\t\t\t\tTableName: aws.String(\"test-table-with-gsi\"),\n\t\t\t\t\tItemCount: aws.Int64(1000),\n\t\t\t\t\tGlobalSecondaryIndexes: []types.GlobalSecondaryIndexDescription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIndexName: aws.String(\"test-gsi-1\"),\n\t\t\t\t\t\t\tItemCount: aws.Int64(500),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIndexName: aws.String(\"test-gsi-2\"),\n\t\t\t\t\t\t\tItemCount: aws.Int64(300),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr:         false,\n\t\t\twantResultCount: 3, // 1 for table + 2 for GSIs\n\t\t},\n\t\t{\n\t\t\tname: \"resource not found in metadata\",\n\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t{ARN: 
\"arn:aws:dynamodb:us-east-1:123456789012:table/non-existent\", Namespace: awsDynamoDBNamespace},\n\t\t\t},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"ItemCount\"}},\n\t\t\ttables:          defaultTables,\n\t\t\twantErr:         false,\n\t\t\twantResultCount: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"unsupported metric\",\n\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t{ARN: \"arn:aws:dynamodb:us-east-1:123456789012:table/test-table\", Namespace: awsDynamoDBNamespace},\n\t\t\t},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"UnsupportedMetric\"}},\n\t\t\ttables:          defaultTables,\n\t\t\twantErr:         false,\n\t\t\twantResultCount: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"multiple resources and metrics\",\n\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t{ARN: \"arn:aws:dynamodb:us-east-1:123456789012:table/test-table-1\", Namespace: awsDynamoDBNamespace},\n\t\t\t\t{ARN: \"arn:aws:dynamodb:us-east-1:123456789012:table/test-table-2\", Namespace: awsDynamoDBNamespace},\n\t\t\t},\n\t\t\tenhancedMetrics:      []*model.EnhancedMetricConfig{{Name: \"ItemCount\"}},\n\t\t\texportedTagOnMetrics: []string{\"Name\"},\n\t\t\ttables: []types.TableDescription{\n\t\t\t\t{\n\t\t\t\t\tTableArn:  aws.String(\"arn:aws:dynamodb:us-east-1:123456789012:table/test-table-1\"),\n\t\t\t\t\tTableName: aws.String(\"test-table-1\"),\n\t\t\t\t\tItemCount: aws.Int64(1000),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tTableArn:  aws.String(\"arn:aws:dynamodb:us-east-1:123456789012:table/test-table-2\"),\n\t\t\t\t\tTableName: aws.String(\"test-table-2\"),\n\t\t\t\t\tItemCount: aws.Int64(2000),\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr:         false,\n\t\t\twantResultCount: 2,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tctx := context.Background()\n\t\t\tlogger := slog.New(slog.DiscardHandler)\n\n\t\t\tmockClient := &mockServiceDynamoDBClient{\n\t\t\t\ttables:      tt.tables,\n\t\t\t\tdescribeErr: tt.describeErr,\n\t\t\t}\n\n\t\t\tservice := NewDynamoDBService(func(_ aws.Config) Client 
{\n\t\t\t\treturn mockClient\n\t\t\t})\n\n\t\t\tmockConfig := &mockConfigProvider{\n\t\t\t\tc: &aws.Config{Region: \"us-east-1\"},\n\t\t\t}\n\n\t\t\tresult, err := service.GetMetrics(ctx, logger, tt.resources, tt.enhancedMetrics, tt.exportedTagOnMetrics, \"us-east-1\", model.Role{}, mockConfig)\n\n\t\t\tif tt.wantErr {\n\t\t\t\trequire.Error(t, err)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\trequire.Len(t, result, tt.wantResultCount)\n\n\t\t\tif tt.wantResultCount > 0 {\n\t\t\t\tfor _, metric := range result {\n\t\t\t\t\trequire.NotNil(t, metric)\n\t\t\t\t\trequire.Equal(t, awsDynamoDBNamespace, metric.Namespace)\n\t\t\t\t\trequire.NotEmpty(t, metric.Dimensions)\n\t\t\t\t\trequire.NotNil(t, metric.GetMetricDataResult)\n\t\t\t\t\trequire.Nil(t, metric.GetMetricStatisticsResult)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\ntype mockServiceDynamoDBClient struct {\n\ttables      []types.TableDescription\n\tdescribeErr bool\n}\n\nfunc (m *mockServiceDynamoDBClient) DescribeTables(context.Context, *slog.Logger, []string) ([]types.TableDescription, error) {\n\tif m.describeErr {\n\t\treturn nil, fmt.Errorf(\"mock describe error\")\n\t}\n\treturn m.tables, nil\n}\n\ntype mockConfigProvider struct {\n\tc *aws.Config\n}\n\nfunc (m *mockConfigProvider) GetAWSRegionalConfig(_ string, _ model.Role) *aws.Config {\n\treturn m.c\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/elasticache/client.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage elasticache\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/elasticache\"\n\t\"github.com/aws/aws-sdk-go-v2/service/elasticache/types\"\n)\n\ntype awsClient interface {\n\tDescribeCacheClusters(ctx context.Context, params *elasticache.DescribeCacheClustersInput, optFns ...func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error)\n}\n\n// AWSElastiCacheClient wraps the AWS ElastiCache client\ntype AWSElastiCacheClient struct {\n\tclient awsClient\n}\n\n// NewElastiCacheClientWithConfig creates a new ElastiCache client with custom AWS configuration\nfunc NewElastiCacheClientWithConfig(cfg aws.Config) Client {\n\treturn &AWSElastiCacheClient{\n\t\tclient: elasticache.NewFromConfig(cfg),\n\t}\n}\n\n// describeCacheClusters retrieves information about cache clusters\nfunc (c *AWSElastiCacheClient) describeCacheClusters(ctx context.Context, input *elasticache.DescribeCacheClustersInput) (*elasticache.DescribeCacheClustersOutput, error) {\n\tresult, err := c.client.DescribeCacheClusters(ctx, input)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to describe cache clusters: %w\", err)\n\t}\n\n\treturn result, nil\n}\n\n// DescribeAllCacheClusters retrieves all cache clusters with pagination support\nfunc (c *AWSElastiCacheClient) 
DescribeAllCacheClusters(ctx context.Context, logger *slog.Logger) ([]types.CacheCluster, error) {\n\tlogger.Debug(\"Describing all ElastiCache cache clusters\")\n\tvar allClusters []types.CacheCluster\n\tvar marker *string\n\tvar maxRecords int32 = 100\n\tshowNodeInfo := true\n\n\tfor {\n\t\toutput, err := c.describeCacheClusters(ctx, &elasticache.DescribeCacheClustersInput{\n\t\t\tMaxRecords:        &maxRecords,\n\t\t\tMarker:            marker,\n\t\t\tShowCacheNodeInfo: &showNodeInfo,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tallClusters = append(allClusters, output.CacheClusters...)\n\n\t\tif output.Marker == nil {\n\t\t\tbreak\n\t\t}\n\t\tmarker = output.Marker\n\t}\n\n\tlogger.Debug(\"Completed describing ElastiCache cache clusters\", slog.Int(\"totalClusters\", len(allClusters)))\n\n\treturn allClusters, nil\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/elasticache/client_test.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage elasticache\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/elasticache\"\n\t\"github.com/aws/aws-sdk-go-v2/service/elasticache/types\"\n)\n\nfunc TestAWSElastiCacheClient_DescribeAllCacheClusters(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tclient  awsClient\n\t\twant    []types.CacheCluster\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"success - single page\",\n\t\t\tclient: &mockElastiCacheClient{\n\t\t\t\tdescribeCacheClustersFunc: func(_ context.Context, _ *elasticache.DescribeCacheClustersInput, _ ...func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error) {\n\t\t\t\t\treturn &elasticache.DescribeCacheClustersOutput{\n\t\t\t\t\t\tCacheClusters: []types.CacheCluster{\n\t\t\t\t\t\t\t{CacheClusterId: aws.String(\"cluster-1\")},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tMarker: nil,\n\t\t\t\t\t}, nil\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.CacheCluster{\n\t\t\t\t{CacheClusterId: aws.String(\"cluster-1\")},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"success - multiple pages\",\n\t\t\tclient: &mockElastiCacheClient{\n\t\t\t\tdescribeCacheClustersFunc: func() func(_ context.Context, _ *elasticache.DescribeCacheClustersInput, _ ...func(*elasticache.Options)) 
(*elasticache.DescribeCacheClustersOutput, error) {\n\t\t\t\t\tcallCount := 0\n\t\t\t\t\treturn func(_ context.Context, _ *elasticache.DescribeCacheClustersInput, _ ...func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error) {\n\t\t\t\t\t\tcallCount++\n\t\t\t\t\t\tif callCount == 1 {\n\t\t\t\t\t\t\treturn &elasticache.DescribeCacheClustersOutput{\n\t\t\t\t\t\t\t\tCacheClusters: []types.CacheCluster{\n\t\t\t\t\t\t\t\t\t{CacheClusterId: aws.String(\"cluster-1\")},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tMarker: aws.String(\"marker1\"),\n\t\t\t\t\t\t\t}, nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn &elasticache.DescribeCacheClustersOutput{\n\t\t\t\t\t\t\tCacheClusters: []types.CacheCluster{\n\t\t\t\t\t\t\t\t{CacheClusterId: aws.String(\"cluster-2\")},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tMarker: nil,\n\t\t\t\t\t\t}, nil\n\t\t\t\t\t}\n\t\t\t\t}(),\n\t\t\t},\n\t\t\twant: []types.CacheCluster{\n\t\t\t\t{CacheClusterId: aws.String(\"cluster-1\")},\n\t\t\t\t{CacheClusterId: aws.String(\"cluster-2\")},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"error - API failure\",\n\t\t\tclient: &mockElastiCacheClient{\n\t\t\t\tdescribeCacheClustersFunc: func(_ context.Context, _ *elasticache.DescribeCacheClustersInput, _ ...func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error) {\n\t\t\t\t\treturn nil, fmt.Errorf(\"API error\")\n\t\t\t\t},\n\t\t\t},\n\t\t\twant:    nil,\n\t\t\twantErr: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tc := &AWSElastiCacheClient{\n\t\t\t\tclient: tt.client,\n\t\t\t}\n\t\t\tgot, err := c.DescribeAllCacheClusters(context.Background(), slog.New(slog.DiscardHandler))\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"DescribeAllCacheClusters() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"DescribeAllCacheClusters() got = %v, want %v\", got, 
tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// mockElastiCacheClient is a mock implementation of AWS ElastiCache Client\ntype mockElastiCacheClient struct {\n\tdescribeCacheClustersFunc func(ctx context.Context, params *elasticache.DescribeCacheClustersInput, optFns ...func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error)\n}\n\nfunc (m *mockElastiCacheClient) DescribeCacheClusters(ctx context.Context, params *elasticache.DescribeCacheClustersInput, optFns ...func(*elasticache.Options)) (*elasticache.DescribeCacheClustersOutput, error) {\n\treturn m.describeCacheClustersFunc(ctx, params, optFns...)\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/elasticache/service.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage elasticache\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/elasticache/types\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nconst awsElastiCacheNamespace = \"AWS/ElastiCache\"\n\ntype Client interface {\n\tDescribeAllCacheClusters(ctx context.Context, logger *slog.Logger) ([]types.CacheCluster, error)\n}\n\ntype buildCloudwatchDataFunc func(*model.TaggedResource, *types.CacheCluster, []string) (*model.CloudwatchData, error)\n\ntype supportedMetric struct {\n\tname                    string\n\tbuildCloudwatchDataFunc buildCloudwatchDataFunc\n\trequiredPermissions     []string\n}\n\nfunc (sm *supportedMetric) buildCloudwatchData(resource *model.TaggedResource, elasticacheCluster *types.CacheCluster, metrics []string) (*model.CloudwatchData, error) {\n\treturn sm.buildCloudwatchDataFunc(resource, elasticacheCluster, metrics)\n}\n\ntype ElastiCache struct {\n\tsupportedMetrics map[string]supportedMetric\n\tbuildClientFunc  func(cfg aws.Config) Client\n}\n\nfunc 
NewElastiCacheService(buildClientFunc func(cfg aws.Config) Client) *ElastiCache {\n\tif buildClientFunc == nil {\n\t\tbuildClientFunc = NewElastiCacheClientWithConfig\n\t}\n\tsvc := &ElastiCache{\n\t\tbuildClientFunc: buildClientFunc,\n\t}\n\n\t// The count of cache nodes in the cluster; must be 1 for Valkey or Redis OSS clusters, or between 1 and 40 for Memcached clusters.\n\tnumCacheNodesMetric := supportedMetric{\n\t\tname:                    \"NumCacheNodes\",\n\t\tbuildCloudwatchDataFunc: buildNumCacheNodesMetric,\n\t\trequiredPermissions:     []string{\"elasticache:DescribeCacheClusters\"},\n\t}\n\n\tsvc.supportedMetrics = map[string]supportedMetric{\n\t\tnumCacheNodesMetric.name: numCacheNodesMetric,\n\t}\n\n\treturn svc\n}\n\nfunc (s *ElastiCache) GetNamespace() string {\n\treturn awsElastiCacheNamespace\n}\n\nfunc (s *ElastiCache) loadMetricsMetadata(ctx context.Context, logger *slog.Logger, region string, role model.Role, configProvider config.RegionalConfigProvider) (map[string]*types.CacheCluster, error) {\n\tclient := s.buildClientFunc(*configProvider.GetAWSRegionalConfig(region, role))\n\n\tinstances, err := client.DescribeAllCacheClusters(ctx, logger)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error listing cache clusters in region %s: %w\", region, err)\n\t}\n\n\tregionalData := make(map[string]*types.CacheCluster, len(instances))\n\n\tfor _, instance := range instances {\n\t\tregionalData[*instance.ARN] = &instance\n\t}\n\n\treturn regionalData, nil\n}\n\nfunc (s *ElastiCache) IsMetricSupported(metricName string) bool {\n\t_, exists := s.supportedMetrics[metricName]\n\treturn exists\n}\n\nfunc (s *ElastiCache) GetMetrics(ctx context.Context, logger *slog.Logger, resources []*model.TaggedResource, enhancedMetricConfigs []*model.EnhancedMetricConfig, exportedTagOnMetrics []string, region string, role model.Role, regionalConfigProvider config.RegionalConfigProvider) ([]*model.CloudwatchData, error) {\n\tif len(resources) == 0 || 
len(enhancedMetricConfigs) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tdata, err := s.loadMetricsMetadata(\n\t\tctx,\n\t\tlogger,\n\t\tregion,\n\t\trole,\n\t\tregionalConfigProvider,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't load elasticache metrics metadata: %w\", err)\n\t}\n\n\tvar result []*model.CloudwatchData\n\n\tfor _, resource := range resources {\n\t\tif resource.Namespace != s.GetNamespace() {\n\t\t\tlogger.Warn(\"Resource namespace does not match elasticache namespace, skipping\", \"arn\", resource.ARN, \"namespace\", resource.Namespace)\n\t\t\tcontinue\n\t\t}\n\n\t\telastiCacheCluster, exists := data[resource.ARN]\n\t\tif !exists {\n\t\t\tlogger.Warn(\"ElastiCache cluster not found in data\", \"arn\", resource.ARN)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, enhancedMetric := range enhancedMetricConfigs {\n\t\t\tsupportedMetric, ok := s.supportedMetrics[enhancedMetric.Name]\n\t\t\tif !ok {\n\t\t\t\tlogger.Warn(\"Unsupported elasticache enhanced metric requested\", \"metric\", enhancedMetric.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tem, err := supportedMetric.buildCloudwatchData(resource, elastiCacheCluster, exportedTagOnMetrics)\n\t\t\tif err != nil || em == nil {\n\t\t\t\tlogger.Warn(\"Error building elasticache enhanced metric\", \"metric\", enhancedMetric.Name, \"error\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tresult = append(result, em)\n\t\t}\n\t}\n\n\treturn result, nil\n}\n\nfunc (s *ElastiCache) ListRequiredPermissions() map[string][]string {\n\trequiredPermissions := make(map[string][]string, len(s.supportedMetrics))\n\tfor metricName, metric := range s.supportedMetrics {\n\t\trequiredPermissions[metricName] = metric.requiredPermissions\n\t}\n\treturn requiredPermissions\n}\n\nfunc (s *ElastiCache) ListSupportedEnhancedMetrics() []string {\n\tvar metrics []string\n\tfor metric := range s.supportedMetrics {\n\t\tmetrics = append(metrics, metric)\n\t}\n\treturn metrics\n}\n\nfunc (s *ElastiCache) Instance() 
service.EnhancedMetricsService {\n\t// do not use NewElastiCacheService to avoid extra map allocation\n\treturn &ElastiCache{\n\t\tsupportedMetrics: s.supportedMetrics,\n\t\tbuildClientFunc:  s.buildClientFunc,\n\t}\n}\n\nfunc buildNumCacheNodesMetric(resource *model.TaggedResource, cacheCluster *types.CacheCluster, exportedTags []string) (*model.CloudwatchData, error) {\n\tif cacheCluster.NumCacheNodes == nil {\n\t\treturn nil, fmt.Errorf(\"NumCacheNodes is nil for ElastiCache cluster %s\", resource.ARN)\n\t}\n\n\tvar dimensions []model.Dimension\n\n\tif cacheCluster.CacheClusterId != nil {\n\t\tdimensions = []model.Dimension{\n\t\t\t{Name: \"CacheClusterId\", Value: *cacheCluster.CacheClusterId},\n\t\t}\n\t}\n\n\tif cacheCluster.ReplicationGroupId != nil {\n\t\tdimensions = append(dimensions, model.Dimension{\n\t\t\tName:  \"ReplicationGroupId\",\n\t\t\tValue: *cacheCluster.ReplicationGroupId,\n\t\t})\n\t}\n\n\tvalue := float64(*cacheCluster.NumCacheNodes)\n\treturn &model.CloudwatchData{\n\t\tMetricName:   \"NumCacheNodes\",\n\t\tResourceName: resource.ARN,\n\t\tNamespace:    \"AWS/ElastiCache\",\n\t\tDimensions:   dimensions,\n\t\tTags:         resource.MetricTags(exportedTags),\n\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\tDataPoints: []model.DataPoint{\n\t\t\t\t{\n\t\t\t\t\tValue:     &value,\n\t\t\t\t\tTimestamp: time.Now(),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/elasticache/service_test.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage elasticache\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/elasticache/types\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nfunc TestNewElastiCacheService(t *testing.T) {\n\ttests := []struct {\n\t\tname            string\n\t\tbuildClientFunc func(cfg aws.Config) Client\n\t}{\n\t\t{\n\t\t\tname:            \"with nil buildClientFunc\",\n\t\t\tbuildClientFunc: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"with custom buildClientFunc\",\n\t\t\tbuildClientFunc: func(_ aws.Config) Client {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := NewElastiCacheService(tt.buildClientFunc)\n\t\t\trequire.NotNil(t, got)\n\t\t\trequire.Len(t, got.supportedMetrics, 1)\n\t\t\trequire.NotNil(t, got.supportedMetrics[\"NumCacheNodes\"])\n\t\t})\n\t}\n}\n\nfunc TestElastiCache_GetNamespace(t *testing.T) {\n\tservice := NewElastiCacheService(nil)\n\texpectedNamespace := awsElastiCacheNamespace\n\trequire.Equal(t, expectedNamespace, service.GetNamespace())\n}\n\nfunc TestElastiCache_ListRequiredPermissions(t *testing.T) {\n\tservice := NewElastiCacheService(nil)\n\texpectedPermissions := 
map[string][]string{\n\t\t\"NumCacheNodes\": {\"elasticache:DescribeCacheClusters\"},\n\t}\n\trequire.Equal(t, expectedPermissions, service.ListRequiredPermissions())\n}\n\nfunc TestElastiCache_ListSupportedEnhancedMetrics(t *testing.T) {\n\tservice := NewElastiCacheService(nil)\n\texpectedMetrics := []string{\n\t\t\"NumCacheNodes\",\n\t}\n\trequire.Equal(t, expectedMetrics, service.ListSupportedEnhancedMetrics())\n}\n\nfunc TestElastiCache_GetMetrics(t *testing.T) {\n\t// Common test data\n\ttestCluster := types.CacheCluster{\n\t\tARN:            aws.String(\"arn:aws:elasticache:us-east-1:123456789012:cluster:test-cluster\"),\n\t\tCacheClusterId: aws.String(\"test-cluster\"),\n\t\tNumCacheNodes:  aws.Int32(2),\n\t}\n\n\ttests := []struct {\n\t\tname            string\n\t\tresources       []*model.TaggedResource\n\t\tenhancedMetrics []*model.EnhancedMetricConfig\n\t\tclusters        []types.CacheCluster\n\t\tdescribeErr     bool\n\t\twantErr         bool\n\t\twantResultCount int\n\t}{\n\t\t{\n\t\t\tname:            \"empty resources\",\n\t\t\tresources:       []*model.TaggedResource{},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"NumCacheNodes\"}},\n\t\t\tclusters:        []types.CacheCluster{testCluster},\n\t\t\twantResultCount: 0,\n\t\t},\n\t\t{\n\t\t\tname:            \"empty enhanced metrics\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: \"arn:aws:elasticache:us-east-1:123456789012:cluster:test\"}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{},\n\t\t\tclusters:        []types.CacheCluster{testCluster},\n\t\t\twantResultCount: 0,\n\t\t},\n\t\t{\n\t\t\tname:            \"wrong namespace\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: \"arn:aws:elasticache:us-east-1:123456789012:cluster:test\"}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"NumCacheNodes\"}},\n\t\t\twantErr:         false,\n\t\t},\n\t\t{\n\t\t\tname:            \"describe error\",\n\t\t\tresources:       
[]*model.TaggedResource{{ARN: \"arn:aws:elasticache:us-east-1:123456789012:cluster:test\"}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"NumCacheNodes\"}},\n\t\t\tdescribeErr:     true,\n\t\t\twantErr:         true,\n\t\t},\n\t\t{\n\t\t\tname:            \"successfully received metric\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: \"arn:aws:elasticache:us-east-1:123456789012:cluster:test-cluster\", Namespace: awsElastiCacheNamespace}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"NumCacheNodes\"}},\n\t\t\tclusters:        []types.CacheCluster{testCluster},\n\t\t\twantResultCount: 1,\n\t\t},\n\t\t{\n\t\t\tname:            \"resource not found in metadata\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: \"arn:aws:elasticache:us-east-1:123456789012:cluster:non-existent\", Namespace: awsElastiCacheNamespace}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"NumCacheNodes\"}},\n\t\t\tclusters:        []types.CacheCluster{testCluster},\n\t\t\twantResultCount: 0,\n\t\t},\n\t\t{\n\t\t\tname:            \"unsupported metric\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: \"arn:aws:elasticache:us-east-1:123456789012:cluster:test-cluster\", Namespace: awsElastiCacheNamespace}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"UnsupportedMetric\"}},\n\t\t\tclusters:        []types.CacheCluster{testCluster},\n\t\t\twantResultCount: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"multiple resources and metrics\",\n\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t{ARN: \"arn:aws:elasticache:us-east-1:123456789012:cluster:test-cluster-1\", Namespace: awsElastiCacheNamespace},\n\t\t\t\t{ARN: \"arn:aws:elasticache:us-east-1:123456789012:cluster:test-cluster-2\", Namespace: awsElastiCacheNamespace},\n\t\t\t},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"NumCacheNodes\"}},\n\t\t\tclusters: []types.CacheCluster{\n\t\t\t\t{\n\t\t\t\t\tARN:            aws.String(\"arn:aws:elasticache:us-east-1:123456789012:cluster:test-cluster-1\"),\n\t\t\t\t\tCacheClusterId: 
aws.String(\"test-cluster-1\"),\n\t\t\t\t\tNumCacheNodes:  aws.Int32(1),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tARN:            aws.String(\"arn:aws:elasticache:us-east-1:123456789012:cluster:test-cluster-2\"),\n\t\t\t\t\tCacheClusterId: aws.String(\"test-cluster-2\"),\n\t\t\t\t\tNumCacheNodes:  aws.Int32(3),\n\t\t\t\t},\n\t\t\t},\n\t\t\twantResultCount: 2,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tctx := context.Background()\n\t\t\tlogger := slog.New(slog.DiscardHandler)\n\n\t\t\tmockClient := &mockServiceElastiCacheClient{\n\t\t\t\tclusters:    tt.clusters,\n\t\t\t\tdescribeErr: tt.describeErr,\n\t\t\t}\n\n\t\t\tservice := NewElastiCacheService(func(_ aws.Config) Client {\n\t\t\t\treturn mockClient\n\t\t\t})\n\n\t\t\tmockConfig := &mockConfigProvider{\n\t\t\t\tc: &aws.Config{Region: \"us-east-1\"},\n\t\t\t}\n\n\t\t\tresult, err := service.GetMetrics(ctx, logger, tt.resources, tt.enhancedMetrics, nil, \"us-east-1\", model.Role{}, mockConfig)\n\n\t\t\tif tt.wantErr {\n\t\t\t\trequire.Error(t, err)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\trequire.Len(t, result, tt.wantResultCount)\n\n\t\t\tif tt.wantResultCount > 0 {\n\t\t\t\tfor _, metric := range result {\n\t\t\t\t\trequire.NotNil(t, metric)\n\t\t\t\t\trequire.Equal(t, awsElastiCacheNamespace, metric.Namespace)\n\t\t\t\t\trequire.NotEmpty(t, metric.Dimensions)\n\t\t\t\t\trequire.NotNil(t, metric.GetMetricDataResult)\n\t\t\t\t\trequire.Empty(t, metric.GetMetricDataResult.Statistic)\n\t\t\t\t\trequire.Nil(t, metric.GetMetricStatisticsResult)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\ntype mockServiceElastiCacheClient struct {\n\tclusters    []types.CacheCluster\n\tdescribeErr bool\n}\n\nfunc (m *mockServiceElastiCacheClient) DescribeAllCacheClusters(_ context.Context, _ *slog.Logger) ([]types.CacheCluster, error) {\n\tif m.describeErr {\n\t\treturn nil, fmt.Errorf(\"mock describe error\")\n\t}\n\treturn m.clusters, nil\n}\n\ntype mockConfigProvider 
struct {\n\tc *aws.Config\n}\n\nfunc (m *mockConfigProvider) GetAWSRegionalConfig(_ string, _ model.Role) *aws.Config {\n\treturn m.c\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/lambda/client.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage lambda\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/lambda\"\n\t\"github.com/aws/aws-sdk-go-v2/service/lambda/types\"\n)\n\ntype awsClient interface {\n\tListFunctions(ctx context.Context, params *lambda.ListFunctionsInput, optFns ...func(*lambda.Options)) (*lambda.ListFunctionsOutput, error)\n}\n\n// AWSLambdaClient wraps the AWS Lambda client\ntype AWSLambdaClient struct {\n\tclient awsClient\n}\n\n// NewLambdaClientWithConfig creates a new Lambda client with custom AWS configuration\nfunc NewLambdaClientWithConfig(cfg aws.Config) Client {\n\treturn &AWSLambdaClient{\n\t\tclient: lambda.NewFromConfig(cfg),\n\t}\n}\n\n// listFunctions retrieves a list of Lambda functions\nfunc (c *AWSLambdaClient) listFunctions(ctx context.Context, input *lambda.ListFunctionsInput) (*lambda.ListFunctionsOutput, error) {\n\tresult, err := c.client.ListFunctions(ctx, input)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to list Lambda functions: %w\", err)\n\t}\n\n\treturn result, nil\n}\n\n// ListAllFunctions retrieves all Lambda functions by handling pagination\nfunc (c *AWSLambdaClient) ListAllFunctions(ctx context.Context, logger *slog.Logger) ([]types.FunctionConfiguration, error) {\n\tlogger.Debug(\"Listing all Lambda functions\")\n\tvar 
allFunctions []types.FunctionConfiguration\n\tvar marker *string\n\tvar maxItems int32 = 50\n\n\tfor {\n\t\toutput, err := c.listFunctions(ctx, &lambda.ListFunctionsInput{\n\t\t\tMarker:   marker,\n\t\t\tMaxItems: &maxItems,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tallFunctions = append(allFunctions, output.Functions...)\n\n\t\tif output.NextMarker == nil {\n\t\t\tbreak\n\t\t}\n\t\tmarker = output.NextMarker\n\t}\n\n\tlogger.Debug(\"Completed listing all Lambda functions\", slog.Int(\"totalFunctions\", len(allFunctions)))\n\treturn allFunctions, nil\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/lambda/client_test.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage lambda\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/lambda\"\n\t\"github.com/aws/aws-sdk-go-v2/service/lambda/types\"\n)\n\nfunc TestAWSLambdaClient_ListAllFunctions(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tclient  awsClient\n\t\twant    []types.FunctionConfiguration\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"success - single page\",\n\t\t\tclient: &mockLambdaClient{\n\t\t\t\tlistFunctionsFunc: func(_ context.Context, _ *lambda.ListFunctionsInput, _ ...func(*lambda.Options)) (*lambda.ListFunctionsOutput, error) {\n\t\t\t\t\treturn &lambda.ListFunctionsOutput{\n\t\t\t\t\t\tFunctions: []types.FunctionConfiguration{\n\t\t\t\t\t\t\t{FunctionName: aws.String(\"function-1\")},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNextMarker: nil,\n\t\t\t\t\t}, nil\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.FunctionConfiguration{\n\t\t\t\t{FunctionName: aws.String(\"function-1\")},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"success - multiple pages\",\n\t\t\tclient: &mockLambdaClient{\n\t\t\t\tlistFunctionsFunc: func() func(_ context.Context, _ *lambda.ListFunctionsInput, _ ...func(*lambda.Options)) (*lambda.ListFunctionsOutput, error) {\n\t\t\t\t\tcallCount := 0\n\t\t\t\t\treturn func(_ context.Context, _ 
*lambda.ListFunctionsInput, _ ...func(*lambda.Options)) (*lambda.ListFunctionsOutput, error) {\n\t\t\t\t\t\tcallCount++\n\t\t\t\t\t\tif callCount == 1 {\n\t\t\t\t\t\t\treturn &lambda.ListFunctionsOutput{\n\t\t\t\t\t\t\t\tFunctions: []types.FunctionConfiguration{\n\t\t\t\t\t\t\t\t\t{FunctionName: aws.String(\"function-1\")},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tNextMarker: aws.String(\"marker1\"),\n\t\t\t\t\t\t\t}, nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn &lambda.ListFunctionsOutput{\n\t\t\t\t\t\t\tFunctions: []types.FunctionConfiguration{\n\t\t\t\t\t\t\t\t{FunctionName: aws.String(\"function-2\")},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tNextMarker: nil,\n\t\t\t\t\t\t}, nil\n\t\t\t\t\t}\n\t\t\t\t}(),\n\t\t\t},\n\t\t\twant: []types.FunctionConfiguration{\n\t\t\t\t{FunctionName: aws.String(\"function-1\")},\n\t\t\t\t{FunctionName: aws.String(\"function-2\")},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"error - API failure\",\n\t\t\tclient: &mockLambdaClient{\n\t\t\t\tlistFunctionsFunc: func(_ context.Context, _ *lambda.ListFunctionsInput, _ ...func(*lambda.Options)) (*lambda.ListFunctionsOutput, error) {\n\t\t\t\t\treturn nil, fmt.Errorf(\"API error\")\n\t\t\t\t},\n\t\t\t},\n\t\t\twant:    nil,\n\t\t\twantErr: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tc := &AWSLambdaClient{\n\t\t\t\tclient: tt.client,\n\t\t\t}\n\t\t\tgot, err := c.ListAllFunctions(context.Background(), slog.New(slog.DiscardHandler))\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"ListAllFunctions() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"ListAllFunctions() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// mockLambdaClient is a mock implementation of AWS Lambda Client\ntype mockLambdaClient struct {\n\tlistFunctionsFunc func(ctx context.Context, params *lambda.ListFunctionsInput, optFns ...func(*lambda.Options)) 
(*lambda.ListFunctionsOutput, error)\n}\n\nfunc (m *mockLambdaClient) ListFunctions(ctx context.Context, params *lambda.ListFunctionsInput, optFns ...func(*lambda.Options)) (*lambda.ListFunctionsOutput, error) {\n\treturn m.listFunctionsFunc(ctx, params, optFns...)\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/lambda/service.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage lambda\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/lambda/types\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nconst awsLambdaNamespace = \"AWS/Lambda\"\n\ntype Client interface {\n\tListAllFunctions(ctx context.Context, logger *slog.Logger) ([]types.FunctionConfiguration, error)\n}\n\ntype buildCloudwatchDataFunc func(*model.TaggedResource, *types.FunctionConfiguration, []string) (*model.CloudwatchData, error)\n\ntype supportedMetric struct {\n\tname                    string\n\tbuildCloudwatchDataFunc buildCloudwatchDataFunc\n\trequiredPermissions     []string\n}\n\nfunc (sm *supportedMetric) buildCloudwatchData(resource *model.TaggedResource, functionConfiguration *types.FunctionConfiguration, exportedTagOnMetrics []string) (*model.CloudwatchData, error) {\n\treturn sm.buildCloudwatchDataFunc(resource, functionConfiguration, exportedTagOnMetrics)\n}\n\ntype Lambda struct {\n\tsupportedMetrics map[string]supportedMetric\n\tbuildClientFunc  func(cfg aws.Config) Client\n}\n\nfunc 
NewLambdaService(buildClientFunc func(cfg aws.Config) Client) *Lambda {\n\tif buildClientFunc == nil {\n\t\tbuildClientFunc = NewLambdaClientWithConfig\n\t}\n\tsvc := &Lambda{\n\t\tbuildClientFunc: buildClientFunc,\n\t}\n\n\t// The maximum execution duration permitted for the function before termination.\n\ttimeoutMetric := supportedMetric{\n\t\tname:                    \"Timeout\",\n\t\tbuildCloudwatchDataFunc: buildTimeoutMetric,\n\t\trequiredPermissions:     []string{\"lambda:ListFunctions\"},\n\t}\n\n\tsvc.supportedMetrics = map[string]supportedMetric{\n\t\ttimeoutMetric.name: timeoutMetric,\n\t}\n\n\treturn svc\n}\n\nfunc (s *Lambda) GetNamespace() string {\n\treturn awsLambdaNamespace\n}\n\nfunc (s *Lambda) loadMetricsMetadata(ctx context.Context, logger *slog.Logger, region string, role model.Role, configProvider config.RegionalConfigProvider) (map[string]*types.FunctionConfiguration, error) {\n\tclient := s.buildClientFunc(*configProvider.GetAWSRegionalConfig(region, role))\n\n\tinstances, err := client.ListAllFunctions(ctx, logger)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error listing functions in region %s: %w\", region, err)\n\t}\n\n\tregionalData := make(map[string]*types.FunctionConfiguration, len(instances))\n\tfor _, instance := range instances {\n\t\tregionalData[*instance.FunctionArn] = &instance\n\t}\n\n\tlogger.Info(\"Loaded Lambda metrics metadata\", \"region\", region)\n\treturn regionalData, nil\n}\n\nfunc (s *Lambda) IsMetricSupported(metricName string) bool {\n\t_, exists := s.supportedMetrics[metricName]\n\treturn exists\n}\n\nfunc (s *Lambda) GetMetrics(ctx context.Context, logger *slog.Logger, resources []*model.TaggedResource, enhancedMetricConfigs []*model.EnhancedMetricConfig, exportedTagOnMetrics []string, region string, role model.Role, regionalConfigProvider config.RegionalConfigProvider) ([]*model.CloudwatchData, error) {\n\tif len(resources) == 0 || len(enhancedMetricConfigs) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tdata, 
err := s.loadMetricsMetadata(\n\t\tctx,\n\t\tlogger,\n\t\tregion,\n\t\trole,\n\t\tregionalConfigProvider,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error loading lambda metrics metadata: %w\", err)\n\t}\n\n\tvar result []*model.CloudwatchData\n\n\tfor _, resource := range resources {\n\t\tif resource.Namespace != s.GetNamespace() {\n\t\t\tlogger.Warn(\"Resource namespace does not match Lambda namespace, skipping\", \"arn\", resource.ARN, \"namespace\", resource.Namespace)\n\t\t\tcontinue\n\t\t}\n\n\t\tfunctionConfiguration, exists := data[resource.ARN]\n\t\tif !exists {\n\t\t\tlogger.Warn(\"Lambda function not found in data\", \"arn\", resource.ARN)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, enhancedMetric := range enhancedMetricConfigs {\n\t\t\tsupportedMetric, ok := s.supportedMetrics[enhancedMetric.Name]\n\t\t\tif !ok {\n\t\t\t\tlogger.Warn(\"Unsupported Lambda enhanced metric, skipping\", \"metric\", enhancedMetric.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tem, err := supportedMetric.buildCloudwatchData(resource, functionConfiguration, exportedTagOnMetrics)\n\t\t\tif err != nil || em == nil {\n\t\t\t\tlogger.Warn(\"Error building Lambda enhanced metric\", \"metric\", enhancedMetric.Name, \"error\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tresult = append(result, em)\n\t\t}\n\t}\n\n\treturn result, nil\n}\n\nfunc (s *Lambda) ListRequiredPermissions() map[string][]string {\n\tpermissions := make(map[string][]string, len(s.supportedMetrics))\n\tfor _, metric := range s.supportedMetrics {\n\t\tpermissions[metric.name] = metric.requiredPermissions\n\t}\n\treturn permissions\n}\n\nfunc (s *Lambda) ListSupportedEnhancedMetrics() []string {\n\tvar metrics []string\n\tfor metric := range s.supportedMetrics {\n\t\tmetrics = append(metrics, metric)\n\t}\n\treturn metrics\n}\n\nfunc (s *Lambda) Instance() service.EnhancedMetricsService {\n\t// do not use NewLambdaService to avoid extra map allocation\n\treturn &Lambda{\n\t\tsupportedMetrics: 
s.supportedMetrics,\n\t\tbuildClientFunc:  s.buildClientFunc,\n\t}\n}\n\nfunc buildTimeoutMetric(resource *model.TaggedResource, fn *types.FunctionConfiguration, exportedTags []string) (*model.CloudwatchData, error) {\n\tif fn.Timeout == nil {\n\t\treturn nil, fmt.Errorf(\"timeout is nil for Lambda function %s\", resource.ARN)\n\t}\n\n\tvar dimensions []model.Dimension\n\n\tif fn.FunctionName != nil {\n\t\tdimensions = []model.Dimension{\n\t\t\t{Name: \"FunctionName\", Value: *fn.FunctionName},\n\t\t}\n\t}\n\n\tvalue := float64(*fn.Timeout)\n\treturn &model.CloudwatchData{\n\t\tMetricName:   \"Timeout\",\n\t\tResourceName: resource.ARN,\n\t\tNamespace:    \"AWS/Lambda\",\n\t\tDimensions:   dimensions,\n\t\tTags:         resource.MetricTags(exportedTags),\n\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\tDataPoints: []model.DataPoint{\n\t\t\t\t{\n\t\t\t\t\tValue:     &value,\n\t\t\t\t\tTimestamp: time.Now(),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/lambda/service_test.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage lambda\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/lambda/types\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nfunc TestNewLambdaService(t *testing.T) {\n\ttests := []struct {\n\t\tname            string\n\t\tbuildClientFunc func(cfg aws.Config) Client\n\t}{\n\t\t{\n\t\t\tname:            \"with nil buildClientFunc\",\n\t\t\tbuildClientFunc: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"with custom buildClientFunc\",\n\t\t\tbuildClientFunc: func(_ aws.Config) Client {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := NewLambdaService(tt.buildClientFunc)\n\t\t\trequire.NotNil(t, got)\n\t\t\trequire.Len(t, got.supportedMetrics, 1)\n\t\t\trequire.NotNil(t, got.supportedMetrics[\"Timeout\"])\n\t\t})\n\t}\n}\n\nfunc TestLambda_GetNamespace(t *testing.T) {\n\tservice := NewLambdaService(nil)\n\texpectedNamespace := awsLambdaNamespace\n\trequire.Equal(t, expectedNamespace, service.GetNamespace())\n}\n\nfunc TestLambda_ListRequiredPermissions(t *testing.T) {\n\tservice := NewLambdaService(nil)\n\texpectedPermissions := map[string][]string{\n\t\t\"Timeout\": 
{\"lambda:ListFunctions\"},\n\t}\n\trequire.Equal(t, expectedPermissions, service.ListRequiredPermissions())\n}\n\nfunc TestLambda_ListSupportedEnhancedMetrics(t *testing.T) {\n\tservice := NewLambdaService(nil)\n\texpectedMetrics := []string{\n\t\t\"Timeout\",\n\t}\n\trequire.Equal(t, expectedMetrics, service.ListSupportedEnhancedMetrics())\n}\n\nfunc TestLambda_GetMetrics(t *testing.T) {\n\tmakeFunctionConfiguration := func(name string, timeout int32) types.FunctionConfiguration {\n\t\tarn := fmt.Sprintf(\"arn:aws:lambda:us-east-1:123456789012:function:%s\", name)\n\t\treturn types.FunctionConfiguration{\n\t\t\tFunctionArn:  aws.String(arn),\n\t\t\tFunctionName: aws.String(name),\n\t\t\tTimeout:      aws.Int32(timeout),\n\t\t}\n\t}\n\n\ttests := []struct {\n\t\tname            string\n\t\tresources       []*model.TaggedResource\n\t\tenhancedMetrics []*model.EnhancedMetricConfig\n\t\tfunctions       []types.FunctionConfiguration\n\t\twantErr         bool\n\t\twantCount       int\n\t}{\n\t\t{\n\t\t\tname:            \"empty resources returns empty\",\n\t\t\tresources:       []*model.TaggedResource{},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"Timeout\"}},\n\t\t\tfunctions:       []types.FunctionConfiguration{makeFunctionConfiguration(\"test\", 300)},\n\t\t\twantCount:       0,\n\t\t},\n\t\t{\n\t\t\tname:            \"empty enhanced metrics returns empty\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: \"arn:aws:lambda:us-east-1:123456789012:function:test\"}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{},\n\t\t\tfunctions:       []types.FunctionConfiguration{makeFunctionConfiguration(\"test\", 300)},\n\t\t\twantCount:       0,\n\t\t},\n\t\t{\n\t\t\tname:            \"wrong namespace skips resource\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: \"arn:aws:lambda:us-east-1:123456789012:function:test\"}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"Timeout\"}},\n\t\t\twantErr:         
false,\n\t\t},\n\t\t{\n\t\t\tname: \"successfully received single metric\",\n\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t{ARN: \"arn:aws:lambda:us-east-1:123456789012:function:test\", Namespace: awsLambdaNamespace},\n\t\t\t},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"Timeout\"}},\n\t\t\tfunctions:       []types.FunctionConfiguration{makeFunctionConfiguration(\"test\", 300)},\n\t\t\twantCount:       1,\n\t\t},\n\t\t{\n\t\t\tname: \"skips unsupported metrics\",\n\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t{ARN: \"arn:aws:lambda:us-east-1:123456789012:function:test\"},\n\t\t\t},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"UnsupportedMetric\"}},\n\t\t\tfunctions:       []types.FunctionConfiguration{makeFunctionConfiguration(\"test\", 300)},\n\t\t\twantCount:       0,\n\t\t},\n\t\t{\n\t\t\tname: \"processes multiple resources\",\n\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t{ARN: \"arn:aws:lambda:us-east-1:123456789012:function:func1\", Namespace: awsLambdaNamespace},\n\t\t\t\t{ARN: \"arn:aws:lambda:us-east-1:123456789012:function:func2\", Namespace: awsLambdaNamespace},\n\t\t\t},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"Timeout\"}},\n\t\t\tfunctions:       []types.FunctionConfiguration{makeFunctionConfiguration(\"func1\", 300), makeFunctionConfiguration(\"func2\", 600)},\n\t\t\twantCount:       2,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tservice := NewLambdaService(func(_ aws.Config) Client {\n\t\t\t\treturn &mockServiceLambdaClient{functions: tt.functions}\n\t\t\t})\n\n\t\t\tresult, err := service.GetMetrics(context.Background(), slog.New(slog.DiscardHandler), tt.resources, tt.enhancedMetrics, nil, \"us-east-1\", model.Role{}, &mockConfigProvider{c: &aws.Config{Region: \"us-east-1\"}})\n\n\t\t\tif tt.wantErr {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, result, 
tt.wantCount)\n\n\t\t\tfor _, metric := range result {\n\t\t\t\trequire.Equal(t, awsLambdaNamespace, metric.Namespace)\n\t\t\t\trequire.NotEmpty(t, metric.Dimensions)\n\t\t\t\trequire.NotNil(t, metric.GetMetricDataResult)\n\t\t\t}\n\t\t})\n\t}\n}\n\ntype mockServiceLambdaClient struct {\n\tfunctions []types.FunctionConfiguration\n}\n\nfunc (m *mockServiceLambdaClient) ListAllFunctions(_ context.Context, _ *slog.Logger) ([]types.FunctionConfiguration, error) {\n\treturn m.functions, nil\n}\n\ntype mockConfigProvider struct {\n\tc *aws.Config\n}\n\nfunc (m *mockConfigProvider) GetAWSRegionalConfig(_ string, _ model.Role) *aws.Config {\n\treturn m.c\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/rds/client.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage rds\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/rds\"\n\t\"github.com/aws/aws-sdk-go-v2/service/rds/types\"\n)\n\ntype awsClient interface {\n\tDescribeDBInstances(ctx context.Context, params *rds.DescribeDBInstancesInput, optFns ...func(*rds.Options)) (*rds.DescribeDBInstancesOutput, error)\n}\n\n// AWSRDSClient wraps the AWS RDS client\ntype AWSRDSClient struct {\n\tclient awsClient\n}\n\n// NewRDSClientWithConfig creates a new RDS client with custom AWS configuration\nfunc NewRDSClientWithConfig(cfg aws.Config) Client {\n\treturn &AWSRDSClient{\n\t\tclient: rds.NewFromConfig(cfg),\n\t}\n}\n\n// describeDBInstances retrieves information about provisioned RDS instances\nfunc (c *AWSRDSClient) describeDBInstances(ctx context.Context, input *rds.DescribeDBInstancesInput) (*rds.DescribeDBInstancesOutput, error) {\n\tresult, err := c.client.DescribeDBInstances(ctx, input)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to describe DB instances: %w\", err)\n\t}\n\treturn result, nil\n}\n\n// DescribeDBInstances retrieves the requested DB instances by handling pagination\nfunc (c *AWSRDSClient) DescribeDBInstances(ctx context.Context, logger *slog.Logger, dbInstances []string) ([]types.DBInstance, error) {\n\tlogger.Debug(\"Describing all RDS DB 
instances\")\n\tvar allInstances []types.DBInstance\n\tvar marker *string\n\tmaxRecords := aws.Int32(100)\n\n\tfor {\n\t\toutput, err := c.describeDBInstances(ctx, &rds.DescribeDBInstancesInput{\n\t\t\tMarker:     marker,\n\t\t\tMaxRecords: maxRecords,\n\t\t\tFilters: []types.Filter{\n\t\t\t\t{\n\t\t\t\t\tName:   aws.String(\"db-instance-id\"),\n\t\t\t\t\tValues: dbInstances,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tallInstances = append(allInstances, output.DBInstances...)\n\n\t\tif output.Marker == nil {\n\t\t\tbreak\n\t\t}\n\t\tmarker = output.Marker\n\t}\n\n\tlogger.Debug(\"Completed describing RDS DB instances\", slog.Int(\"totalInstances\", len(allInstances)))\n\treturn allInstances, nil\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/rds/client_test.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage rds\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/rds\"\n\t\"github.com/aws/aws-sdk-go-v2/service/rds/types\"\n)\n\nfunc TestAWSRDSClient_DescribeDBInstances(t *testing.T) {\n\ttests := []struct {\n\t\tname      string\n\t\tclient    awsClient\n\t\twant      []types.DBInstance\n\t\twantErr   bool\n\t\tinstances []string\n\t}{\n\t\t{\n\t\t\tname:      \"success - single page\",\n\t\t\tinstances: []string{\"db-1\"},\n\t\t\tclient: &mockRDSClient{\n\t\t\t\tdescribeDBInstancesFunc: func(_ context.Context, params *rds.DescribeDBInstancesInput, _ ...func(*rds.Options)) (*rds.DescribeDBInstancesOutput, error) {\n\t\t\t\t\tif len(params.Filters) != 1 || *params.Filters[0].Name != \"db-instance-id\" {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected filter: %v\", params.Filters)\n\t\t\t\t\t}\n\t\t\t\t\treturn &rds.DescribeDBInstancesOutput{\n\t\t\t\t\t\tDBInstances: []types.DBInstance{\n\t\t\t\t\t\t\t{DBInstanceIdentifier: aws.String(\"db-1\")},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tMarker: nil,\n\t\t\t\t\t}, nil\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: []types.DBInstance{\n\t\t\t\t{DBInstanceIdentifier: aws.String(\"db-1\")},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:      \"success - multiple pages\",\n\t\t\tinstances: 
[]string{\"db-1\", \"db-2\"},\n\t\t\tclient: &mockRDSClient{\n\t\t\t\tdescribeDBInstancesFunc: func() func(_ context.Context, params *rds.DescribeDBInstancesInput, _ ...func(*rds.Options)) (*rds.DescribeDBInstancesOutput, error) {\n\t\t\t\t\tcallCount := 0\n\t\t\t\t\treturn func(_ context.Context, params *rds.DescribeDBInstancesInput, _ ...func(*rds.Options)) (*rds.DescribeDBInstancesOutput, error) {\n\t\t\t\t\t\tif len(params.Filters) != 1 || *params.Filters[0].Name != \"db-instance-id\" {\n\t\t\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected filter: %v\", params.Filters)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif params.Filters[0].Values[0] != \"db-1\" || params.Filters[0].Values[1] != \"db-2\" {\n\t\t\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected filter values: %v\", params.Filters[0].Values)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tcallCount++\n\t\t\t\t\t\tif callCount == 1 {\n\t\t\t\t\t\t\treturn &rds.DescribeDBInstancesOutput{\n\t\t\t\t\t\t\t\tDBInstances: []types.DBInstance{\n\t\t\t\t\t\t\t\t\t{DBInstanceIdentifier: aws.String(\"db-1\")},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tMarker: aws.String(\"marker1\"),\n\t\t\t\t\t\t\t}, nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn &rds.DescribeDBInstancesOutput{\n\t\t\t\t\t\t\tDBInstances: []types.DBInstance{\n\t\t\t\t\t\t\t\t{DBInstanceIdentifier: aws.String(\"db-2\")},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tMarker: nil,\n\t\t\t\t\t\t}, nil\n\t\t\t\t\t}\n\t\t\t\t}(),\n\t\t\t},\n\t\t\twant: []types.DBInstance{\n\t\t\t\t{DBInstanceIdentifier: aws.String(\"db-1\")},\n\t\t\t\t{DBInstanceIdentifier: aws.String(\"db-2\")},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"error - API failure\",\n\t\t\tclient: &mockRDSClient{\n\t\t\t\tdescribeDBInstancesFunc: func(_ context.Context, _ *rds.DescribeDBInstancesInput, _ ...func(*rds.Options)) (*rds.DescribeDBInstancesOutput, error) {\n\t\t\t\t\treturn nil, fmt.Errorf(\"API error\")\n\t\t\t\t},\n\t\t\t},\n\t\t\twant:    nil,\n\t\t\twantErr: true,\n\t\t},\n\t}\n\tfor _, tt := range tests 
{\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tc := &AWSRDSClient{\n\t\t\t\tclient: tt.client,\n\t\t\t}\n\t\t\tgot, err := c.DescribeDBInstances(context.Background(), slog.New(slog.DiscardHandler), tt.instances)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"DescribeDBInstances() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"DescribeDBInstances() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// mockRDSClient is a mock implementation of AWS RDS Client\ntype mockRDSClient struct {\n\tdescribeDBInstancesFunc func(ctx context.Context, params *rds.DescribeDBInstancesInput, optFns ...func(*rds.Options)) (*rds.DescribeDBInstancesOutput, error)\n}\n\nfunc (m *mockRDSClient) DescribeDBInstances(ctx context.Context, params *rds.DescribeDBInstancesInput, optFns ...func(*rds.Options)) (*rds.DescribeDBInstancesOutput, error) {\n\treturn m.describeDBInstancesFunc(ctx, params, optFns...)\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/rds/service.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage rds\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/rds/types\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nconst awsRdsNamespace = \"AWS/RDS\"\n\ntype Client interface {\n\tDescribeDBInstances(ctx context.Context, logger *slog.Logger, dbInstances []string) ([]types.DBInstance, error)\n}\n\ntype buildCloudwatchData func(*model.TaggedResource, *types.DBInstance, []string) (*model.CloudwatchData, error)\n\ntype supportedMetric struct {\n\tname                    string\n\tbuildCloudwatchDataFunc buildCloudwatchData\n\trequiredPermissions     []string\n}\n\nfunc (sm *supportedMetric) buildCloudwatchData(resource *model.TaggedResource, instance *types.DBInstance, metrics []string) (*model.CloudwatchData, error) {\n\treturn sm.buildCloudwatchDataFunc(resource, instance, metrics)\n}\n\ntype RDS struct {\n\tsupportedMetrics map[string]supportedMetric\n\tbuildClientFunc  func(cfg aws.Config) Client\n}\n\nfunc NewRDSService(buildClientFunc func(cfg aws.Config) Client) *RDS {\n\tif buildClientFunc == 
nil {\n\t\tbuildClientFunc = NewRDSClientWithConfig\n\t}\n\n\trds := &RDS{\n\t\tbuildClientFunc: buildClientFunc,\n\t}\n\n\t// The storage capacity in gibibytes (GiB) allocated for the DB instance.\n\tallocatedStorageMetrics := supportedMetric{\n\t\tname:                    \"AllocatedStorage\",\n\t\tbuildCloudwatchDataFunc: buildAllocatedStorageMetric,\n\t\trequiredPermissions:     []string{\"rds:DescribeDBInstances\"},\n\t}\n\trds.supportedMetrics = map[string]supportedMetric{\n\t\tallocatedStorageMetrics.name: allocatedStorageMetrics,\n\t}\n\n\treturn rds\n}\n\n// GetNamespace returns the AWS CloudWatch namespace for RDS\nfunc (s *RDS) GetNamespace() string {\n\treturn awsRdsNamespace\n}\n\n// loadMetricsMetadata loads any metadata needed for RDS enhanced metrics for the given region and role\nfunc (s *RDS) loadMetricsMetadata(\n\tctx context.Context,\n\tlogger *slog.Logger,\n\tregion string,\n\trole model.Role,\n\tconfigProvider config.RegionalConfigProvider,\n\tdbInstances []string,\n) (map[string]*types.DBInstance, error) {\n\tclient := s.buildClientFunc(*configProvider.GetAWSRegionalConfig(region, role))\n\n\tinstances, err := client.DescribeDBInstances(ctx, logger, dbInstances)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error describing RDS DB instances in region %s: %w\", region, err)\n\t}\n\n\tregionalData := make(map[string]*types.DBInstance, len(instances))\n\n\tfor _, instance := range instances {\n\t\tregionalData[*instance.DBInstanceArn] = &instance\n\t}\n\n\treturn regionalData, nil\n}\n\nfunc (s *RDS) IsMetricSupported(metricName string) bool {\n\t_, exists := s.supportedMetrics[metricName]\n\treturn exists\n}\n\nfunc (s *RDS) GetMetrics(ctx context.Context, logger *slog.Logger, resources []*model.TaggedResource, enhancedMetricConfigs []*model.EnhancedMetricConfig, exportedTagOnMetrics []string, region string, role model.Role, regionalConfigProvider config.RegionalConfigProvider) ([]*model.CloudwatchData, error) {\n\tif len(resources) == 0 || 
len(enhancedMetricConfigs) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tdbInstances := make([]string, 0, len(resources))\n\tfor _, resource := range resources {\n\t\tdbInstances = append(dbInstances, resource.ARN)\n\t}\n\n\tdata, err := s.loadMetricsMetadata(\n\t\tctx,\n\t\tlogger,\n\t\tregion,\n\t\trole,\n\t\tregionalConfigProvider,\n\t\tdbInstances,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error loading RDS metrics metadata: %w\", err)\n\t}\n\n\tvar result []*model.CloudwatchData\n\n\tfor _, resource := range resources {\n\t\tif resource.Namespace != s.GetNamespace() {\n\t\t\tlogger.Warn(\"RDS enhanced metrics service cannot process resource with different namespace\", \"namespace\", resource.Namespace, \"arn\", resource.ARN)\n\t\t\tcontinue\n\t\t}\n\n\t\tdbInstance, exists := data[resource.ARN]\n\t\tif !exists {\n\t\t\tlogger.Warn(\"RDS DB instance not found in metadata\", \"arn\", resource.ARN)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, enhancedMetric := range enhancedMetricConfigs {\n\t\t\tsupportedMetric, ok := s.supportedMetrics[enhancedMetric.Name]\n\t\t\tif !ok {\n\t\t\t\tlogger.Warn(\"Unsupported RDS enhanced metric requested\", \"metric\", enhancedMetric.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tem, err := supportedMetric.buildCloudwatchData(resource, dbInstance, exportedTagOnMetrics)\n\t\t\tif err != nil || em == nil {\n\t\t\t\tlogger.Warn(\"Error building RDS enhanced metric\", \"metric\", enhancedMetric.Name, \"error\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tresult = append(result, em)\n\t\t}\n\t}\n\n\treturn result, nil\n}\n\nfunc (s *RDS) ListRequiredPermissions() map[string][]string {\n\trequiredPermissions := make(map[string][]string, len(s.supportedMetrics))\n\tfor metricName, metric := range s.supportedMetrics {\n\t\trequiredPermissions[metricName] = metric.requiredPermissions\n\t}\n\treturn requiredPermissions\n}\n\nfunc (s *RDS) ListSupportedEnhancedMetrics() []string {\n\tvar metrics []string\n\tfor metric := range s.supportedMetrics 
{\n\t\tmetrics = append(metrics, metric)\n\t}\n\treturn metrics\n}\n\nfunc (s *RDS) Instance() service.EnhancedMetricsService {\n\t// do not use NewRDSService to avoid extra map allocation\n\treturn &RDS{\n\t\tsupportedMetrics: s.supportedMetrics,\n\t\tbuildClientFunc:  s.buildClientFunc,\n\t}\n}\n\nfunc buildAllocatedStorageMetric(resource *model.TaggedResource, instance *types.DBInstance, exportedTags []string) (*model.CloudwatchData, error) {\n\tif instance.AllocatedStorage == nil {\n\t\treturn nil, fmt.Errorf(\"AllocatedStorage is nil for DB instance %s\", resource.ARN)\n\t}\n\n\tvar dimensions []model.Dimension\n\n\tif instance.DBInstanceIdentifier != nil && len(*instance.DBInstanceIdentifier) > 0 {\n\t\tdimensions = append(dimensions, model.Dimension{\n\t\t\tName:  \"DBInstanceIdentifier\",\n\t\t\tValue: *instance.DBInstanceIdentifier,\n\t\t})\n\t}\n\n\tif instance.DBInstanceClass != nil && len(*instance.DBInstanceClass) > 0 {\n\t\tdimensions = append(dimensions, model.Dimension{\n\t\t\tName:  \"DatabaseClass\",\n\t\t\tValue: *instance.DBInstanceClass,\n\t\t})\n\t}\n\n\tif instance.Engine != nil && len(*instance.Engine) > 0 {\n\t\tdimensions = append(dimensions, model.Dimension{\n\t\t\tName:  \"EngineName\",\n\t\t\tValue: *instance.Engine,\n\t\t})\n\t}\n\n\t// Convert from GiB to bytes\n\tvalueInBytes := float64(*instance.AllocatedStorage) * 1024 * 1024 * 1024\n\n\treturn &model.CloudwatchData{\n\t\tMetricName:   \"AllocatedStorage\",\n\t\tResourceName: resource.ARN,\n\t\tNamespace:    awsRdsNamespace,\n\t\tDimensions:   dimensions,\n\t\tTags:         resource.MetricTags(exportedTags),\n\n\t\t// Store the value as a single data point\n\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\tDataPoints: []model.DataPoint{\n\t\t\t\t{\n\t\t\t\t\tValue:     &valueInBytes,\n\t\t\t\t\tTimestamp: time.Now(),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/rds/service_test.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage rds\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/rds/types\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nfunc TestNewRDSService(t *testing.T) {\n\ttests := []struct {\n\t\tname            string\n\t\tbuildClientFunc func(cfg aws.Config) Client\n\t}{\n\t\t{\n\t\t\tname:            \"with nil buildClientFunc\",\n\t\t\tbuildClientFunc: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"with custom buildClientFunc\",\n\t\t\tbuildClientFunc: func(_ aws.Config) Client {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := NewRDSService(tt.buildClientFunc)\n\t\t\trequire.NotNil(t, got)\n\t\t\trequire.Len(t, got.supportedMetrics, 1)\n\t\t\trequire.NotNil(t, got.supportedMetrics[\"AllocatedStorage\"])\n\t\t})\n\t}\n}\n\nfunc TestRDS_GetNamespace(t *testing.T) {\n\tservice := NewRDSService(nil)\n\texpectedNamespace := awsRdsNamespace\n\trequire.Equal(t, expectedNamespace, service.GetNamespace())\n}\n\nfunc TestRDS_ListRequiredPermissions(t *testing.T) {\n\tservice := NewRDSService(nil)\n\texpectedPermissions := map[string][]string{\n\t\t\"AllocatedStorage\": 
{\"rds:DescribeDBInstances\"},\n\t}\n\trequire.Equal(t, expectedPermissions, service.ListRequiredPermissions())\n}\n\nfunc TestRDS_ListSupportedEnhancedMetrics(t *testing.T) {\n\tservice := NewRDSService(nil)\n\texpectedMetrics := []string{\n\t\t\"AllocatedStorage\",\n\t}\n\trequire.Equal(t, expectedMetrics, service.ListSupportedEnhancedMetrics())\n}\n\nfunc TestRDS_GetMetrics(t *testing.T) {\n\ttestInstance := makeTestDBInstance(\"test-instance\", 100)\n\ttestARN := *testInstance.DBInstanceArn\n\n\ttests := []struct {\n\t\tname            string\n\t\tresources       []*model.TaggedResource\n\t\tenhancedMetrics []*model.EnhancedMetricConfig\n\t\tregionalData    map[string]*types.DBInstance\n\t\twantErr         bool\n\t\twantResultCount int\n\t\twantValues      []float64 // Expected values in bytes\n\t}{\n\t\t{\n\t\t\tname:            \"empty resources\",\n\t\t\tresources:       []*model.TaggedResource{},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"AllocatedStorage\"}},\n\t\t\tregionalData:    map[string]*types.DBInstance{testARN: testInstance},\n\t\t\twantResultCount: 0,\n\t\t},\n\t\t{\n\t\t\tname:            \"empty enhanced metrics\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: testARN}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{},\n\t\t\tregionalData:    map[string]*types.DBInstance{testARN: testInstance},\n\t\t\twantResultCount: 0,\n\t\t},\n\t\t{\n\t\t\tname:            \"wrong namespace\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: testARN}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"AllocatedStorage\"}},\n\t\t\tregionalData:    map[string]*types.DBInstance{testARN: testInstance},\n\t\t\twantErr:         false,\n\t\t},\n\t\t{\n\t\t\tname:            \"metadata not loaded\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: testARN}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"AllocatedStorage\"}},\n\t\t\tregionalData:    nil,\n\t\t\twantResultCount: 
0,\n\t\t},\n\t\t{\n\t\t\tname:            \"successfully received metric\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: testARN, Namespace: awsRdsNamespace}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"AllocatedStorage\"}},\n\t\t\tregionalData:    map[string]*types.DBInstance{testARN: testInstance},\n\t\t\twantResultCount: 1,\n\t\t\twantValues:      []float64{107374182400}, // 100 GiB in bytes\n\t\t},\n\t\t{\n\t\t\tname:            \"resource not found in metadata\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: \"arn:aws:rds:us-east-1:123456789012:db:non-existent\"}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"AllocatedStorage\"}},\n\t\t\tregionalData:    map[string]*types.DBInstance{testARN: testInstance},\n\t\t\twantResultCount: 0,\n\t\t},\n\t\t{\n\t\t\tname:            \"unsupported metric\",\n\t\t\tresources:       []*model.TaggedResource{{ARN: testARN}},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"UnsupportedMetric\"}},\n\t\t\tregionalData:    map[string]*types.DBInstance{testARN: testInstance},\n\t\t\twantResultCount: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"multiple resources\",\n\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t{ARN: \"arn:aws:rds:us-east-1:123456789012:db:test-instance-1\", Namespace: awsRdsNamespace},\n\t\t\t\t{ARN: \"arn:aws:rds:us-east-1:123456789012:db:test-instance-2\", Namespace: awsRdsNamespace},\n\t\t\t},\n\t\t\tenhancedMetrics: []*model.EnhancedMetricConfig{{Name: \"AllocatedStorage\"}},\n\t\t\tregionalData: map[string]*types.DBInstance{\n\t\t\t\t\"arn:aws:rds:us-east-1:123456789012:db:test-instance-1\": makeTestDBInstance(\"test-instance-1\", 100),\n\t\t\t\t\"arn:aws:rds:us-east-1:123456789012:db:test-instance-2\": makeTestDBInstance(\"test-instance-2\", 200),\n\t\t\t},\n\t\t\twantResultCount: 2,\n\t\t\twantValues:      []float64{107374182400, 214748364800}, // 100 and 200 GiB in bytes\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t 
*testing.T) {\n\t\t\tservice := newTestRDSService(tt.regionalData)\n\t\t\tresult, err := service.GetMetrics(context.Background(), slog.New(slog.DiscardHandler), tt.resources, tt.enhancedMetrics, nil, \"us-east-1\", model.Role{}, &mockConfigProvider{c: &aws.Config{Region: \"us-east-1\"}})\n\n\t\t\tif tt.wantErr {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, result, tt.wantResultCount)\n\n\t\t\tfor i, metric := range result {\n\t\t\t\trequire.Equal(t, awsRdsNamespace, metric.Namespace)\n\t\t\t\trequire.NotEmpty(t, metric.Dimensions)\n\t\t\t\trequire.NotNil(t, metric.GetMetricDataResult)\n\t\t\t\trequire.Nil(t, metric.GetMetricStatisticsResult)\n\n\t\t\t\t// Validate the actual value if wantValues is specified\n\t\t\t\tif len(tt.wantValues) > 0 {\n\t\t\t\t\trequire.NotNil(t, metric.GetMetricDataResult.DataPoints)\n\t\t\t\t\trequire.Len(t, metric.GetMetricDataResult.DataPoints, 1)\n\t\t\t\t\trequire.NotNil(t, metric.GetMetricDataResult.DataPoints[0].Value)\n\t\t\t\t\trequire.Equal(t, tt.wantValues[i], *metric.GetMetricDataResult.DataPoints[0].Value,\n\t\t\t\t\t\t\"expected value in bytes for AllocatedStorage\")\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\ntype mockServiceRDSClient struct {\n\tinstances   []types.DBInstance\n\tdescribeErr bool\n}\n\nfunc (m *mockServiceRDSClient) DescribeDBInstances(context.Context, *slog.Logger, []string) ([]types.DBInstance, error) {\n\tif m.describeErr {\n\t\treturn nil, fmt.Errorf(\"mock describe error\")\n\t}\n\treturn m.instances, nil\n}\n\ntype mockConfigProvider struct {\n\tc *aws.Config\n}\n\nfunc (m *mockConfigProvider) GetAWSRegionalConfig(_ string, _ model.Role) *aws.Config {\n\treturn m.c\n}\n\n// Helper functions for test setup\n\nfunc makeTestDBInstance(name string, storage int32) *types.DBInstance {\n\tarn := fmt.Sprintf(\"arn:aws:rds:us-east-1:123456789012:db:%s\", name)\n\treturn &types.DBInstance{\n\t\tDBInstanceArn:        
aws.String(arn),\n\t\tDBInstanceIdentifier: aws.String(name),\n\t\tDBInstanceClass:      aws.String(\"db.t3.micro\"),\n\t\tEngine:               aws.String(\"postgres\"),\n\t\tAllocatedStorage:     aws.Int32(storage),\n\t}\n}\n\nfunc newTestRDSService(regionalData map[string]*types.DBInstance) *RDS {\n\treturn NewRDSService(func(_ aws.Config) Client {\n\t\treturn &mockServiceRDSClient{\n\t\t\tinstances: convertRegionalDataToInstances(regionalData),\n\t\t}\n\t})\n}\n\n// convertRegionalDataToInstances converts the regionalData map to a slice of DBInstance\nfunc convertRegionalDataToInstances(regionalData map[string]*types.DBInstance) []types.DBInstance {\n\tif regionalData == nil {\n\t\treturn nil\n\t}\n\tinstances := make([]types.DBInstance, 0, len(regionalData))\n\tfor _, instance := range regionalData {\n\t\tif instance != nil {\n\t\t\tinstances = append(instances, *instance)\n\t\t}\n\t}\n\treturn instances\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service/services.go",
    "content": "// Copyright 2026 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage service\n\nimport (\n\t\"context\"\n\t\"log/slog\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype EnhancedMetricsService interface {\n\t// GetMetrics returns enhanced metrics for the given resources and enhancedMetricConfigs.\n\t// filteredResources are the resources that belong to the service's namespace.\n\t// filteredEnhancedMetricConfigs are the enhanced metric configs that belong to the service's namespace and are supported by the service.\n\tGetMetrics(\n\t\tctx context.Context,\n\t\tlogger *slog.Logger,\n\t\tfilteredResources []*model.TaggedResource,\n\t\tfilteredEnhancedMetricConfigs []*model.EnhancedMetricConfig,\n\t\texportedTagOnMetrics []string,\n\t\tregion string,\n\t\trole model.Role,\n\t\tregionalConfigProvider config.RegionalConfigProvider,\n\t) ([]*model.CloudwatchData, error)\n\n\t// IsMetricSupported checks if the given metric name is supported by this service.\n\tIsMetricSupported(metricName string) bool\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage enhancedmetrics\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\n// MetricsServiceRegistry defines an interface to get enhanced metrics services by namespace\ntype MetricsServiceRegistry interface {\n\tGetEnhancedMetricsService(namespace string) (service.EnhancedMetricsService, error)\n}\n\n// Service is responsible for getting enhanced metrics using appropriate services.\ntype Service struct {\n\tconfigProvider                 config.RegionalConfigProvider\n\tenhancedMetricsServiceRegistry MetricsServiceRegistry\n}\n\n// GetMetrics returns the enhanced metrics for the specified namespace using the appropriate enhanced metrics service.\nfunc (ep *Service) GetMetrics(\n\tctx context.Context,\n\tlogger *slog.Logger,\n\tnamespace string,\n\tresources []*model.TaggedResource,\n\tmetrics []*model.EnhancedMetricConfig,\n\texportedTagOnMetrics []string,\n\tregion string,\n\trole model.Role,\n) ([]*model.CloudwatchData, error) {\n\tsvc, err := ep.enhancedMetricsServiceRegistry.GetEnhancedMetricsService(namespace)\n\tif err != nil {\n\t\treturn nil, 
fmt.Errorf(\"could not get enhanced metric service for namespace %s: %w\", namespace, err)\n\t}\n\n\t// filter out resources that do not match the service's namespace, it should not happen in the current scenario\n\tvar filteredResources []*model.TaggedResource\n\tfor _, res := range resources {\n\t\tif res.Namespace == namespace {\n\t\t\tfilteredResources = append(filteredResources, res)\n\t\t} else {\n\t\t\t// Resource validation should have happened earlier, this log will identify any unexpected issues\n\t\t\tlogger.Warn(\"Skipping resource for enhanced metric service due to namespace mismatch\",\n\t\t\t\t\"expected_namespace\", namespace,\n\t\t\t\t\"resource_namespace\", res.Namespace,\n\t\t\t\t\"resource_arn\", res.ARN,\n\t\t\t)\n\t\t}\n\t}\n\n\t// filter out metrics that are not supported by the service\n\tvar filteredMetrics []*model.EnhancedMetricConfig\n\tfor _, metric := range metrics {\n\t\tif svc.IsMetricSupported(metric.Name) {\n\t\t\tfilteredMetrics = append(filteredMetrics, metric)\n\t\t} else {\n\t\t\t// Metrics validation should have happened earlier, this log will identify any unexpected issues\n\t\t\tlogger.Warn(\"Skipping unsupported enhanced metric for service\",\n\t\t\t\t\"namespace\", namespace,\n\t\t\t\t\"metric\", metric.Name,\n\t\t\t)\n\t\t}\n\t}\n\n\treturn svc.GetMetrics(ctx, logger, filteredResources, filteredMetrics, exportedTagOnMetrics, region, role, ep.configProvider)\n}\n\nfunc NewService(\n\tconfigProvider config.RegionalConfigProvider,\n\tenhancedMetricsServiceRegistry MetricsServiceRegistry,\n) *Service {\n\treturn &Service{\n\t\tconfigProvider:                 configProvider,\n\t\tenhancedMetricsServiceRegistry: enhancedMetricsServiceRegistry,\n\t}\n}\n"
  },
  {
    "path": "pkg/internal/enhancedmetrics/service_test.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage enhancedmetrics\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"log/slog\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/service\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\n// mockConfigProvider is a mock implementation of config.RegionalConfigProvider\ntype mockConfigProvider struct {\n\tconfigs map[string]*aws.Config\n}\n\nfunc (m *mockConfigProvider) GetAWSRegionalConfig(region string, _ model.Role) *aws.Config {\n\tif m.configs == nil {\n\t\treturn &aws.Config{}\n\t}\n\tif cfg, ok := m.configs[region]; ok {\n\t\treturn cfg\n\t}\n\treturn &aws.Config{}\n}\n\n// mockMetricsService is a mock implementation of service.EnhancedMetricsService\ntype mockMetricsService struct {\n\tgetMetricsCalled int\n\terr              error\n\tresult           []*model.CloudwatchData\n\tmu               sync.Mutex\n}\n\nfunc (m *mockMetricsService) GetMetrics(context.Context, *slog.Logger, []*model.TaggedResource, []*model.EnhancedMetricConfig, []string, string, model.Role, config.RegionalConfigProvider) ([]*model.CloudwatchData, error) 
{\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tm.getMetricsCalled++\n\treturn m.result, m.err\n}\n\nfunc (m *mockMetricsService) IsMetricSupported(_ string) bool {\n\treturn true\n}\n\nfunc (m *mockMetricsService) getGetMetricsCalled() int {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\treturn m.getMetricsCalled\n}\n\n// mockMetricsServiceRegistry is a mock implementation of MetricsServiceRegistry\ntype mockMetricsServiceRegistry struct {\n\tservices map[string]service.EnhancedMetricsService\n\tgetErr   error\n}\n\nfunc (m *mockMetricsServiceRegistry) GetEnhancedMetricsService(namespace string) (service.EnhancedMetricsService, error) {\n\tif m.getErr != nil {\n\t\treturn nil, m.getErr\n\t}\n\tif svc, ok := m.services[namespace]; ok {\n\t\treturn svc, nil\n\t}\n\treturn nil, errors.New(\"service not found\")\n}\n\nfunc TestNewService(t *testing.T) {\n\tsvc := NewService(&mockConfigProvider{}, &mockMetricsServiceRegistry{})\n\trequire.NotNil(t, svc)\n\trequire.NotNil(t, svc.configProvider)\n}\n\nfunc TestService_GetMetrics(t *testing.T) {\n\tctx := context.Background()\n\tlogger := slog.New(slog.DiscardHandler)\n\tnamespace := \"AWS/RDS\"\n\tregion := \"us-east-1\"\n\trole := model.Role{RoleArn: \"arn:aws:iam::123456789012:role/test\"}\n\tresources := []*model.TaggedResource{\n\t\t{\n\t\t\tARN:       \"arn:aws:rds:us-east-1:123456789012:db:test\",\n\t\t\tNamespace: namespace,\n\t\t\tRegion:    region,\n\t\t},\n\t}\n\tmetrics := []*model.EnhancedMetricConfig{\n\t\t{Name: \"AllocatedStorage\"},\n\t}\n\texportedTags := []string{\"Name\"}\n\n\ttests := []struct {\n\t\tname                 string\n\t\tnamespace            string\n\t\tregistry             MetricsServiceRegistry\n\t\twantErr              bool\n\t\terrMsg               string\n\t\twantData             []*model.CloudwatchData\n\t\twantGetMetricsCalled int\n\t}{\n\t\t{\n\t\t\tname:      \"successfully get metrics\",\n\t\t\tnamespace: namespace,\n\t\t\tregistry: &mockMetricsServiceRegistry{\n\t\t\t\tservices: 
map[string]service.EnhancedMetricsService{\n\t\t\t\t\tnamespace: &mockMetricsService{\n\t\t\t\t\t\tresult: []*model.CloudwatchData{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tMetricName:   \"AllocatedStorage\",\n\t\t\t\t\t\t\t\tResourceName: \"arn:aws:rds:us-east-1:123456789012:db:test\",\n\t\t\t\t\t\t\t\tNamespace:    namespace,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t\twantData: []*model.CloudwatchData{\n\t\t\t\t{\n\t\t\t\t\tMetricName:   \"AllocatedStorage\",\n\t\t\t\t\tResourceName: \"arn:aws:rds:us-east-1:123456789012:db:test\",\n\t\t\t\t\tNamespace:    namespace,\n\t\t\t\t},\n\t\t\t},\n\t\t\twantGetMetricsCalled: 1,\n\t\t},\n\t\t{\n\t\t\tname:      \"failure when service not found in registry\",\n\t\t\tnamespace: namespace,\n\t\t\tregistry: &mockMetricsServiceRegistry{\n\t\t\t\tservices: map[string]service.EnhancedMetricsService{},\n\t\t\t},\n\t\t\twantErr: true,\n\t\t\terrMsg:  \"service not found\",\n\t\t},\n\t\t{\n\t\t\tname:      \"failure when service GetMetrics returns error\",\n\t\t\tnamespace: namespace,\n\t\t\tregistry: &mockMetricsServiceRegistry{\n\t\t\t\tservices: map[string]service.EnhancedMetricsService{\n\t\t\t\t\tnamespace: &mockMetricsService{\n\t\t\t\t\t\terr: errors.New(\"get metric error\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr:              true,\n\t\t\terrMsg:               \"get metric error\",\n\t\t\twantGetMetricsCalled: 1,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tsvc := NewService(\n\t\t\t\t&mockConfigProvider{},\n\t\t\t\ttt.registry,\n\t\t\t)\n\n\t\t\tdata, err := svc.GetMetrics(ctx, logger, tt.namespace, resources, metrics, exportedTags, region, role)\n\n\t\t\tif tt.wantErr {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\trequire.Contains(t, err.Error(), tt.errMsg)\n\t\t\t\trequire.Nil(t, data)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Equal(t, tt.wantData, data)\n\t\t\t}\n\n\t\t\tif 
tt.wantGetMetricsCalled > 0 {\n\t\t\t\tmockSvc := tt.registry.(*mockMetricsServiceRegistry).services[tt.namespace].(*mockMetricsService)\n\t\t\t\trequire.Equal(t, tt.wantGetMetricsCalled, mockSvc.getGetMetricsCalled())\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/cloudwatchrunner/customnamespace.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage cloudwatchrunner\n\nimport (\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/listmetrics\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype CustomNamespaceJob struct {\n\tJob model.CustomNamespaceJob\n}\n\nfunc (c CustomNamespaceJob) Namespace() string {\n\treturn c.Job.Namespace\n}\n\nfunc (c CustomNamespaceJob) listMetricsParams() listmetrics.ProcessingParams {\n\treturn listmetrics.ProcessingParams{\n\t\tNamespace:                 c.Job.Namespace,\n\t\tMetrics:                   c.Job.Metrics,\n\t\tRecentlyActiveOnly:        c.Job.RecentlyActiveOnly,\n\t\tDimensionNameRequirements: c.Job.DimensionNameRequirements,\n\t}\n}\n\nfunc (c CustomNamespaceJob) CustomTags() []model.Tag {\n\treturn c.Job.CustomTags\n}\n\nfunc (c CustomNamespaceJob) resourceEnrichment() ResourceEnrichment {\n\t// TODO add implementation in followup\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/job/cloudwatchrunner/discovery.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage cloudwatchrunner\n\nimport (\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/listmetrics\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype DiscoveryJob struct {\n\tJob       model.DiscoveryJob\n\tResources []*model.TaggedResource\n}\n\nfunc (d DiscoveryJob) Namespace() string {\n\treturn d.Job.Namespace\n}\n\nfunc (d DiscoveryJob) CustomTags() []model.Tag {\n\treturn d.Job.CustomTags\n}\n\nfunc (d DiscoveryJob) listMetricsParams() listmetrics.ProcessingParams {\n\treturn listmetrics.ProcessingParams{\n\t\tNamespace:                 d.Job.Namespace,\n\t\tMetrics:                   d.Job.Metrics,\n\t\tRecentlyActiveOnly:        d.Job.RecentlyActiveOnly,\n\t\tDimensionNameRequirements: d.Job.DimensionNameRequirements,\n\t}\n}\n\nfunc (d DiscoveryJob) resourceEnrichment() ResourceEnrichment {\n\t// TODO add implementation in followup\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/job/cloudwatchrunner/runner.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage cloudwatchrunner\n\nimport (\n\t\"log/slog\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/listmetrics\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/resourcemetadata\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype ResourceEnrichment interface {\n\tCreate(logger *slog.Logger) resourcemetadata.MetricResourceEnricher\n}\n\ntype Job interface {\n\tNamespace() string\n\tCustomTags() []model.Tag\n\tlistMetricsParams() listmetrics.ProcessingParams\n\tresourceEnrichment() ResourceEnrichment\n}\n"
  },
  {
    "path": "pkg/job/custom.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage job\n\nimport (\n\t\"context\"\n\t\"log/slog\"\n\t\"sync\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nfunc runCustomNamespaceJob(\n\tctx context.Context,\n\tlogger *slog.Logger,\n\tjob model.CustomNamespaceJob,\n\tclientCloudwatch cloudwatch.Client,\n\tgmdProcessor getMetricDataProcessor,\n) []*model.CloudwatchData {\n\tcloudwatchDatas := getMetricDataForQueriesForCustomNamespace(ctx, job, clientCloudwatch, logger)\n\tif len(cloudwatchDatas) == 0 {\n\t\tlogger.Debug(\"No metrics data found\")\n\t\treturn nil\n\t}\n\n\tvar err error\n\tcloudwatchDatas, err = gmdProcessor.Run(ctx, job.Namespace, cloudwatchDatas)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to get metric data\", \"err\", err)\n\t\treturn nil\n\t}\n\n\treturn cloudwatchDatas\n}\n\nfunc getMetricDataForQueriesForCustomNamespace(\n\tctx context.Context,\n\tcustomNamespaceJob model.CustomNamespaceJob,\n\tclientCloudwatch cloudwatch.Client,\n\tlogger *slog.Logger,\n) []*model.CloudwatchData {\n\tmux := &sync.Mutex{}\n\tvar getMetricDatas []*model.CloudwatchData\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(customNamespaceJob.Metrics))\n\n\tfor _, metric := range customNamespaceJob.Metrics {\n\t\t// For every metric of the job get the full list of 
metrics.\n\t\t// This includes, for this metric the possible combinations\n\t\t// of dimensions and value of dimensions with data.\n\n\t\tgo func(metric *model.MetricConfig) {\n\t\t\tdefer wg.Done()\n\t\t\terr := clientCloudwatch.ListMetrics(ctx, customNamespaceJob.Namespace, metric, customNamespaceJob.RecentlyActiveOnly, func(page []*model.Metric) {\n\t\t\t\tvar data []*model.CloudwatchData\n\n\t\t\t\tfor _, cwMetric := range page {\n\t\t\t\t\tif len(customNamespaceJob.DimensionNameRequirements) > 0 && !metricDimensionsMatchNames(cwMetric, customNamespaceJob.DimensionNameRequirements) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, stat := range metric.Statistics {\n\t\t\t\t\t\tdata = append(data, &model.CloudwatchData{\n\t\t\t\t\t\t\tMetricName:   metric.Name,\n\t\t\t\t\t\t\tResourceName: customNamespaceJob.Name,\n\t\t\t\t\t\t\tNamespace:    customNamespaceJob.Namespace,\n\t\t\t\t\t\t\tDimensions:   cwMetric.Dimensions,\n\t\t\t\t\t\t\tGetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{\n\t\t\t\t\t\t\t\tPeriod:    metric.Period,\n\t\t\t\t\t\t\t\tLength:    metric.Length,\n\t\t\t\t\t\t\t\tDelay:     metric.Delay,\n\t\t\t\t\t\t\t\tStatistic: stat,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\t\tNilToZero:              metric.NilToZero,\n\t\t\t\t\t\t\t\tAddCloudwatchTimestamp: metric.AddCloudwatchTimestamp,\n\t\t\t\t\t\t\t\tExportAllDataPoints:    metric.ExportAllDataPoints,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTags:                      nil,\n\t\t\t\t\t\t\tGetMetricDataResult:       nil,\n\t\t\t\t\t\t\tGetMetricStatisticsResult: nil,\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tmux.Lock()\n\t\t\t\tgetMetricDatas = append(getMetricDatas, data...)\n\t\t\t\tmux.Unlock()\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"Failed to get full metric list\", \"metric_name\", metric.Name, \"namespace\", customNamespaceJob.Namespace, \"err\", 
err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}(metric)\n\t}\n\n\twg.Wait()\n\treturn getMetricDatas\n}\n"
  },
  {
    "path": "pkg/job/discovery.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage job\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/tagging\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/maxdimassociator\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype resourceAssociator interface {\n\tAssociateMetricToResource(cwMetric *model.Metric) (*model.TaggedResource, bool)\n}\n\ntype getMetricDataProcessor interface {\n\tRun(ctx context.Context, namespace string, requests []*model.CloudwatchData) ([]*model.CloudwatchData, error)\n}\n\ntype enhancedMetricsService interface {\n\tGetMetrics(\n\t\tctx context.Context,\n\t\tlogger *slog.Logger,\n\t\tnamespace string,\n\t\tresources []*model.TaggedResource,\n\t\tmetrics []*model.EnhancedMetricConfig,\n\t\texportedTagOnMetrics []string,\n\t\tregion string,\n\t\trole model.Role,\n\t) ([]*model.CloudwatchData, error)\n}\n\nfunc runDiscoveryJob(\n\tctx context.Context,\n\tlogger *slog.Logger,\n\tjob model.DiscoveryJob,\n\tregion string,\n\tclientTag tagging.Client,\n\tclientCloudwatch 
cloudwatch.Client,\n\tgmdProcessor getMetricDataProcessor,\n\tenhancedMetricsService enhancedMetricsService,\n\trole model.Role,\n) ([]*model.TaggedResource, []*model.CloudwatchData) {\n\tlogger.Debug(\"Get tagged resources\")\n\n\tresources, err := clientTag.GetResources(ctx, job, region)\n\tif err != nil {\n\t\tif errors.Is(err, tagging.ErrExpectedToFindResources) {\n\t\t\tlogger.Error(\"No tagged resources made it through filtering\", \"err\", err)\n\t\t} else {\n\t\t\tlogger.Error(\"Couldn't describe resources\", \"err\", err)\n\t\t}\n\t\treturn nil, nil\n\t}\n\n\tif len(resources) == 0 {\n\t\tlogger.Debug(\"No tagged resources\", \"region\", region, \"namespace\", job.Namespace)\n\t}\n\n\tsvc := config.SupportedServices.GetService(job.Namespace)\n\tmetricData := getMetricDataForQueries(ctx, logger, job, svc, clientCloudwatch, resources)\n\n\tif len(metricData) > 0 && svc != nil {\n\t\tmetricData, err = gmdProcessor.Run(ctx, svc.Namespace, metricData)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Failed to get metric data\", \"err\", err)\n\n\t\t\t// ensure we do not return cw metrics on data processing failure\n\t\t\tmetricData = nil\n\t\t}\n\t}\n\n\tif enhancedMetricsService == nil || !job.HasEnhancedMetrics() || svc == nil {\n\t\tif len(metricData) == 0 {\n\t\t\tlogger.Info(\"No metrics data found\")\n\t\t}\n\t\treturn resources, metricData\n\t}\n\n\tlogger.Debug(\"Processing enhanced metrics\", \"count\", len(job.EnhancedMetrics), \"namespace\", svc.Namespace)\n\tenhancedMetricData, err := enhancedMetricsService.GetMetrics(\n\t\tctx,\n\t\tlogger,\n\t\tsvc.Namespace,\n\t\tresources,\n\t\tjob.EnhancedMetrics,\n\t\tjob.ExportedTagsOnMetrics,\n\t\tregion,\n\t\trole,\n\t)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to get enhanced metrics\", \"err\", err)\n\t\treturn resources, metricData\n\t}\n\n\tmetricData = append(metricData, enhancedMetricData...)\n\n\tif len(metricData) == 0 {\n\t\tlogger.Info(\"No metrics data found\")\n\t}\n\n\treturn resources, 
metricData\n}\n\nfunc getMetricDataForQueries(\n\tctx context.Context,\n\tlogger *slog.Logger,\n\tdiscoveryJob model.DiscoveryJob,\n\tsvc *config.ServiceConfig,\n\tclientCloudwatch cloudwatch.Client,\n\tresources []*model.TaggedResource,\n) []*model.CloudwatchData {\n\tmux := &sync.Mutex{}\n\tvar getMetricDatas []*model.CloudwatchData\n\n\tvar assoc resourceAssociator\n\tif len(svc.DimensionRegexps) > 0 && len(resources) > 0 {\n\t\tassoc = maxdimassociator.NewAssociator(logger, discoveryJob.DimensionsRegexps, resources)\n\t} else {\n\t\t// If we don't have dimension regex's and resources there's nothing to associate but metrics shouldn't be skipped\n\t\tassoc = nopAssociator{}\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(discoveryJob.Metrics))\n\n\t// For every metric of the job call the ListMetrics API\n\t// to fetch the existing combinations of dimensions and\n\t// value of dimensions with data.\n\tfor _, metric := range discoveryJob.Metrics {\n\t\tgo func(metric *model.MetricConfig) {\n\t\t\tdefer wg.Done()\n\n\t\t\terr := clientCloudwatch.ListMetrics(ctx, svc.Namespace, metric, discoveryJob.RecentlyActiveOnly, func(page []*model.Metric) {\n\t\t\t\tdata := getFilteredMetricDatas(logger, discoveryJob.Namespace, discoveryJob.ExportedTagsOnMetrics, page, discoveryJob.DimensionNameRequirements, metric, assoc)\n\n\t\t\t\tmux.Lock()\n\t\t\t\tgetMetricDatas = append(getMetricDatas, data...)\n\t\t\t\tmux.Unlock()\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"Failed to get full metric list\", \"metric_name\", metric.Name, \"namespace\", svc.Namespace, \"err\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}(metric)\n\t}\n\n\twg.Wait()\n\treturn getMetricDatas\n}\n\ntype nopAssociator struct{}\n\nfunc (ns nopAssociator) AssociateMetricToResource(_ *model.Metric) (*model.TaggedResource, bool) {\n\treturn nil, false\n}\n\nfunc getFilteredMetricDatas(\n\tlogger *slog.Logger,\n\tnamespace string,\n\ttagsOnMetrics []string,\n\tmetricsList []*model.Metric,\n\tdimensionNameList 
[]string,\n\tm *model.MetricConfig,\n\tassoc resourceAssociator,\n) []*model.CloudwatchData {\n\tgetMetricsData := make([]*model.CloudwatchData, 0, len(metricsList))\n\tfor _, cwMetric := range metricsList {\n\t\tif len(dimensionNameList) > 0 && !metricDimensionsMatchNames(cwMetric, dimensionNameList) {\n\t\t\tcontinue\n\t\t}\n\n\t\tmatchedResource, skip := assoc.AssociateMetricToResource(cwMetric)\n\t\tif skip {\n\t\t\tdimensions := make([]string, 0, len(cwMetric.Dimensions))\n\t\t\tfor _, dim := range cwMetric.Dimensions {\n\t\t\t\tdimensions = append(dimensions, fmt.Sprintf(\"%s=%s\", dim.Name, dim.Value))\n\t\t\t}\n\t\t\tlogger.Debug(\"skipping metric unmatched by associator\", \"metric\", m.Name, \"dimensions\", strings.Join(dimensions, \",\"))\n\n\t\t\tcontinue\n\t\t}\n\n\t\tresource := matchedResource\n\t\tif resource == nil {\n\t\t\tresource = &model.TaggedResource{\n\t\t\t\tARN:       \"global\",\n\t\t\t\tNamespace: namespace,\n\t\t\t}\n\t\t}\n\n\t\tmetricTags := resource.MetricTags(tagsOnMetrics)\n\t\tfor _, stat := range m.Statistics {\n\t\t\tgetMetricsData = append(getMetricsData, &model.CloudwatchData{\n\t\t\t\tMetricName:   m.Name,\n\t\t\t\tResourceName: resource.ARN,\n\t\t\t\tNamespace:    namespace,\n\t\t\t\tDimensions:   cwMetric.Dimensions,\n\t\t\t\tGetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{\n\t\t\t\t\tPeriod:    m.Period,\n\t\t\t\t\tLength:    m.Length,\n\t\t\t\t\tDelay:     m.Delay,\n\t\t\t\t\tStatistic: stat,\n\t\t\t\t},\n\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\tNilToZero:              m.NilToZero,\n\t\t\t\t\tAddCloudwatchTimestamp: m.AddCloudwatchTimestamp,\n\t\t\t\t\tExportAllDataPoints:    m.ExportAllDataPoints,\n\t\t\t\t},\n\t\t\t\tTags:                      metricTags,\n\t\t\t\tGetMetricDataResult:       nil,\n\t\t\t\tGetMetricStatisticsResult: nil,\n\t\t\t})\n\t\t}\n\t}\n\treturn getMetricsData\n}\n\nfunc metricDimensionsMatchNames(metric *model.Metric, dimensionNameRequirements 
[]string) bool {\n\tif len(dimensionNameRequirements) != len(metric.Dimensions) {\n\t\treturn false\n\t}\n\tfor _, dimension := range metric.Dimensions {\n\t\tfoundMatch := false\n\t\tfor _, dimensionName := range dimensionNameRequirements {\n\t\t\tif dimension.Name == dimensionName {\n\t\t\t\tfoundMatch = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !foundMatch {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "pkg/job/discovery_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage job\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/maxdimassociator\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nfunc Test_getFilteredMetricDatas(t *testing.T) {\n\ttype args struct {\n\t\tregion                    string\n\t\taccountID                 string\n\t\tnamespace                 string\n\t\tcustomTags                []model.Tag\n\t\ttagsOnMetrics             []string\n\t\tdimensionRegexps          []model.DimensionsRegexp\n\t\tdimensionNameRequirements []string\n\t\tresources                 []*model.TaggedResource\n\t\tmetricsList               []*model.Metric\n\t\tm                         *model.MetricConfig\n\t}\n\ttests := []struct {\n\t\tname               string\n\t\targs               args\n\t\twantGetMetricsData []model.CloudwatchData\n\t}{\n\t\t{\n\t\t\t\"additional dimension\",\n\t\t\targs{\n\t\t\t\tregion:     \"us-east-1\",\n\t\t\t\taccountID:  \"123123123123\",\n\t\t\t\tnamespace:  \"efs\",\n\t\t\t\tcustomTags: nil,\n\t\t\t\ttagsOnMetrics: []string{\n\t\t\t\t\t\"Value1\",\n\t\t\t\t\t\"Value2\",\n\t\t\t\t},\n\t\t\t\tdimensionRegexps: 
config.SupportedServices.GetService(\"AWS/EFS\").ToModelDimensionsRegexp(),\n\t\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tARN: \"arn:aws:elasticfilesystem:us-east-1:123123123123:file-system/fs-abc123\",\n\t\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey:   \"Tag\",\n\t\t\t\t\t\t\t\tValue: \"some-Tag\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"efs\",\n\t\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmetricsList: []*model.Metric{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"StorageBytes\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"FileSystemId\",\n\t\t\t\t\t\t\t\tValue: \"fs-abc123\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"StorageClass\",\n\t\t\t\t\t\t\t\tValue: \"Standard\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/EFS\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tm: &model.MetricConfig{\n\t\t\t\t\tName: \"StorageBytes\",\n\t\t\t\t\tStatistics: []string{\n\t\t\t\t\t\t\"Average\",\n\t\t\t\t\t},\n\t\t\t\t\tPeriod:                 60,\n\t\t\t\t\tLength:                 600,\n\t\t\t\t\tDelay:                  120,\n\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]model.CloudwatchData{\n\t\t\t\t{\n\t\t\t\t\tMetricName: \"StorageBytes\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:  \"FileSystemId\",\n\t\t\t\t\t\t\tValue: \"fs-abc123\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:  \"StorageClass\",\n\t\t\t\t\t\t\tValue: \"Standard\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tResourceName: \"arn:aws:elasticfilesystem:us-east-1:123123123123:file-system/fs-abc123\",\n\t\t\t\t\tNamespace:    \"efs\",\n\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"Value1\",\n\t\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"Value2\",\n\t\t\t\t\t\t\tValue: 
\"\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tGetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{\n\t\t\t\t\t\tPeriod:    60,\n\t\t\t\t\t\tLength:    600,\n\t\t\t\t\t\tDelay:     120,\n\t\t\t\t\t\tStatistic: \"Average\",\n\t\t\t\t\t},\n\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"ec2\",\n\t\t\targs{\n\t\t\t\tregion:     \"us-east-1\",\n\t\t\t\taccountID:  \"123123123123\",\n\t\t\t\tnamespace:  \"ec2\",\n\t\t\t\tcustomTags: nil,\n\t\t\t\ttagsOnMetrics: []string{\n\t\t\t\t\t\"Value1\",\n\t\t\t\t\t\"Value2\",\n\t\t\t\t},\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/EC2\").ToModelDimensionsRegexp(),\n\t\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tARN: \"arn:aws:ec2:us-east-1:123123123123:instance/i-12312312312312312\",\n\t\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey:   \"Name\",\n\t\t\t\t\t\t\t\tValue: \"some-Node\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"ec2\",\n\t\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmetricsList: []*model.Metric{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"InstanceId\",\n\t\t\t\t\t\t\t\tValue: \"i-12312312312312312\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/EC2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tm: &model.MetricConfig{\n\t\t\t\t\tName: \"CPUUtilization\",\n\t\t\t\t\tStatistics: []string{\n\t\t\t\t\t\t\"Average\",\n\t\t\t\t\t},\n\t\t\t\t\tPeriod:                 60,\n\t\t\t\t\tLength:                 600,\n\t\t\t\t\tDelay:                  120,\n\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]model.CloudwatchData{\n\t\t\t\t{\n\t\t\t\t\tMetricName:   
\"CPUUtilization\",\n\t\t\t\t\tResourceName: \"arn:aws:ec2:us-east-1:123123123123:instance/i-12312312312312312\",\n\t\t\t\t\tNamespace:    \"ec2\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:  \"InstanceId\",\n\t\t\t\t\t\t\tValue: \"i-12312312312312312\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"Value1\",\n\t\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"Value2\",\n\t\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tGetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{\n\t\t\t\t\t\tStatistic: \"Average\",\n\t\t\t\t\t\tPeriod:    60,\n\t\t\t\t\t\tLength:    600,\n\t\t\t\t\t\tDelay:     120,\n\t\t\t\t\t},\n\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"kafka\",\n\t\t\targs{\n\t\t\t\tregion:     \"us-east-1\",\n\t\t\t\taccountID:  \"123123123123\",\n\t\t\t\tnamespace:  \"kafka\",\n\t\t\t\tcustomTags: nil,\n\t\t\t\ttagsOnMetrics: []string{\n\t\t\t\t\t\"Value1\",\n\t\t\t\t\t\"Value2\",\n\t\t\t\t},\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/Kafka\").ToModelDimensionsRegexp(),\n\t\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tARN: \"arn:aws:kafka:us-east-1:123123123123:cluster/demo-cluster-1/12312312-1231-1231-1231-123123123123-12\",\n\t\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey:   \"Test\",\n\t\t\t\t\t\t\t\tValue: \"Value\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"kafka\",\n\t\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmetricsList: []*model.Metric{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"GlobalTopicCount\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"Cluster Name\",\n\t\t\t\t\t\t\t\tValue: 
\"demo-cluster-1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/Kafka\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tm: &model.MetricConfig{\n\t\t\t\t\tName: \"GlobalTopicCount\",\n\t\t\t\t\tStatistics: []string{\n\t\t\t\t\t\t\"Average\",\n\t\t\t\t\t},\n\t\t\t\t\tPeriod:                 60,\n\t\t\t\t\tLength:                 600,\n\t\t\t\t\tDelay:                  120,\n\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]model.CloudwatchData{\n\t\t\t\t{\n\t\t\t\t\tMetricName: \"GlobalTopicCount\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:  \"Cluster Name\",\n\t\t\t\t\t\t\tValue: \"demo-cluster-1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tResourceName: \"arn:aws:kafka:us-east-1:123123123123:cluster/demo-cluster-1/12312312-1231-1231-1231-123123123123-12\",\n\t\t\t\t\tNamespace:    \"kafka\",\n\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"Value1\",\n\t\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"Value2\",\n\t\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tGetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{\n\t\t\t\t\t\tStatistic: \"Average\",\n\t\t\t\t\t\tPeriod:    60,\n\t\t\t\t\t\tLength:    600,\n\t\t\t\t\t\tDelay:     120,\n\t\t\t\t\t},\n\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"alb\",\n\t\t\targs{\n\t\t\t\tregion:                    \"us-east-1\",\n\t\t\t\taccountID:                 \"123123123123\",\n\t\t\t\tnamespace:                 \"alb\",\n\t\t\t\tcustomTags:                nil,\n\t\t\t\ttagsOnMetrics:             nil,\n\t\t\t\tdimensionRegexps:          config.SupportedServices.GetService(\"AWS/ApplicationELB\").ToModelDimensionsRegexp(),\n\t\t\t\tdimensionNameRequirements: 
[]string{\"LoadBalancer\", \"TargetGroup\"},\n\t\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tARN: \"arn:aws:elasticloadbalancing:us-east-1:123123123123:loadbalancer/app/some-ALB/0123456789012345\",\n\t\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey:   \"Name\",\n\t\t\t\t\t\t\t\tValue: \"some-ALB\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"alb\",\n\t\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmetricsList: []*model.Metric{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"RequestCount\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"LoadBalancer\",\n\t\t\t\t\t\t\t\tValue: \"app/some-ALB/0123456789012345\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"TargetGroup\",\n\t\t\t\t\t\t\t\tValue: \"targetgroup/some-ALB/9999666677773333\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"AvailabilityZone\",\n\t\t\t\t\t\t\t\tValue: \"us-east-1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ApplicationELB\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"RequestCount\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"LoadBalancer\",\n\t\t\t\t\t\t\t\tValue: \"app/some-ALB/0123456789012345\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"TargetGroup\",\n\t\t\t\t\t\t\t\tValue: \"targetgroup/some-ALB/9999666677773333\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ApplicationELB\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"RequestCount\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"LoadBalancer\",\n\t\t\t\t\t\t\t\tValue: \"app/some-ALB/0123456789012345\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"AvailabilityZone\",\n\t\t\t\t\t\t\t\tValue: \"us-east-1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: 
\"AWS/ApplicationELB\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"RequestCount\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"LoadBalancer\",\n\t\t\t\t\t\t\t\tValue: \"app/some-ALB/0123456789012345\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ApplicationELB\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tm: &model.MetricConfig{\n\t\t\t\t\tName: \"RequestCount\",\n\t\t\t\t\tStatistics: []string{\n\t\t\t\t\t\t\"Sum\",\n\t\t\t\t\t},\n\t\t\t\t\tPeriod:                 60,\n\t\t\t\t\tLength:                 600,\n\t\t\t\t\tDelay:                  120,\n\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]model.CloudwatchData{\n\t\t\t\t{\n\t\t\t\t\tMetricName: \"RequestCount\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:  \"LoadBalancer\",\n\t\t\t\t\t\t\tValue: \"app/some-ALB/0123456789012345\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:  \"TargetGroup\",\n\t\t\t\t\t\t\tValue: \"targetgroup/some-ALB/9999666677773333\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tResourceName: \"arn:aws:elasticloadbalancing:us-east-1:123123123123:loadbalancer/app/some-ALB/0123456789012345\",\n\t\t\t\t\tNamespace:    \"alb\",\n\t\t\t\t\tTags:         []model.Tag{},\n\t\t\t\t\tGetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{\n\t\t\t\t\t\tStatistic: \"Sum\",\n\t\t\t\t\t\tPeriod:    60,\n\t\t\t\t\t\tLength:    600,\n\t\t\t\t\t\tDelay:     120,\n\t\t\t\t\t},\n\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"sagemaker: ARN contains uppercase letters\",\n\t\t\targs{\n\t\t\t\tregion:           \"us-east-1\",\n\t\t\t\taccountID:        \"123123123123\",\n\t\t\t\tnamespace:        \"AWS/SageMaker\",\n\t\t\t\tdimensionRegexps: 
config.SupportedServices.GetService(\"AWS/SageMaker\").ToModelDimensionsRegexp(),\n\t\t\t\tresources: []*model.TaggedResource{\n\t\t\t\t\t{\n\t\t\t\t\t\tARN: \"arn:aws:sagemaker:us-east-1:123123123123:endpoint/someEndpoint\",\n\t\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey:   \"Environment\",\n\t\t\t\t\t\t\t\tValue: \"prod\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"sagemaker\",\n\t\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmetricsList: []*model.Metric{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"Invocation4XXErrors\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{Name: \"EndpointName\", Value: \"someEndpoint\"},\n\t\t\t\t\t\t\t{Name: \"VariantName\", Value: \"AllTraffic\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/SageMaker\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tm: &model.MetricConfig{\n\t\t\t\t\tName: \"Invocation4XXErrors\",\n\t\t\t\t\tStatistics: []string{\n\t\t\t\t\t\t\"Sum\",\n\t\t\t\t\t},\n\t\t\t\t\tPeriod: 60,\n\t\t\t\t\tLength: 600,\n\t\t\t\t\tDelay:  120,\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]model.CloudwatchData{\n\t\t\t\t{\n\t\t\t\t\tMetricName: \"Invocation4XXErrors\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"EndpointName\", Value: \"someEndpoint\"},\n\t\t\t\t\t\t{Name: \"VariantName\", Value: \"AllTraffic\"},\n\t\t\t\t\t},\n\t\t\t\t\tResourceName: \"arn:aws:sagemaker:us-east-1:123123123123:endpoint/someEndpoint\",\n\t\t\t\t\tNamespace:    \"AWS/SageMaker\",\n\t\t\t\t\tGetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{\n\t\t\t\t\t\tStatistic: \"Sum\",\n\t\t\t\t\t\tPeriod:    60,\n\t\t\t\t\t\tLength:    600,\n\t\t\t\t\t\tDelay:     120,\n\t\t\t\t\t},\n\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tassoc := maxdimassociator.NewAssociator(promslog.NewNopLogger(), tt.args.dimensionRegexps, 
tt.args.resources)\n\t\t\tmetricDatas := getFilteredMetricDatas(promslog.NewNopLogger(), tt.args.namespace, tt.args.tagsOnMetrics, tt.args.metricsList, tt.args.dimensionNameRequirements, tt.args.m, assoc)\n\t\t\tif len(metricDatas) != len(tt.wantGetMetricsData) {\n\t\t\t\tt.Errorf(\"len(getFilteredMetricDatas()) = %v, want %v\", len(metricDatas), len(tt.wantGetMetricsData))\n\t\t\t}\n\t\t\tfor i, got := range metricDatas {\n\t\t\t\twant := tt.wantGetMetricsData[i]\n\t\t\t\tassert.Equal(t, want.MetricName, got.MetricName)\n\t\t\t\tassert.Equal(t, want.ResourceName, got.ResourceName)\n\t\t\t\tassert.Equal(t, want.Namespace, got.Namespace)\n\t\t\t\tassert.ElementsMatch(t, want.Dimensions, got.Dimensions)\n\t\t\t\tassert.ElementsMatch(t, want.Tags, got.Tags)\n\t\t\t\tassert.Equal(t, want.MetricMigrationParams, got.MetricMigrationParams)\n\t\t\t\tassert.Equal(t, want.GetMetricDataProcessingParams.Statistic, got.GetMetricDataProcessingParams.Statistic)\n\t\t\t\tassert.Equal(t, want.GetMetricDataProcessingParams.Length, got.GetMetricDataProcessingParams.Length)\n\t\t\t\tassert.Equal(t, want.GetMetricDataProcessingParams.Period, got.GetMetricDataProcessingParams.Period)\n\t\t\t\tassert.Equal(t, want.GetMetricDataProcessingParams.Delay, got.GetMetricDataProcessingParams.Delay)\n\t\t\t\tassert.Nil(t, got.GetMetricDataResult)\n\t\t\t\tassert.Nil(t, got.GetMetricStatisticsResult)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/getmetricdata/compact.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage getmetricdata\n\n// compact iterates over a slice of pointers and deletes\n// unwanted elements as per the keep function return value.\n// The slice is modified in-place without copying elements.\nfunc compact[T any](input []*T, keep func(el *T) bool) []*T {\n\t// move all elements that must be kept at the beginning\n\ti := 0\n\tfor _, d := range input {\n\t\tif keep(d) {\n\t\t\tinput[i] = d\n\t\t\ti++\n\t\t}\n\t}\n\t// nil out any left element\n\tfor j := i; j < len(input); j++ {\n\t\tinput[j] = nil\n\t}\n\t// set new slice length to allow released elements to be collected\n\treturn input[:i]\n}\n"
  },
  {
    "path": "pkg/job/getmetricdata/compact_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage getmetricdata\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestCompact(t *testing.T) {\n\ttype data struct {\n\t\tn int\n\t}\n\n\ttype testCase struct {\n\t\tname        string\n\t\tinput       []*data\n\t\tkeepFunc    func(el *data) bool\n\t\texpectedRes []*data\n\t}\n\n\ttestCases := []testCase{\n\t\t{\n\t\t\tname:        \"empty\",\n\t\t\tinput:       []*data{},\n\t\t\tkeepFunc:    nil,\n\t\t\texpectedRes: []*data{},\n\t\t},\n\t\t{\n\t\t\tname:        \"one element input, one element result\",\n\t\t\tinput:       []*data{{n: 0}},\n\t\t\tkeepFunc:    func(_ *data) bool { return true },\n\t\t\texpectedRes: []*data{{n: 0}},\n\t\t},\n\t\t{\n\t\t\tname:        \"one element input, empty result\",\n\t\t\tinput:       []*data{{n: 0}},\n\t\t\tkeepFunc:    func(_ *data) bool { return false },\n\t\t\texpectedRes: []*data{},\n\t\t},\n\t\t{\n\t\t\tname:        \"two elements input, two elements result\",\n\t\t\tinput:       []*data{{n: 0}, {n: 1}},\n\t\t\tkeepFunc:    func(_ *data) bool { return true },\n\t\t\texpectedRes: []*data{{n: 0}, {n: 1}},\n\t\t},\n\t\t{\n\t\t\tname:        \"two elements input, one element result (first)\",\n\t\t\tinput:       []*data{{n: 0}, {n: 1}},\n\t\t\tkeepFunc:    func(el *data) bool { return el.n == 1 },\n\t\t\texpectedRes: []*data{{n: 1}},\n\t\t},\n\t\t{\n\t\t\tname:        \"two 
elements input, one element result (last)\",\n\t\t\tinput:       []*data{{n: 0}, {n: 1}},\n\t\t\tkeepFunc:    func(el *data) bool { return el.n == 0 },\n\t\t\texpectedRes: []*data{{n: 0}},\n\t\t},\n\t\t{\n\t\t\tname:        \"two elements input, empty result\",\n\t\t\tinput:       []*data{{n: 0}, {n: 1}},\n\t\t\tkeepFunc:    func(_ *data) bool { return false },\n\t\t\texpectedRes: []*data{},\n\t\t},\n\t\t{\n\t\t\tname:        \"three elements input, empty result\",\n\t\t\tinput:       []*data{{n: 0}, {n: 1}, {n: 2}},\n\t\t\tkeepFunc:    func(el *data) bool { return el.n < 0 },\n\t\t\texpectedRes: []*data{},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tres := compact(tc.input, tc.keepFunc)\n\t\t\trequire.Equal(t, tc.expectedRes, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/getmetricdata/iterator.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage getmetricdata\n\nimport (\n\t\"math\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype iteratorFactory struct {\n\tmetricsPerQuery int\n}\n\nfunc (b iteratorFactory) Build(data []*model.CloudwatchData) Iterator {\n\tif len(data) == 0 {\n\t\treturn nothingToIterate{}\n\t}\n\n\tbatchSizesByPeriodAndDelay, longestLengthForBatch := mapProcessingParams(data)\n\n\tif len(batchSizesByPeriodAndDelay) == 1 {\n\t\t// Only 1 period use value from data\n\t\tperiod := data[0].GetMetricDataProcessingParams.Period\n\t\tif len(batchSizesByPeriodAndDelay[period]) == 1 {\n\t\t\t// Only 1 period with 1 delay use value from data and do simple batching\n\t\t\tdelay := data[0].GetMetricDataProcessingParams.Delay\n\t\t\tparams := StartAndEndTimeParams{\n\t\t\t\tPeriod: period,\n\t\t\t\tLength: longestLengthForBatch[period][delay],\n\t\t\t\tDelay:  delay,\n\t\t\t}\n\n\t\t\treturn NewSimpleBatchIterator(b.metricsPerQuery, data, params)\n\t\t}\n\t}\n\n\treturn NewVaryingTimeParameterBatchingIterator(b.metricsPerQuery, data, batchSizesByPeriodAndDelay, longestLengthForBatch)\n}\n\ntype (\n\tperiodDelayToBatchSize     = map[int64]map[int64]int\n\tperiodDelayToLongestLength = map[int64]map[int64]int64\n)\n\n// mapProcessingParams loops through all the incoming CloudwatchData to pre-compute important information\n// to be used 
when initializing the batching iterator\n// Knowing the period + delay combinations with their batch sizes will allow us to pre-allocate the batch slices that could\n// be very large ahead of time without looping again later\n// Similarly we need to know the largest length for a period + delay combination later so gathering it while we are already\n// iterating will save some cycles later\nfunc mapProcessingParams(data []*model.CloudwatchData) (periodDelayToBatchSize, periodDelayToLongestLength) {\n\tbatchSizesByPeriodAndDelay := periodDelayToBatchSize{}\n\tlongestLengthForBatch := periodDelayToLongestLength{}\n\n\tfor _, datum := range data {\n\t\tperiod := datum.GetMetricDataProcessingParams.Period\n\t\tdelay := datum.GetMetricDataProcessingParams.Delay\n\t\tif _, exists := batchSizesByPeriodAndDelay[period]; !exists {\n\t\t\tbatchSizesByPeriodAndDelay[period] = map[int64]int{delay: 0}\n\t\t\tlongestLengthForBatch[period] = map[int64]int64{delay: 0}\n\t\t}\n\t\tif _, exists := batchSizesByPeriodAndDelay[period][delay]; !exists {\n\t\t\tbatchSizesByPeriodAndDelay[period][delay] = 0\n\t\t\tlongestLengthForBatch[period][delay] = 0\n\t\t}\n\t\tbatchSizesByPeriodAndDelay[period][delay]++\n\t\tif longestLengthForBatch[period][delay] < datum.GetMetricDataProcessingParams.Length {\n\t\t\tlongestLengthForBatch[period][delay] = datum.GetMetricDataProcessingParams.Length\n\t\t}\n\t}\n\n\treturn batchSizesByPeriodAndDelay, longestLengthForBatch\n}\n\ntype nothingToIterate struct{}\n\nfunc (n nothingToIterate) Next() ([]*model.CloudwatchData, StartAndEndTimeParams) {\n\treturn nil, StartAndEndTimeParams{}\n}\n\nfunc (n nothingToIterate) HasMore() bool {\n\treturn false\n}\n\ntype simpleBatchingIterator struct {\n\tsize            int\n\tcurrentBatch    int\n\tdata            []*model.CloudwatchData\n\tentriesPerBatch int\n\tbatchParams     StartAndEndTimeParams\n}\n\nfunc (s *simpleBatchingIterator) Next() ([]*model.CloudwatchData, StartAndEndTimeParams) {\n\t// We are out of 
data return defaults\n\tif s.currentBatch >= s.size {\n\t\treturn nil, StartAndEndTimeParams{}\n\t}\n\n\tstartingIndex := s.currentBatch * s.entriesPerBatch\n\tendingIndex := startingIndex + s.entriesPerBatch\n\tif endingIndex > len(s.data) {\n\t\tendingIndex = len(s.data)\n\t}\n\n\t// TODO are we technically doing this https://go.dev/wiki/SliceTricks#batching-with-minimal-allocation and if not\n\t// would it change allocations to do this ahead of time?\n\tresult := s.data[startingIndex:endingIndex]\n\ts.currentBatch++\n\n\treturn result, s.batchParams\n}\n\nfunc (s *simpleBatchingIterator) HasMore() bool {\n\treturn s.currentBatch < s.size\n}\n\n// NewSimpleBatchIterator returns an iterator which slices the data in place based on the metricsPerQuery.\nfunc NewSimpleBatchIterator(metricsPerQuery int, data []*model.CloudwatchData, batchParams StartAndEndTimeParams) Iterator {\n\treturn &simpleBatchingIterator{\n\t\tsize:            int(math.Ceil(float64(len(data)) / float64(metricsPerQuery))),\n\t\tbatchParams:     batchParams,\n\t\tdata:            data,\n\t\tentriesPerBatch: metricsPerQuery,\n\t}\n}\n\ntype timeParameterBatchingIterator struct {\n\tcurrent   Iterator\n\tremaining []Iterator\n}\n\nfunc (t *timeParameterBatchingIterator) Next() ([]*model.CloudwatchData, StartAndEndTimeParams) {\n\tbatch, params := t.current.Next()\n\n\t// Doing this before returning from Next drastically simplifies HasMore because it can depend on\n\t// t.current.HasMore() being accurate.\n\tif !t.current.HasMore() {\n\t\t// Current iterator is out and there's none left, set current to nothingToIterate\n\t\tif len(t.remaining) == 0 {\n\t\t\tt.remaining = nil\n\t\t\tt.current = nothingToIterate{}\n\t\t} else {\n\t\t\t// Pop from https://go.dev/wiki/SliceTricks\n\t\t\tnext, remaining := t.remaining[len(t.remaining)-1], t.remaining[:len(t.remaining)-1]\n\t\t\tt.current = next\n\t\t\tt.remaining = remaining\n\t\t}\n\t}\n\n\treturn batch, params\n}\n\nfunc (t 
*timeParameterBatchingIterator) HasMore() bool {\n\treturn t.current.HasMore()\n}\n\nfunc NewVaryingTimeParameterBatchingIterator(\n\tmetricsPerQuery int,\n\tdata []*model.CloudwatchData,\n\tbatchSizes periodDelayToBatchSize,\n\tlongestLengthForBatch periodDelayToLongestLength,\n) Iterator {\n\tbatches := make(map[int64]map[int64][]*model.CloudwatchData, len(batchSizes))\n\tnumberOfIterators := 0\n\t// Pre-allocate batch slices\n\tfor period, delays := range batchSizes {\n\t\tbatches[period] = make(map[int64][]*model.CloudwatchData, len(delays))\n\t\tfor delay, batchSize := range delays {\n\t\t\tnumberOfIterators++\n\t\t\tbatches[period][delay] = make([]*model.CloudwatchData, 0, batchSize)\n\t\t}\n\t}\n\n\t// Fill the batches\n\tfor _, datum := range data {\n\t\tparams := datum.GetMetricDataProcessingParams\n\t\tbatch := batches[params.Period][params.Delay]\n\t\tbatches[params.Period][params.Delay] = append(batch, datum)\n\t}\n\n\tvar firstIterator Iterator\n\titerators := make([]Iterator, 0, numberOfIterators-1)\n\t// We are ranging a map, and we won't have an index to mark the first iterator\n\tisFirst := true\n\tfor period, delays := range batches {\n\t\tfor delay, batch := range delays {\n\t\t\tbatchParams := StartAndEndTimeParams{\n\t\t\t\tPeriod: period,\n\t\t\t\tDelay:  delay,\n\t\t\t}\n\t\t\t// Make sure to set the length to the longest length for the batch\n\t\t\tbatchParams.Length = longestLengthForBatch[period][delay]\n\t\t\titerator := NewSimpleBatchIterator(metricsPerQuery, batch, batchParams)\n\t\t\tif isFirst {\n\t\t\t\tfirstIterator = iterator\n\t\t\t\tisFirst = false\n\t\t\t} else {\n\t\t\t\titerators = append(iterators, iterator)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &timeParameterBatchingIterator{\n\t\tcurrent:   firstIterator,\n\t\tremaining: iterators,\n\t}\n}\n"
  },
  {
    "path": "pkg/job/getmetricdata/iterator_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage getmetricdata\n\nimport (\n\t\"math/rand\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nfunc TestIteratorFactory_Build(t *testing.T) {\n\ttests := []struct {\n\t\tname             string\n\t\tinput            []*model.CloudwatchData\n\t\texpectedIterator Iterator\n\t}{\n\t\t{\n\t\t\tname:             \"empty returns nothing to iterator\",\n\t\t\tinput:            []*model.CloudwatchData{},\n\t\t\texpectedIterator: nothingToIterate{},\n\t\t},\n\t\t{\n\t\t\tname: \"input with consistent period and delay returns simple batching\",\n\t\t\tinput: []*model.CloudwatchData{\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Period: 10, Delay: 100}},\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Period: 10, Delay: 100}},\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Period: 10, Delay: 100}},\n\t\t\t},\n\t\t\texpectedIterator: &simpleBatchingIterator{},\n\t\t},\n\t\t{\n\t\t\tname: \"input with inconsistent period returns time param batching\",\n\t\t\tinput: []*model.CloudwatchData{\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Period: 10, Delay: 100}},\n\t\t\t\t{GetMetricDataProcessingParams: 
&model.GetMetricDataProcessingParams{Period: 11, Delay: 100}},\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Period: 12, Delay: 100}},\n\t\t\t},\n\t\t\texpectedIterator: &timeParameterBatchingIterator{},\n\t\t},\n\t\t{\n\t\t\tname: \"input with inconsistent delay returns time param batching\",\n\t\t\tinput: []*model.CloudwatchData{\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Period: 10, Delay: 100}},\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Period: 10, Delay: 101}},\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Period: 10, Delay: 102}},\n\t\t\t},\n\t\t\texpectedIterator: &timeParameterBatchingIterator{},\n\t\t},\n\t\t{\n\t\t\tname: \"input with inconsistent period and delay returns time param batching\",\n\t\t\tinput: []*model.CloudwatchData{\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Period: 10, Delay: 100}},\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Period: 11, Delay: 101}},\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Period: 12, Delay: 102}},\n\t\t\t},\n\t\t\texpectedIterator: &timeParameterBatchingIterator{},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tfactory := iteratorFactory{100}\n\t\t\titerator := factory.Build(tc.input)\n\t\t\tassert.IsType(t, tc.expectedIterator, iterator)\n\t\t})\n\t}\n}\n\nfunc TestSimpleBatchingIterator_SetsLengthAndDelay(t *testing.T) {\n\tdata := []*model.CloudwatchData{\n\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Period: 101, Delay: 100}},\n\t}\n\tparams := StartAndEndTimeParams{\n\t\tPeriod: 102,\n\t\tLength: 101,\n\t\tDelay:  100,\n\t}\n\titerator := NewSimpleBatchIterator(1, data, params)\n\t_, out := iterator.Next()\n\tassert.Equal(t, params, out)\n}\n\nfunc TestSimpleBatchingIterator_IterateFlow(t *testing.T) 
{\n\ttests := []struct {\n\t\tname                        string\n\t\tmetricsPerQuery             int\n\t\tlengthOfCloudwatchData      int\n\t\texpectedNumberOfCallsToNext int\n\t}{\n\t\t{\n\t\t\tname:                        \"1 per batch\",\n\t\t\tmetricsPerQuery:             1,\n\t\t\tlengthOfCloudwatchData:      10,\n\t\t\texpectedNumberOfCallsToNext: 10,\n\t\t},\n\t\t{\n\t\t\tname:                        \"divisible batches and requests\",\n\t\t\tmetricsPerQuery:             5,\n\t\t\tlengthOfCloudwatchData:      100,\n\t\t\texpectedNumberOfCallsToNext: 20,\n\t\t},\n\t\t{\n\t\t\tname:                        \"indivisible batches and requests\",\n\t\t\tmetricsPerQuery:             5,\n\t\t\tlengthOfCloudwatchData:      94,\n\t\t\texpectedNumberOfCallsToNext: 19,\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tdata := make([]*model.CloudwatchData, 0, tc.lengthOfCloudwatchData)\n\t\t\tfor i := 0; i < tc.lengthOfCloudwatchData; i++ {\n\t\t\t\tdata = append(data, getSampleMetricDatas(strconv.Itoa(i)))\n\t\t\t}\n\t\t\tparams := StartAndEndTimeParams{\n\t\t\t\tPeriod: data[0].GetMetricDataProcessingParams.Period,\n\t\t\t\tLength: data[0].GetMetricDataProcessingParams.Length,\n\t\t\t\tDelay:  data[0].GetMetricDataProcessingParams.Delay,\n\t\t\t}\n\t\t\titerator := NewSimpleBatchIterator(tc.metricsPerQuery, data, params)\n\n\t\t\toutputData := make([]*model.CloudwatchData, 0, len(data))\n\t\t\tnumberOfCallsToNext := 0\n\t\t\tfor iterator.HasMore() {\n\t\t\t\tnumberOfCallsToNext++\n\t\t\t\tbatch, _ := iterator.Next()\n\t\t\t\toutputData = append(outputData, batch...)\n\t\t\t}\n\n\t\t\tassert.ElementsMatch(t, data, outputData)\n\t\t\tassert.Equal(t, tc.expectedNumberOfCallsToNext, numberOfCallsToNext)\n\t\t})\n\t}\n}\n\nfunc TestVaryingTimeParameterBatchingIterator_IterateFlow(t *testing.T) {\n\ttests := []struct {\n\t\tname                                          string\n\t\tmetricsPerQuery                               
int\n\t\tlengthOfCloudwatchDataByStartAndEndTimeParams map[StartAndEndTimeParams]int\n\t\texpectedBatchesByStartAndEndTimeParams        map[StartAndEndTimeParams]int\n\t}{\n\t\t{\n\t\t\tname:            \"1 per batch - two time parameters\",\n\t\t\tmetricsPerQuery: 1,\n\t\t\tlengthOfCloudwatchDataByStartAndEndTimeParams: map[StartAndEndTimeParams]int{\n\t\t\t\t{Period: 10, Length: 10, Delay: 10}: 10,\n\t\t\t\t{Period: 20, Length: 20, Delay: 20}: 10,\n\t\t\t},\n\t\t\texpectedBatchesByStartAndEndTimeParams: map[StartAndEndTimeParams]int{\n\t\t\t\t{Period: 10, Length: 10, Delay: 10}: 10,\n\t\t\t\t{Period: 20, Length: 20, Delay: 20}: 10,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:            \"1 per batch - uses max length for available period + delay\",\n\t\t\tmetricsPerQuery: 1,\n\t\t\tlengthOfCloudwatchDataByStartAndEndTimeParams: map[StartAndEndTimeParams]int{\n\t\t\t\t{Period: 10, Length: 10, Delay: 10}: 10,\n\t\t\t\t{Period: 10, Length: 30, Delay: 10}: 10,\n\t\t\t\t{Period: 20, Length: 20, Delay: 20}: 10,\n\t\t\t\t{Period: 20, Length: 40, Delay: 20}: 10,\n\t\t\t},\n\t\t\texpectedBatchesByStartAndEndTimeParams: map[StartAndEndTimeParams]int{\n\t\t\t\t{Period: 10, Length: 30, Delay: 10}: 20,\n\t\t\t\t{Period: 20, Length: 40, Delay: 20}: 20,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:            \"divisible batches - two time parameters\",\n\t\t\tmetricsPerQuery: 5,\n\t\t\tlengthOfCloudwatchDataByStartAndEndTimeParams: map[StartAndEndTimeParams]int{\n\t\t\t\t{Period: 10, Length: 10, Delay: 10}: 100,\n\t\t\t\t{Period: 20, Length: 20, Delay: 20}: 100,\n\t\t\t},\n\t\t\texpectedBatchesByStartAndEndTimeParams: map[StartAndEndTimeParams]int{\n\t\t\t\t{Period: 10, Length: 10, Delay: 10}: 20,\n\t\t\t\t{Period: 20, Length: 20, Delay: 20}: 20,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:            \"divisible batches - uses max length for available period + delay\",\n\t\t\tmetricsPerQuery: 5,\n\t\t\tlengthOfCloudwatchDataByStartAndEndTimeParams: map[StartAndEndTimeParams]int{\n\t\t\t\t{Period: 
10, Length: 10, Delay: 10}: 100,\n\t\t\t\t{Period: 10, Length: 30, Delay: 10}: 100,\n\t\t\t\t{Period: 20, Length: 20, Delay: 20}: 100,\n\t\t\t\t{Period: 20, Length: 40, Delay: 20}: 100,\n\t\t\t},\n\t\t\texpectedBatchesByStartAndEndTimeParams: map[StartAndEndTimeParams]int{\n\t\t\t\t{Period: 10, Length: 30, Delay: 10}: 40,\n\t\t\t\t{Period: 20, Length: 40, Delay: 20}: 40,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:            \"indivisible batches - two time parameters\",\n\t\t\tmetricsPerQuery: 5,\n\t\t\tlengthOfCloudwatchDataByStartAndEndTimeParams: map[StartAndEndTimeParams]int{\n\t\t\t\t{Period: 10, Length: 10, Delay: 10}: 94,\n\t\t\t\t{Period: 20, Length: 20, Delay: 20}: 94,\n\t\t\t},\n\t\t\texpectedBatchesByStartAndEndTimeParams: map[StartAndEndTimeParams]int{\n\t\t\t\t{Period: 10, Length: 10, Delay: 10}: 19,\n\t\t\t\t{Period: 20, Length: 20, Delay: 20}: 19,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:            \"indivisible batches - uses max length for available period + delay\",\n\t\t\tmetricsPerQuery: 5,\n\t\t\tlengthOfCloudwatchDataByStartAndEndTimeParams: map[StartAndEndTimeParams]int{\n\t\t\t\t{Period: 10, Length: 10, Delay: 10}: 94,\n\t\t\t\t{Period: 10, Length: 30, Delay: 10}: 94,\n\t\t\t\t{Period: 20, Length: 20, Delay: 20}: 94,\n\t\t\t\t{Period: 20, Length: 40, Delay: 20}: 94,\n\t\t\t},\n\t\t\texpectedBatchesByStartAndEndTimeParams: map[StartAndEndTimeParams]int{\n\t\t\t\t{Period: 10, Length: 30, Delay: 10}: 38,\n\t\t\t\t{Period: 20, Length: 40, Delay: 20}: 38,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tdata := []*model.CloudwatchData{}\n\t\t\tfor params, lengthOfCloudwatchData := range tc.lengthOfCloudwatchDataByStartAndEndTimeParams {\n\t\t\t\tfor i := 0; i < lengthOfCloudwatchData; i++ {\n\t\t\t\t\tentry := getSampleMetricDatas(strconv.Itoa(rand.Int()))\n\t\t\t\t\tentry.GetMetricDataProcessingParams.Length = params.Length\n\t\t\t\t\tentry.GetMetricDataProcessingParams.Delay = 
params.Delay\n\t\t\t\t\tentry.GetMetricDataProcessingParams.Period = params.Period\n\t\t\t\t\tdata = append(data, entry)\n\t\t\t\t}\n\t\t\t}\n\t\t\titerator := iteratorFactory{metricsPerQuery: tc.metricsPerQuery}.Build(data)\n\n\t\t\toutputData := make([]*model.CloudwatchData, 0, len(data))\n\t\t\tnumberOfBatchesByStartAndEndTimeParams := map[StartAndEndTimeParams]int{}\n\t\t\tfor iterator.HasMore() {\n\t\t\t\tbatch, params := iterator.Next()\n\t\t\t\tnumberOfBatchesByStartAndEndTimeParams[params]++\n\t\t\t\toutputData = append(outputData, batch...)\n\t\t\t}\n\n\t\t\tassert.ElementsMatch(t, data, outputData)\n\t\t\tassert.Len(t, numberOfBatchesByStartAndEndTimeParams, len(tc.expectedBatchesByStartAndEndTimeParams))\n\t\t\tfor params, count := range tc.expectedBatchesByStartAndEndTimeParams {\n\t\t\t\tactualCount, ok := numberOfBatchesByStartAndEndTimeParams[params]\n\t\t\t\tassert.True(t, ok, \"output batches was missing expected batches of start and endtime params %+v\", params)\n\t\t\t\tassert.Equal(t, count, actualCount, \"%+v had an incorrect batch count\", params)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/getmetricdata/processor.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage getmetricdata\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org/x/sync/errgroup\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype Client interface {\n\tGetMetricData(ctx context.Context, getMetricData []*model.CloudwatchData, namespace string, startTime time.Time, endTime time.Time) []cloudwatch.MetricDataResult\n}\n\ntype IteratorFactory interface {\n\t// Build returns an ideal batch iterator based on the provided CloudwatchData\n\tBuild(requests []*model.CloudwatchData) Iterator\n}\n\ntype Iterator interface {\n\t// Next returns the next batch of CloudWatch data be used when calling GetMetricData and the start + end time for\n\t// the GetMetricData call\n\t// If called when there are no more batches default values will be returned\n\tNext() ([]*model.CloudwatchData, StartAndEndTimeParams)\n\n\t// HasMore returns true if there are more batches to iterate otherwise false. 
Should be used in a loop\n\t// to govern calls to Next()\n\tHasMore() bool\n}\n\ntype StartAndEndTimeParams struct {\n\tPeriod int64\n\tLength int64\n\tDelay  int64\n}\n\ntype Processor struct {\n\tclient           Client\n\tconcurrency      int\n\twindowCalculator MetricWindowCalculator\n\tlogger           *slog.Logger\n\tfactory          IteratorFactory\n}\n\nfunc NewDefaultProcessor(logger *slog.Logger, client Client, metricsPerQuery int, concurrency int) Processor {\n\treturn NewProcessor(logger, client, concurrency, MetricWindowCalculator{clock: TimeClock{}}, &iteratorFactory{metricsPerQuery: metricsPerQuery})\n}\n\nfunc NewProcessor(logger *slog.Logger, client Client, concurrency int, windowCalculator MetricWindowCalculator, factory IteratorFactory) Processor {\n\treturn Processor{\n\t\tlogger:           logger,\n\t\tclient:           client,\n\t\tconcurrency:      concurrency,\n\t\twindowCalculator: windowCalculator,\n\t\tfactory:          factory,\n\t}\n}\n\nfunc (p Processor) Run(ctx context.Context, namespace string, requests []*model.CloudwatchData) ([]*model.CloudwatchData, error) {\n\tif len(requests) == 0 {\n\t\treturn requests, nil\n\t}\n\n\tg, gCtx := errgroup.WithContext(ctx)\n\tg.SetLimit(p.concurrency)\n\n\titerator := p.factory.Build(requests)\n\tfor iterator.HasMore() {\n\t\tbatch, batchParams := iterator.Next()\n\t\tg.Go(func() error {\n\t\t\tbatch = addQueryIDsToBatch(batch)\n\t\t\tstartTime, endTime := p.windowCalculator.Calculate(toSecondDuration(batchParams.Period), toSecondDuration(batchParams.Length), toSecondDuration(batchParams.Delay))\n\t\t\tp.logger.Debug(\"GetMetricData Window\", \"start_time\", startTime.Format(TimeFormat), \"end_time\", endTime.Format(TimeFormat))\n\n\t\t\tdata := p.client.GetMetricData(gCtx, batch, namespace, startTime, endTime)\n\t\t\tif data != nil {\n\t\t\t\tmapResultsToBatch(p.logger, data, batch)\n\t\t\t} else {\n\t\t\t\tp.logger.Warn(\"GetMetricData partition empty result\", \"start\", startTime, \"end\", 
endTime)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tif err := g.Wait(); err != nil {\n\t\treturn nil, fmt.Errorf(\"GetMetricData work group error: %w\", err)\n\t}\n\n\t// Remove unprocessed/unknown elements in place, if any. Since getMetricDatas\n\t// is a slice of pointers, the compaction can be easily done in-place.\n\trequests = compact(requests, func(m *model.CloudwatchData) bool {\n\t\treturn m.GetMetricDataResult != nil\n\t})\n\n\treturn requests, nil\n}\n\nfunc addQueryIDsToBatch(batch []*model.CloudwatchData) []*model.CloudwatchData {\n\tfor i, entry := range batch {\n\t\tentry.GetMetricDataProcessingParams.QueryID = indexToQueryID(i)\n\t}\n\n\treturn batch\n}\n\nfunc mapResultsToBatch(logger *slog.Logger, results []cloudwatch.MetricDataResult, batch []*model.CloudwatchData) {\n\tfor _, entry := range results {\n\t\tid, err := queryIDToIndex(entry.ID)\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"GetMetricData returned unknown Query ID\", \"err\", err, \"query_id\", id)\n\t\t\tcontinue\n\t\t}\n\t\tif batch[id].GetMetricDataResult == nil {\n\t\t\tcloudwatchData := batch[id]\n\n\t\t\tmappedDataPoints := make([]model.DataPoint, 0, len(entry.DataPoints))\n\t\t\tfor i := 0; i < len(entry.DataPoints); i++ {\n\t\t\t\tmappedDataPoints = append(mappedDataPoints, model.DataPoint{Value: entry.DataPoints[i].Value, Timestamp: entry.DataPoints[i].Timestamp})\n\t\t\t}\n\n\t\t\tcloudwatchData.GetMetricDataResult = &model.GetMetricDataResult{\n\t\t\t\tStatistic:  cloudwatchData.GetMetricDataProcessingParams.Statistic,\n\t\t\t\tDataPoints: mappedDataPoints,\n\t\t\t}\n\n\t\t\t// All GetMetricData processing is done clear the params\n\t\t\tcloudwatchData.GetMetricDataProcessingParams = nil\n\t\t}\n\t}\n}\n\nfunc indexToQueryID(i int) string {\n\treturn fmt.Sprintf(\"id_%d\", i)\n}\n\nfunc queryIDToIndex(queryID string) (int, error) {\n\tnoID := strings.TrimPrefix(queryID, \"id_\")\n\tid, err := strconv.Atoi(noID)\n\treturn id, err\n}\n\nfunc toSecondDuration(i int64) 
time.Duration {\n\treturn time.Duration(i) * time.Second\n}\n"
  },
  {
    "path": "pkg/job/getmetricdata/processor_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage getmetricdata\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype cloudwatchDataInput struct {\n\tMetricName                    string\n\tGetMetricDataProcessingParams *model.GetMetricDataProcessingParams\n}\n\ntype cloudwatchDataOutput struct {\n\tMetricName string\n\t*model.GetMetricDataResult\n}\n\ntype metricDataResultForMetric struct {\n\tMetricName string\n\tresult     cloudwatch.MetricDataResult\n}\n\ntype testClient struct {\n\tGetMetricDataFunc             func(ctx context.Context, getMetricData []*model.CloudwatchData, namespace string, startTime time.Time, endTime time.Time) []cloudwatch.MetricDataResult\n\tGetMetricDataResultForMetrics []metricDataResultForMetric\n}\n\nfunc (t testClient) GetMetricData(ctx context.Context, getMetricData []*model.CloudwatchData, namespace string, startTime time.Time, endTime time.Time) []cloudwatch.MetricDataResult {\n\tif t.GetMetricDataResultForMetrics != nil {\n\t\tvar result []cloudwatch.MetricDataResult\n\t\tfor _, datum := range 
getMetricData {\n\t\t\tfor _, response := range t.GetMetricDataResultForMetrics {\n\t\t\t\tif datum.MetricName == response.MetricName {\n\t\t\t\t\tresponse.result.ID = datum.GetMetricDataProcessingParams.QueryID\n\t\t\t\t\tresult = append(result, response.result)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn result\n\t}\n\treturn t.GetMetricDataFunc(ctx, getMetricData, namespace, startTime, endTime)\n}\n\nfunc TestProcessor_Run(t *testing.T) {\n\tnow := time.Now()\n\ttests := []struct {\n\t\tname                       string\n\t\trequests                   []*cloudwatchDataInput\n\t\tmetricDataResultForMetrics []metricDataResultForMetric\n\t\twant                       []cloudwatchDataOutput\n\t\tmetricsPerBatch            int\n\t}{\n\t\t{\n\t\t\tname: \"successfully maps input to output when GetMetricData returns data\",\n\t\t\trequests: []*cloudwatchDataInput{\n\t\t\t\t{MetricName: \"metric-1\", GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Statistic: \"Average\"}},\n\t\t\t},\n\t\t\tmetricDataResultForMetrics: []metricDataResultForMetric{\n\t\t\t\t{MetricName: \"metric-1\", result: cloudwatch.MetricDataResult{DataPoints: []cloudwatch.DataPoint{{Value: aws.Float64(1000), Timestamp: now}}}},\n\t\t\t},\n\t\t\twant: []cloudwatchDataOutput{\n\t\t\t\t{MetricName: \"metric-1\", GetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Average\", DataPoints: []model.DataPoint{{Value: aws.Float64(1000), Timestamp: now}}}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"handles duplicate results\",\n\t\t\trequests: []*cloudwatchDataInput{\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Statistic: \"Min\"}, MetricName: \"MetricOne\"},\n\t\t\t},\n\t\t\tmetricDataResultForMetrics: []metricDataResultForMetric{\n\t\t\t\t{MetricName: \"MetricOne\", result: cloudwatch.MetricDataResult{DataPoints: []cloudwatch.DataPoint{{Value: aws.Float64(5), Timestamp: time.Date(2023, time.June, 7, 1, 9, 8, 0, time.UTC)}}}},\n\t\t\t\t{MetricName: 
\"MetricOne\", result: cloudwatch.MetricDataResult{DataPoints: []cloudwatch.DataPoint{{Value: aws.Float64(15), Timestamp: time.Date(2023, time.June, 7, 2, 9, 8, 0, time.UTC)}}}},\n\t\t\t},\n\t\t\twant: []cloudwatchDataOutput{\n\t\t\t\t{MetricName: \"MetricOne\", GetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\tStatistic:  \"Min\",\n\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(5), Timestamp: time.Date(2023, time.June, 7, 1, 9, 8, 0, time.UTC)}},\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"does not return a request when QueryID is not in MetricDataResult\",\n\t\t\trequests: []*cloudwatchDataInput{\n\t\t\t\t{MetricName: \"metric-1\", GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Statistic: \"Average\"}},\n\t\t\t\t{MetricName: \"metric-2\", GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Statistic: \"Average\"}},\n\t\t\t},\n\t\t\tmetricDataResultForMetrics: []metricDataResultForMetric{\n\t\t\t\t{MetricName: \"metric-1\", result: cloudwatch.MetricDataResult{DataPoints: []cloudwatch.DataPoint{{Value: aws.Float64(1000), Timestamp: now}}}},\n\t\t\t},\n\t\t\twant: []cloudwatchDataOutput{\n\t\t\t\t{MetricName: \"metric-1\", GetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Average\", DataPoints: []model.DataPoint{{Value: aws.Float64(1000), Timestamp: now}}}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"maps nil metric dataPoints\",\n\t\t\trequests: []*cloudwatchDataInput{\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Statistic: \"Min\"}, MetricName: \"MetricOne\"},\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Statistic: \"Max\"}, MetricName: \"MetricTwo\"},\n\t\t\t},\n\t\t\tmetricDataResultForMetrics: []metricDataResultForMetric{\n\t\t\t\t{MetricName: \"MetricOne\", result: cloudwatch.MetricDataResult{DataPoints: []cloudwatch.DataPoint{{Value: aws.Float64(5), Timestamp: time.Date(2023, time.June, 7, 1, 9, 8, 0, 
time.UTC)}}}},\n\t\t\t\t{MetricName: \"MetricTwo\"},\n\t\t\t},\n\t\t\twant: []cloudwatchDataOutput{\n\t\t\t\t{\n\t\t\t\t\tMetricName: \"MetricOne\",\n\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\tStatistic:  \"Min\",\n\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(5), Timestamp: time.Date(2023, time.June, 7, 1, 9, 8, 0, time.UTC)}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tMetricName: \"MetricTwo\",\n\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\tStatistic:  \"Max\",\n\t\t\t\t\t\tDataPoints: []model.DataPoint{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:            \"successfully maps input to output when multiple batches are involved\",\n\t\t\tmetricsPerBatch: 1,\n\t\t\trequests: []*cloudwatchDataInput{\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Statistic: \"Min\"}, MetricName: \"MetricOne\"},\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Statistic: \"Max\"}, MetricName: \"MetricTwo\"},\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Statistic: \"Sum\"}, MetricName: \"MetricThree\"},\n\t\t\t\t{GetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{Statistic: \"Count\"}, MetricName: \"MetricFour\"},\n\t\t\t},\n\t\t\tmetricDataResultForMetrics: []metricDataResultForMetric{\n\t\t\t\t{MetricName: \"MetricOne\", result: cloudwatch.MetricDataResult{DataPoints: []cloudwatch.DataPoint{{Value: aws.Float64(5), Timestamp: time.Date(2023, time.June, 7, 1, 9, 8, 0, time.UTC)}}}},\n\t\t\t\t{MetricName: \"MetricTwo\", result: cloudwatch.MetricDataResult{DataPoints: []cloudwatch.DataPoint{{Value: aws.Float64(12), Timestamp: time.Date(2023, time.June, 7, 2, 9, 8, 0, time.UTC)}}}},\n\t\t\t\t{MetricName: \"MetricThree\", result: cloudwatch.MetricDataResult{DataPoints: []cloudwatch.DataPoint{{Value: aws.Float64(15), Timestamp: time.Date(2023, time.June, 7, 3, 9, 8, 0, 
time.UTC)}}}},\n\t\t\t\t{MetricName: \"MetricFour\", result: cloudwatch.MetricDataResult{DataPoints: []cloudwatch.DataPoint{{Value: aws.Float64(20), Timestamp: time.Date(2023, time.June, 7, 4, 9, 8, 0, time.UTC)}}}},\n\t\t\t},\n\t\t\twant: []cloudwatchDataOutput{\n\t\t\t\t{\n\t\t\t\t\tMetricName: \"MetricOne\",\n\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\tStatistic:  \"Min\",\n\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(5), Timestamp: time.Date(2023, time.June, 7, 1, 9, 8, 0, time.UTC)}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tMetricName: \"MetricTwo\",\n\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\tStatistic:  \"Max\",\n\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(12), Timestamp: time.Date(2023, time.June, 7, 2, 9, 8, 0, time.UTC)}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tMetricName: \"MetricThree\",\n\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\tStatistic:  \"Sum\",\n\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(15), Timestamp: time.Date(2023, time.June, 7, 3, 9, 8, 0, time.UTC)}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tMetricName: \"MetricFour\",\n\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\tStatistic:  \"Count\",\n\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(20), Timestamp: time.Date(2023, time.June, 7, 4, 9, 8, 0, time.UTC)}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tmetricsPerQuery := 500\n\t\t\tif tt.metricsPerBatch != 0 {\n\t\t\t\tmetricsPerQuery = tt.metricsPerBatch\n\t\t\t}\n\t\t\tr := NewDefaultProcessor(promslog.NewNopLogger(), testClient{GetMetricDataResultForMetrics: tt.metricDataResultForMetrics}, metricsPerQuery, 1)\n\t\t\tcloudwatchData, err := r.Run(context.Background(), \"anything_is_fine\", ToCloudwatchData(tt.requests))\n\t\t\trequire.NoError(t, 
err)\n\t\t\trequire.Len(t, cloudwatchData, len(tt.want))\n\t\t\tgot := make([]cloudwatchDataOutput, 0, len(cloudwatchData))\n\t\t\tfor _, data := range cloudwatchData {\n\t\t\t\tassert.Nil(t, data.GetMetricStatisticsResult)\n\t\t\t\tassert.Nil(t, data.GetMetricDataProcessingParams)\n\t\t\t\tassert.NotNil(t, data.GetMetricDataResult)\n\t\t\t\tgot = append(got, cloudwatchDataOutput{\n\t\t\t\t\tMetricName:          data.MetricName,\n\t\t\t\t\tGetMetricDataResult: data.GetMetricDataResult,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tassert.ElementsMatch(t, tt.want, got)\n\t\t})\n\t}\n}\n\nfunc ToCloudwatchData(input []*cloudwatchDataInput) []*model.CloudwatchData {\n\toutput := make([]*model.CloudwatchData, 0, len(input))\n\tfor _, i := range input {\n\t\tcloudwatchData := &model.CloudwatchData{\n\t\t\tMetricName:                    i.MetricName,\n\t\t\tResourceName:                  \"test\",\n\t\t\tNamespace:                     \"test\",\n\t\t\tTags:                          []model.Tag{{Key: \"tag\", Value: \"value\"}},\n\t\t\tDimensions:                    []model.Dimension{{Name: \"dimension\", Value: \"value\"}},\n\t\t\tGetMetricDataProcessingParams: i.GetMetricDataProcessingParams,\n\t\t\tGetMetricDataResult:           nil,\n\t\t\tGetMetricStatisticsResult:     nil,\n\t\t}\n\t\toutput = append(output, cloudwatchData)\n\t}\n\treturn output\n}\n\nfunc getSampleMetricDatas(id string) *model.CloudwatchData {\n\treturn &model.CloudwatchData{\n\t\tMetricName: \"StorageBytes\",\n\t\tDimensions: []model.Dimension{\n\t\t\t{\n\t\t\t\tName:  \"FileSystemId\",\n\t\t\t\tValue: \"fs-abc123\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName:  \"StorageClass\",\n\t\t\t\tValue: \"Standard\",\n\t\t\t},\n\t\t},\n\t\tResourceName: id,\n\t\tNamespace:    \"efs\",\n\t\tTags: []model.Tag{\n\t\t\t{\n\t\t\t\tKey:   \"Value1\",\n\t\t\t\tValue: \"\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tKey:   \"Value2\",\n\t\t\t\tValue: \"\",\n\t\t\t},\n\t\t},\n\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\tNilToZero:        
      false,\n\t\t\tAddCloudwatchTimestamp: false,\n\t\t\tExportAllDataPoints:    false,\n\t\t},\n\t\tGetMetricDataProcessingParams: &model.GetMetricDataProcessingParams{\n\t\t\tPeriod:    60,\n\t\t\tLength:    60,\n\t\t\tDelay:     0,\n\t\t\tStatistic: \"Average\",\n\t\t},\n\t}\n}\n\nfunc BenchmarkProcessorRun(b *testing.B) {\n\ttype testcase struct {\n\t\tconcurrency        int\n\t\tmetricsPerQuery    int\n\t\ttestResourcesCount int\n\t}\n\n\tfor name, tc := range map[string]testcase{\n\t\t\"small case\": {\n\t\t\tconcurrency:        10,\n\t\t\tmetricsPerQuery:    500,\n\t\t\ttestResourcesCount: 10,\n\t\t},\n\t\t\"medium case\": {\n\t\t\tconcurrency:        10,\n\t\t\tmetricsPerQuery:    500,\n\t\t\ttestResourcesCount: 1000,\n\t\t},\n\t\t\"big case\": {\n\t\t\tconcurrency:        10,\n\t\t\tmetricsPerQuery:    500,\n\t\t\ttestResourcesCount: 2000,\n\t\t},\n\t} {\n\t\tb.Run(name, func(b *testing.B) {\n\t\t\tdoBench(b, tc.metricsPerQuery, tc.testResourcesCount, tc.concurrency)\n\t\t})\n\t}\n}\n\nfunc doBench(b *testing.B, metricsPerQuery, testResourcesCount int, concurrency int) {\n\ttestResourceIDs := make([]string, testResourcesCount)\n\tfor i := 0; i < testResourcesCount; i++ {\n\t\ttestResourceIDs[i] = fmt.Sprintf(\"test-resource-%d\", i)\n\t}\n\n\tclient := testClient{GetMetricDataFunc: func(_ context.Context, getMetricData []*model.CloudwatchData, _ string, _ time.Time, _ time.Time) []cloudwatch.MetricDataResult {\n\t\tb.StopTimer()\n\t\tresults := make([]cloudwatch.MetricDataResult, 0, len(getMetricData))\n\t\tfor _, entry := range getMetricData {\n\t\t\tresults = append(results, cloudwatch.MetricDataResult{\n\t\t\t\tID:         entry.GetMetricDataProcessingParams.QueryID,\n\t\t\t\tDataPoints: []cloudwatch.DataPoint{{Value: aws.Float64(1), Timestamp: time.Now()}},\n\t\t\t})\n\t\t}\n\t\tb.StartTimer()\n\t\treturn results\n\t}}\n\n\tfor i := 0; i < b.N; i++ {\n\t\t// stop timer to not affect benchmark run\n\t\t// this has to do in every run, since running the 
processor mutates the metric datas slice\n\t\tb.StopTimer()\n\t\tdatas := make([]*model.CloudwatchData, 0, testResourcesCount)\n\t\tfor i := 0; i < testResourcesCount; i++ {\n\t\t\tdatas = append(datas, getSampleMetricDatas(testResourceIDs[i]))\n\t\t}\n\t\tr := NewDefaultProcessor(promslog.NewNopLogger(), client, metricsPerQuery, concurrency)\n\t\t// re-start timer\n\t\tb.ReportAllocs()\n\t\tb.StartTimer()\n\n\t\t//nolint:errcheck\n\t\tr.Run(context.Background(), \"anything_is_fine\", datas)\n\t}\n}\n"
  },
  {
    "path": "pkg/job/getmetricdata/windowcalculator.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage getmetricdata\n\nimport \"time\"\n\nconst TimeFormat = \"2006-01-02T15:04:05.999999-07:00\"\n\n// Clock small interface which allows for stubbing the time.Now() function for unit testing\ntype Clock interface {\n\tNow() time.Time\n}\n\n// TimeClock implementation of Clock interface which delegates to Go's Time package\ntype TimeClock struct{}\n\nfunc (tc TimeClock) Now() time.Time {\n\treturn time.Now()\n}\n\ntype MetricWindowCalculator struct {\n\tclock Clock\n}\n\n// Calculate computes the start and end time for the GetMetricData request to AWS\n// Always uses the wall clock time as starting point for calculations to ensure that\n// a variety of exporter configurations will work reliably.\nfunc (m MetricWindowCalculator) Calculate(period time.Duration, length time.Duration, delay time.Duration) (time.Time, time.Time) {\n\tnow := m.clock.Now()\n\tif period > 0 {\n\t\t// Round down the time to a factor of the period:\n\t\t// https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html#API_GetMetricData_RequestParameters\n\t\tnow = now.Add(-period / 2).Round(period)\n\t}\n\n\tstartTime := now.Add(-(length + delay))\n\tendTime := now.Add(-delay)\n\treturn startTime, endTime\n}\n"
  },
  {
    "path": "pkg/job/getmetricdata/windowcalculator_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage getmetricdata\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\n// StubClock stub implementation of Clock interface that allows tests\n// to control time.Now()\ntype StubClock struct {\n\tcurrentTime time.Time\n}\n\nfunc (mt StubClock) Now() time.Time {\n\treturn mt.currentTime\n}\n\nfunc Test_MetricWindow(t *testing.T) {\n\ttype data struct {\n\t\troundingPeriod    time.Duration\n\t\tlength            time.Duration\n\t\tdelay             time.Duration\n\t\tclock             StubClock\n\t\texpectedStartTime time.Time\n\t\texpectedEndTime   time.Time\n\t}\n\n\ttestCases := []struct {\n\t\ttestName string\n\t\tdata     data\n\t}{\n\t\t{\n\t\t\ttestName: \"Go back four minutes and round to the nearest two minutes with two minute delay\",\n\t\t\tdata: data{\n\t\t\t\troundingPeriod: 120 * time.Second,\n\t\t\t\tlength:         120 * time.Second,\n\t\t\t\tdelay:          120 * time.Second,\n\t\t\t\tclock: StubClock{\n\t\t\t\t\tcurrentTime: time.Date(2021, 11, 20, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t\texpectedStartTime: time.Date(2021, 11, 19, 23, 56, 0, 0, time.UTC),\n\t\t\t\texpectedEndTime:   time.Date(2021, 11, 19, 23, 58, 0, 0, time.UTC),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttestName: \"Go back four minutes with two minute delay nad no rounding\",\n\t\t\tdata: data{\n\t\t\t\troundingPeriod: 0,\n\t\t\t\tlength:         120 * time.Second,\n\t\t\t\tdelay: 
         120 * time.Second,\n\t\t\t\tclock: StubClock{\n\t\t\t\t\tcurrentTime: time.Date(2021, 1, 1, 0, 0o2, 22, 33, time.UTC),\n\t\t\t\t},\n\t\t\t\texpectedStartTime: time.Date(2020, 12, 31, 23, 58, 22, 33, time.UTC),\n\t\t\t\texpectedEndTime:   time.Date(2021, 1, 1, 0, 0, 22, 33, time.UTC),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttestName: \"Go back two days and round to the nearest day (midnight) with zero delay\",\n\t\t\tdata: data{\n\t\t\t\troundingPeriod: 86400 * time.Second,  // 1 day\n\t\t\t\tlength:         172800 * time.Second, // 2 days\n\t\t\t\tdelay:          0,\n\t\t\t\tclock: StubClock{\n\t\t\t\t\tcurrentTime: time.Date(2021, 11, 20, 8, 33, 44, 0, time.UTC),\n\t\t\t\t},\n\t\t\t\texpectedStartTime: time.Date(2021, 11, 18, 0, 0, 0, 0, time.UTC),\n\t\t\t\texpectedEndTime:   time.Date(2021, 11, 20, 0, 0, 0, 0, time.UTC),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttestName: \"Go back two days and round to the nearest 5 minutes with zero delay\",\n\t\t\tdata: data{\n\t\t\t\troundingPeriod: 300 * time.Second,    // 5 min\n\t\t\t\tlength:         172800 * time.Second, // 2 days\n\t\t\t\tdelay:          0,\n\t\t\t\tclock: StubClock{\n\t\t\t\t\tcurrentTime: time.Date(2021, 11, 20, 8, 33, 44, 0, time.UTC),\n\t\t\t\t},\n\t\t\t\texpectedStartTime: time.Date(2021, 11, 18, 8, 30, 0, 0, time.UTC),\n\t\t\t\texpectedEndTime:   time.Date(2021, 11, 20, 8, 30, 0, 0, time.UTC),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.testName, func(t *testing.T) {\n\t\t\tstartTime, endTime := MetricWindowCalculator{tc.data.clock}.Calculate(tc.data.roundingPeriod, tc.data.length, tc.data.delay)\n\t\t\tif !startTime.Equal(tc.data.expectedStartTime) {\n\t\t\t\tt.Errorf(\"start time incorrect. Expected: %s, Actual: %s\", tc.data.expectedStartTime.Format(TimeFormat), startTime.Format(TimeFormat))\n\t\t\t\tt.Errorf(\"end time incorrect. Expected: %s, Actual: %s\", tc.data.expectedEndTime.Format(TimeFormat), endTime.Format(TimeFormat))\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/listmetrics/processor.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage listmetrics\n\nimport \"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n\ntype ProcessingParams struct {\n\tNamespace                 string\n\tMetrics                   []*model.MetricConfig\n\tRecentlyActiveOnly        bool\n\tDimensionNameRequirements []string\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"cmp\"\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"slices\"\n\t\"strings\"\n\n\t\"github.com/grafana/regexp\"\n\tprom_model \"github.com/prometheus/common/model\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar amazonMQBrokerSuffix = regexp.MustCompile(\"-[0-9]+$\")\n\n// Associator implements a \"best effort\" algorithm to automatically map the output\n// of the ListMetrics API to the list of resources retrieved from the Tagging API.\n// The core logic is based on a manually maintained list of regexes that extract\n// dimensions names from ARNs (see services.go). 
YACE supports auto-discovery for\n// those AWS namespaces where the ARN regexes are correctly defined.\ntype Associator struct {\n\t// mappings is a slice of dimensions-based mappings, one for each regex of a given namespace\n\tmappings []*dimensionsRegexpMapping\n\n\tlogger       *slog.Logger\n\tdebugEnabled bool\n}\n\ntype dimensionsRegexpMapping struct {\n\t// dimensions is a slice of dimensions names in a regex (normally 1 name is enough\n\t// to identify the resource type by its ARN, sometimes 2 or 3 dimensions names are\n\t// needed to identify sub-resources)\n\tdimensions []string\n\n\t// dimensionsMapping maps the set of dimensions (names and values) to a resource.\n\t// Dimensions names and values are encoded as a uint64 fingerprint.\n\tdimensionsMapping map[uint64]*model.TaggedResource\n}\n\nfunc (rm dimensionsRegexpMapping) toString() string {\n\tsb := strings.Builder{}\n\tsb.WriteString(\"{dimensions=[\")\n\tfor _, dim := range rm.dimensions {\n\t\tsb.WriteString(dim)\n\t}\n\tsb.WriteString(\"], dimensions_mappings={\")\n\tfor sign, res := range rm.dimensionsMapping {\n\t\tfmt.Fprintf(&sb, \"%d\", sign)\n\t\tsb.WriteString(\"=\")\n\t\tsb.WriteString(res.ARN)\n\t\tsb.WriteString(\",\")\n\t}\n\tsb.WriteString(\"}}\")\n\treturn sb.String()\n}\n\n// NewAssociator builds all mappings for the given dimensions regexps and list of resources.\nfunc NewAssociator(logger *slog.Logger, dimensionsRegexps []model.DimensionsRegexp, resources []*model.TaggedResource) Associator {\n\tassoc := Associator{\n\t\tmappings:     []*dimensionsRegexpMapping{},\n\t\tlogger:       logger,\n\t\tdebugEnabled: logger.Handler().Enabled(context.Background(), slog.LevelDebug), // caching if debug is enabled\n\t}\n\n\t// Keep track of resources that have already been mapped.\n\t// Each resource will be matched against at most one regex.\n\t// TODO(cristian): use a more memory-efficient data structure\n\tmappedResources := make([]bool, len(resources))\n\n\tfor _, dr := range 
dimensionsRegexps {\n\t\tm := &dimensionsRegexpMapping{\n\t\t\tdimensions:        dr.DimensionsNames,\n\t\t\tdimensionsMapping: map[uint64]*model.TaggedResource{},\n\t\t}\n\n\t\tfor idx, r := range resources {\n\t\t\tif mappedResources[idx] {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tmatch := dr.Regexp.FindStringSubmatch(r.ARN)\n\t\t\tif match == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlabels := make(map[string]string, len(match))\n\t\t\tfor i := 1; i < len(match); i++ {\n\t\t\t\tlabels[dr.DimensionsNames[i-1]] = match[i]\n\t\t\t}\n\t\t\tsignature := prom_model.LabelsToSignature(labels)\n\t\t\tm.dimensionsMapping[signature] = r\n\t\t\tmappedResources[idx] = true\n\t\t}\n\n\t\tif len(m.dimensionsMapping) > 0 {\n\t\t\tassoc.mappings = append(assoc.mappings, m)\n\t\t}\n\n\t\t// The mapping might end up as empty in cases e.g. where\n\t\t// one of the regexps defined for the namespace doesn't match\n\t\t// against any of the tagged resources. This might happen for\n\t\t// example when we define multiple regexps (to capture sibling\n\t\t// or sub-resources) and one of them doesn't match any resource.\n\t\t// This behaviour is ok, we just want to debug log to keep track of it.\n\t\tif assoc.debugEnabled {\n\t\t\tlogger.Debug(\"unable to define a regex mapping\", \"regex\", dr.Regexp.String())\n\t\t}\n\t}\n\n\t// sort all mappings by decreasing number of dimensions names\n\t// (this is essential so that during matching we try to find the metric\n\t// with the most specific set of dimensions)\n\tslices.SortStableFunc(assoc.mappings, func(a, b *dimensionsRegexpMapping) int {\n\t\treturn -1 * cmp.Compare(len(a.dimensions), len(b.dimensions))\n\t})\n\n\tif assoc.debugEnabled {\n\t\tfor idx, regexpMapping := range assoc.mappings {\n\t\t\tlogger.Debug(\"associator mapping\", \"mapping_idx\", idx, \"mapping\", regexpMapping.toString())\n\t\t}\n\t}\n\n\treturn assoc\n}\n\n// AssociateMetricToResource finds the resource that corresponds to the given set of dimensions\n// names and 
values of a metric. The guess is based on the mapping built from dimensions regexps.\n// In case a map can't be found, the second return parameter indicates whether the metric should be\n// ignored or not.\nfunc (assoc Associator) AssociateMetricToResource(cwMetric *model.Metric) (*model.TaggedResource, bool) {\n\tlogger := assoc.logger.With(\"metric_name\", cwMetric.MetricName)\n\n\tif len(cwMetric.Dimensions) == 0 {\n\t\tlogger.Debug(\"metric has no dimensions, don't skip\")\n\n\t\t// Do not skip the metric (create a \"global\" metric)\n\t\treturn nil, false\n\t}\n\n\tdimensions := make([]string, 0, len(cwMetric.Dimensions))\n\tfor _, dimension := range cwMetric.Dimensions {\n\t\tdimensions = append(dimensions, dimension.Name)\n\t}\n\n\tif assoc.debugEnabled {\n\t\tlogger.Debug(\"associate loop start\", \"dimensions\", strings.Join(dimensions, \",\"))\n\t}\n\n\t// Attempt to find the regex mapping which contains the most\n\t// (but not necessarily all) the metric's dimensions names.\n\t// Regex mappings are sorted by decreasing number of dimensions names,\n\t// which favours find the mapping with most dimensions.\n\tmappingFound := false\n\tfor idx, regexpMapping := range assoc.mappings {\n\t\tif containsAll(dimensions, regexpMapping.dimensions) {\n\t\t\tif assoc.debugEnabled {\n\t\t\t\tlogger.Debug(\"found mapping\", \"mapping_idx\", idx, \"mapping\", regexpMapping.toString())\n\t\t\t}\n\n\t\t\t// A regex mapping has been found. The metric has all (and possibly more)\n\t\t\t// the dimensions computed for the mapping. 
Now compute a signature\n\t\t\t// of the labels (names and values) of the dimensions of this mapping, and try to\n\t\t\t// find a resource match.\n\t\t\t// This loop can run up to two times:\n\t\t\t//   On the first iteration, special-case dimension value\n\t\t\t// fixes to match the value up with the resource ARN are applied to particular namespaces.\n\t\t\t// \t  The second iteration will only run if a fix was applied for one of the special-case\n\t\t\t// namespaces and no match was found. It will try to find a match without applying the fixes.\n\t\t\t// This covers cases where the dimension value does line up with the resource ARN.\n\t\t\tmappingFound = true\n\t\t\tdimFixApplied := false\n\t\t\tshouldTryFixDimension := true\n\t\t\t// If no dimension fixes were applied, no need to try running again without the fixer.\n\t\t\tfor dimFixApplied || shouldTryFixDimension {\n\n\t\t\t\tvar labels map[string]string\n\t\t\t\tlabels, dimFixApplied = buildLabelsMap(cwMetric, regexpMapping, shouldTryFixDimension)\n\t\t\t\tsignature := prom_model.LabelsToSignature(labels)\n\n\t\t\t\t// Check if there's an entry for the labels (names and values) of the metric,\n\t\t\t\t// and return the resource in case.\n\t\t\t\tif resource, ok := regexpMapping.dimensionsMapping[signature]; ok {\n\t\t\t\t\tlogger.Debug(\"resource matched\", \"signature\", signature)\n\t\t\t\t\treturn resource, false\n\t\t\t\t}\n\n\t\t\t\t// No resource was matched for the current signature.\n\t\t\t\tlogger.Debug(\"resource signature attempt not matched\", \"signature\", signature)\n\t\t\t\tshouldTryFixDimension = false\n\t\t\t}\n\n\t\t\t// No resource was matched for any signature, continue iterating across the\n\t\t\t// rest of regex mappings to attempt to find another one with fewer dimensions.\n\t\t\tlogger.Debug(\"resource not matched\")\n\t\t}\n\t}\n\n\t// At this point, we haven't been able to match the metric against\n\t// any resource based on the dimensions the associator knows.\n\t// If a regex 
mapping was ever found in the loop above but no entry\n\t// (i.e. matching labels names and values) matched the metric dimensions,\n\t// skip the metric altogether.\n\t// Otherwise, if we didn't find any regex mapping it means we can't\n\t// correctly map the dimensions names to a resource arn regex,\n\t// but we still want to keep the metric and create a \"global\" metric.\n\tlogger.Debug(\"associate loop end\", \"skip\", mappingFound)\n\treturn nil, mappingFound\n}\n\n// buildLabelsMap returns a map of labels names and values, as well as whether the dimension fixer was applied.\n// For some namespaces, values might need to be modified in order\n// to match the dimension value extracted from ARN.\nfunc buildLabelsMap(cwMetric *model.Metric, regexpMapping *dimensionsRegexpMapping, shouldTryFixDimension bool) (map[string]string, bool) {\n\tlabels := make(map[string]string, len(cwMetric.Dimensions))\n\tdimFixApplied := false\n\tfor _, rDimension := range regexpMapping.dimensions {\n\t\tfor _, mDimension := range cwMetric.Dimensions {\n\t\t\tif shouldTryFixDimension {\n\t\t\t\tmDimension, dimFixApplied = fixDimension(cwMetric.Namespace, mDimension)\n\t\t\t}\n\n\t\t\tif rDimension == mDimension.Name {\n\t\t\t\tlabels[mDimension.Name] = mDimension.Value\n\t\t\t}\n\t\t}\n\t}\n\treturn labels, dimFixApplied\n}\n\n// fixDimension modifies the dimension value to accommodate special cases where\n// the dimension value doesn't match the resource ARN.\nfunc fixDimension(namespace string, dim model.Dimension) (model.Dimension, bool) {\n\t// AmazonMQ is special - for active/standby ActiveMQ brokers,\n\t// the value of the \"Broker\" dimension contains a number suffix\n\t// that is not part of the resource ARN\n\tif namespace == \"AWS/AmazonMQ\" && dim.Name == \"Broker\" {\n\t\tif amazonMQBrokerSuffix.MatchString(dim.Value) {\n\t\t\tdim.Value = amazonMQBrokerSuffix.ReplaceAllString(dim.Value, \"\")\n\t\t\treturn dim, true\n\t\t}\n\t}\n\n\t// AWS Sagemaker inference component name 
may have upper case characters\n\t// name value to be able to match the resource ARN\n\tif namespace == \"AWS/SageMaker\" && dim.Name == \"InferenceComponentName\" {\n\t\tdim.Value = strings.ToLower(dim.Value)\n\t\treturn dim, true\n\t}\n\n\treturn dim, false\n}\n\n// containsAll returns true if a contains all elements of b\nfunc containsAll(a, b []string) bool {\n\tfor _, e := range b {\n\t\tif slices.Contains(a, e) {\n\t\t\tcontinue\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_api_gateway_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar apiGatewayV1 = &model.TaggedResource{\n\tARN:       \"arn:aws:apigateway:us-east-2::/restapis/test-api\",\n\tNamespace: \"AWS/ApiGateway\",\n}\n\nvar apiGatewayV1Stage = &model.TaggedResource{\n\tARN:       \"arn:aws:apigateway:us-east-2::/restapis/test-api/stages/test\",\n\tNamespace: \"AWS/ApiGateway\",\n}\n\nvar apiGatewayV2 = &model.TaggedResource{\n\tARN:       \"arn:aws:apigateway:us-east-2::/apis/98765fghij\",\n\tNamespace: \"AWS/ApiGateway\",\n}\n\nvar apiGatewayV2Stage = &model.TaggedResource{\n\tARN:       \"arn:aws:apigateway:us-east-2::/apis/98765fghij/stages/$default\",\n\tNamespace: \"AWS/ApiGateway\",\n}\n\nvar apiGatewayResources = []*model.TaggedResource{apiGatewayV1, apiGatewayV1Stage, apiGatewayV2, apiGatewayV2Stage}\n\nfunc TestAssociatorAPIGateway(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     
bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match API Gateway V2 with ApiId dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/ApiGateway\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        apiGatewayResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"5xx\",\n\t\t\t\t\tNamespace:  \"AWS/ApiGateway\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"ApiId\", Value: \"98765fghij\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: apiGatewayV2,\n\t\t},\n\t\t{\n\t\t\tname: \"should match API Gateway V2 with ApiId and Stage dimensions\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/ApiGateway\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        apiGatewayResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"5xx\",\n\t\t\t\t\tNamespace:  \"AWS/ApiGateway\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"ApiId\", Value: \"98765fghij\"},\n\t\t\t\t\t\t{Name: \"Stage\", Value: \"$default\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: apiGatewayV2Stage,\n\t\t},\n\t\t{\n\t\t\tname: \"should match API Gateway V1 with ApiName dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/ApiGateway\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        apiGatewayResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"5xx\",\n\t\t\t\t\tNamespace:  \"AWS/ApiGateway\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"ApiName\", Value: \"test-api\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: apiGatewayV1,\n\t\t},\n\t\t{\n\t\t\tname: \"should match API Gateway V1 with ApiName and Stage dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: 
config.SupportedServices.GetService(\"AWS/ApiGateway\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        apiGatewayResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"5xx\",\n\t\t\t\t\tNamespace:  \"AWS/ApiGateway\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"ApiName\", Value: \"test-api\"},\n\t\t\t\t\t\t{Name: \"Stage\", Value: \"test\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: apiGatewayV1Stage,\n\t\t},\n\t\t{\n\t\t\tname: \"should match API Gateway V1 with ApiName (Stage is not matched)\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/ApiGateway\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        apiGatewayResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"5xx\",\n\t\t\t\t\tNamespace:  \"AWS/ApiGateway\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"ApiName\", Value: \"test-api\"},\n\t\t\t\t\t\t{Name: \"Stage\", Value: \"dev\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: apiGatewayV1,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_client_vpn_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar clientVpn = &model.TaggedResource{\n\tARN:       \"arn:aws:ec2:eu-central-1:075055617227:client-vpn-endpoint/cvpn-endpoint-0c9e5bd20be71e296\",\n\tNamespace: \"AWS/ClientVPN\",\n}\n\nfunc TestAssociatorClientVPN(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match ClientVPN with Endpoint dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/ClientVPN\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        []*model.TaggedResource{clientVpn},\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"CrlDaysToExpiry\",\n\t\t\t\t\tNamespace:  \"AWS/ClientVPN\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Endpoint\", Value: 
\"cvpn-endpoint-0c9e5bd20be71e296\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: clientVpn,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_ddosprotection_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar protectedResources1 = &model.TaggedResource{\n\tARN:       \"arn:aws:ec2:us-east-1:123456789012:instance/i-abc123\",\n\tNamespace: \"AWS/DDoSProtection\",\n}\n\nvar protectedResources2 = &model.TaggedResource{\n\tARN:       \"arn:aws:ec2:us-east-1:123456789012:instance/i-def456\",\n\tNamespace: \"AWS/DDoSProtection\",\n}\n\nvar protectedResources = []*model.TaggedResource{\n\tprotectedResources1,\n\tprotectedResources2,\n}\n\nfunc TestAssociatorDDoSProtection(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match with ResourceArn dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/DDoSProtection\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        
protectedResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tNamespace:  \"AWS/DDoSProtection\",\n\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"ResourceArn\", Value: \"arn:aws:ec2:us-east-1:123456789012:instance/i-abc123\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: protectedResources1,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\tassert.Equal(t, tc.expectedSkip, skip)\n\t\t\tassert.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_directoryservice_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar directory = &model.TaggedResource{\n\tARN:       \"arn:aws:ds::012345678901:directory/d-abc123\",\n\tNamespace: \"AWS/DirectoryService\",\n}\n\nfunc TestAssociatorDirectoryService(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match directory id with Directory ID dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/DirectoryService\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        []*model.TaggedResource{directory},\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"Current Bandwidth\",\n\t\t\t\t\tNamespace:  \"AWS/DirectoryService\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Metric Category\", Value: 
\"NTDS\"},\n\t\t\t\t\t\t{Name: \"Domain Controller IP\", Value: \"123.123.123.123\"},\n\t\t\t\t\t\t{Name: \"Directory ID\", Value: \"d-abc123\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: directory,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_dx_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar dxVif = &model.TaggedResource{\n\tARN:       \"arn:aws:directconnect::012345678901:dxvif/dxvif-abc123\",\n\tNamespace: \"AWS/DX\",\n}\n\nfunc TestAssociatorDX(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match Virtual Interface with VirtualInterfaceId dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/DX\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        []*model.TaggedResource{dxVif},\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"VirtualInterfaceBpsIngress\",\n\t\t\t\t\tNamespace:  \"AWS/DX\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"ConnectionId\", Value: \"dxlag-abc123\"},\n\t\t\t\t\t\t{Name: 
\"VirtualInterfaceId\", Value: \"dxvif-abc123\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: dxVif,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_ec2_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar ec2Instance1 = &model.TaggedResource{\n\tARN:       \"arn:aws:ec2:us-east-1:123456789012:instance/i-abc123\",\n\tNamespace: \"AWS/EC2\",\n}\n\nvar ec2Instance2 = &model.TaggedResource{\n\tARN:       \"arn:aws:ec2:us-east-1:123456789012:instance/i-def456\",\n\tNamespace: \"AWS/EC2\",\n}\n\nvar ec2Resources = []*model.TaggedResource{\n\tec2Instance1,\n\tec2Instance2,\n}\n\nfunc TestAssociatorEC2(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match with InstanceId dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/EC2\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        ec2Resources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tNamespace:  
\"AWS/EC2\",\n\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"InstanceId\", Value: \"i-abc123\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: ec2Instance1,\n\t\t},\n\t\t{\n\t\t\tname: \"should match another instance with InstanceId dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/EC2\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        ec2Resources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tNamespace:  \"AWS/EC2\",\n\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"InstanceId\", Value: \"i-def456\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: ec2Instance2,\n\t\t},\n\t\t{\n\t\t\tname: \"should skip with unmatched InstanceId dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/EC2\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        ec2Resources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tNamespace:  \"AWS/EC2\",\n\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"InstanceId\", Value: \"i-blahblah\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"should not skip when unmatching because of non-ARN dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/EC2\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        ec2Resources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tNamespace:  \"AWS/EC2\",\n\t\t\t\t\tMetricName: \"StatusCheckFailed_System\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"AutoScalingGroupName\", Value: \"some-asg-name\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor 
_, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_ec_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar ecServerless = &model.TaggedResource{\n\tARN:       \"arn:aws:elasticache:eu-east-1:123456789012:serverlesscache:test-serverless-cluster\",\n\tNamespace: \"AWS/ElastiCache\",\n}\n\nvar ecCluster = &model.TaggedResource{\n\tARN:       \"arn:aws:elasticache:eu-east-1:123456789012:cluster:test-cluster-0001-001\",\n\tNamespace: \"AWS/ElastiCache\",\n}\n\nvar ecResources = []*model.TaggedResource{\n\tecServerless,\n\tecCluster,\n}\n\nfunc TestAssociatorEC(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match with clusterId dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/ElastiCache\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        
ecResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"TotalCmdsCount\",\n\t\t\t\t\tNamespace:  \"AWS/ElastiCache\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"clusterId\", Value: \"test-serverless-cluster\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: ecServerless,\n\t\t},\n\t\t{\n\t\t\tname: \"should match with CacheClusterId dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/ElastiCache\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        ecResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"EngineCPUUtilization\",\n\t\t\t\t\tNamespace:  \"AWS/ElastiCache\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"CacheClusterId\", Value: \"test-cluster-0001-001\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: ecCluster,\n\t\t},\n\t\t{\n\t\t\tname: \"should skip with unmatched CacheClusterId dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/ElastiCache\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        ecResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"EngineCPUUtilization\",\n\t\t\t\t\tNamespace:  \"AWS/ElastiCache\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"CacheClusterId\", Value: \"test-cluster-0001-002\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"should skip with unmatched clusterId dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/ElastiCache\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        ecResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"TotalCmdsCount\",\n\t\t\t\t\tNamespace:  \"AWS/ElastiCache\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"clusterId\", Value: 
\"test-unmatched-serverless-cluster\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_ecs_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar ecsCluster = &model.TaggedResource{\n\tARN:       \"arn:aws:ecs:af-south-1:123456789222:cluster/sampleCluster\",\n\tNamespace: \"AWS/ECS\",\n}\n\nvar ecsService1 = &model.TaggedResource{\n\tARN:       \"arn:aws:ecs:af-south-1:123456789222:service/sampleCluster/service1\",\n\tNamespace: \"AWS/ECS\",\n}\n\nvar ecsService2 = &model.TaggedResource{\n\tARN:       \"arn:aws:ecs:af-south-1:123456789222:service/sampleCluster/service2\",\n\tNamespace: \"AWS/ECS\",\n}\n\nvar ecsResources = []*model.TaggedResource{\n\tecsCluster,\n\tecsService1,\n\tecsService2,\n}\n\nfunc TestAssociatorECS(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"cluster metric should be assigned cluster resource\",\n\t\t\targs: 
args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/ECS\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        ecsResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"MemoryReservation\",\n\t\t\t\t\tNamespace:  \"AWS/ECS\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"ClusterName\", Value: \"sampleCluster\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: ecsCluster,\n\t\t},\n\t\t{\n\t\t\tname: \"service metric should be assigned service1 resource\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/ECS\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        ecsResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\tNamespace:  \"AWS/ECS\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"ClusterName\", Value: \"sampleCluster\"},\n\t\t\t\t\t\t{Name: \"ServiceName\", Value: \"service1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: ecsService1,\n\t\t},\n\t\t{\n\t\t\tname: \"service metric should be assigned service2 resource\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/ECS\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        ecsResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\tNamespace:  \"AWS/ECS\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"ClusterName\", Value: \"sampleCluster\"},\n\t\t\t\t\t\t{Name: \"ServiceName\", Value: \"service2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: ecsService2,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := 
associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_event_roles_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar eventRule0 = &model.TaggedResource{\n\tARN:       \"arn:aws:events:eu-central-1:112246171613:rule/event-bus-name/rule-name\",\n\tNamespace: \"AWS/Events\",\n}\n\nvar eventRule1 = &model.TaggedResource{\n\tARN:       \"arn:aws:events:eu-central-1:123456789012:rule/aws.partner/partner.name/123456/rule-name\",\n\tNamespace: \"AWS/Events\",\n}\n\nvar eventRuleResources = []*model.TaggedResource{\n\teventRule0, eventRule1,\n}\n\nfunc TestAssociatorEventRule(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"2 dimensions should match\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/Events\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        eventRuleResources,\n\t\t\t\tmetric: 
&model.Metric{\n\t\t\t\t\tMetricName: \"Invocations\",\n\t\t\t\t\tNamespace:  \"AWS/Events\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"EventBusName\", Value: \"event-bus-name\"},\n\t\t\t\t\t\t{Name: \"RuleName\", Value: \"rule-name\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: eventRule0,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_globalaccelerator_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar globalAcceleratorAccelerator = &model.TaggedResource{\n\tARN:       \"arn:aws:globalaccelerator::012345678901:accelerator/super-accelerator\",\n\tNamespace: \"AWS/GlobalAccelerator\",\n}\n\nvar globalAcceleratorListener = &model.TaggedResource{\n\tARN:       \"arn:aws:globalaccelerator::012345678901:accelerator/super-accelerator/listener/some_listener\",\n\tNamespace: \"AWS/GlobalAccelerator\",\n}\n\nvar globalAcceleratorEndpointGroup = &model.TaggedResource{\n\tARN:       \"arn:aws:globalaccelerator::012345678901:accelerator/super-accelerator/listener/some_listener/endpoint-group/eg1\",\n\tNamespace: \"AWS/GlobalAccelerator\",\n}\n\nvar globalAcceleratorResources = []*model.TaggedResource{\n\tglobalAcceleratorAccelerator,\n\tglobalAcceleratorListener,\n\tglobalAcceleratorEndpointGroup,\n}\n\nfunc TestAssociatorGlobalAccelerator(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname         
    string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match with Accelerator dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/GlobalAccelerator\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        globalAcceleratorResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ProcessedBytesOut\",\n\t\t\t\t\tNamespace:  \"AWS/GlobalAccelerator\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Accelerator\", Value: \"super-accelerator\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: globalAcceleratorAccelerator,\n\t\t},\n\t\t{\n\t\t\tname: \"should match Listener with Accelerator and Listener dimensions\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/GlobalAccelerator\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        globalAcceleratorResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ProcessedBytesOut\",\n\t\t\t\t\tNamespace:  \"AWS/GlobalAccelerator\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Accelerator\", Value: \"super-accelerator\"},\n\t\t\t\t\t\t{Name: \"Listener\", Value: \"some_listener\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: globalAcceleratorListener,\n\t\t},\n\t\t{\n\t\t\tname: \"should match EndpointGroup with Accelerator, Listener and EndpointGroup dimensions\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/GlobalAccelerator\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        globalAcceleratorResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ProcessedBytesOut\",\n\t\t\t\t\tNamespace:  \"AWS/GlobalAccelerator\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Accelerator\", Value: 
\"super-accelerator\"},\n\t\t\t\t\t\t{Name: \"Listener\", Value: \"some_listener\"},\n\t\t\t\t\t\t{Name: \"EndpointGroup\", Value: \"eg1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: globalAcceleratorEndpointGroup,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_gwlb_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar gatewayLoadBalancer1 = &model.TaggedResource{\n\tARN:       \"arn:aws:elasticloadbalancing:eu-central-1:123456789012:loadbalancer/gwy/gwlb-1/4a049e69add14452\",\n\tNamespace: \"AWS/GatewayELB\",\n}\n\nvar gatewayLoadBalancer2 = &model.TaggedResource{\n\tARN:       \"arn:aws:elasticloadbalancing:eu-central-1:123456789012:loadbalancer/gwy/gwlb-2/a96cc19724cf1a87\",\n\tNamespace: \"AWS/GatewayELB\",\n}\n\nvar targetGroup1 = &model.TaggedResource{\n\tARN:       \"arn:aws:elasticloadbalancing:eu-central-1:123456789012:targetgroup/gwlb-target-group-1/012e9f368748cd345c\",\n\tNamespace: \"AWS/GatewayELB\",\n}\n\nvar gatewayLoadBalancerResources = []*model.TaggedResource{\n\tgatewayLoadBalancer1,\n\tgatewayLoadBalancer2,\n\ttargetGroup1,\n}\n\nfunc TestAssociatorGwlb(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     
bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match with gateway loadbalancer one dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/GatewayELB\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        gatewayLoadBalancerResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"HealthyHostCount\",\n\t\t\t\t\tNamespace:  \"AWS/GatewayELB\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"LoadBalancer\", Value: \"gwy/gwlb-1/4a049e69add14452\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: gatewayLoadBalancer1,\n\t\t},\n\t\t{\n\t\t\tname: \"should match with gateway loadbalancer target group two dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/GatewayELB\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        gatewayLoadBalancerResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"HealthyHostCount\",\n\t\t\t\t\tNamespace:  \"AWS/GatewayELB\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"LoadBalancer\", Value: \"gwy/gwlb-1/4a049e69add14452\"},\n\t\t\t\t\t\t{Name: \"TargetGroup\", Value: \"targetgroup/gwlb-target-group-1/012e9f368748cd345c\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: targetGroup1,\n\t\t},\n\t\t{\n\t\t\tname: \"should not match with any gateway loadbalancer\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/GatewayELB\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        gatewayLoadBalancerResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"HealthyHostCount\",\n\t\t\t\t\tNamespace:  \"AWS/GatewayELB\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"LoadBalancer\", Value: 
\"gwy/non-existing-gwlb/a96cc19724cf1a87\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_ipam_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar ec2IpamPool = &model.TaggedResource{\n\tARN:       \"arn:aws:ec2::123456789012:ipam-pool/ipam-pool-1ff5e4e9ad2c28b7b\",\n\tNamespace: \"AWS/IPAM\",\n}\n\nvar ipamResources = []*model.TaggedResource{\n\tec2IpamPool,\n}\n\nfunc TestAssociatorIpam(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match with IpamPoolId dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/IPAM\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        ipamResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"VpcIPUsage\",\n\t\t\t\t\tNamespace:  \"AWS/IPAM\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"IpamPoolId\", Value: 
\"ipam-pool-1ff5e4e9ad2c28b7b\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: ec2IpamPool,\n\t\t},\n\t\t{\n\t\t\tname: \"should skip with unmatched IpamPoolId dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/IPAM\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        ipamResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"VpcIPUsage\",\n\t\t\t\t\tNamespace:  \"AWS/IPAM\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"IpamPoolId\", Value: \"ipam-pool-blahblah\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_kms_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar kmsKey = &model.TaggedResource{\n\tARN:       \"arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012\",\n\tNamespace: \"AWS/KMS\",\n}\n\nfunc TestAssociatorKMS(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match with KMS dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/KMS\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        []*model.TaggedResource{kmsKey},\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"SecondsUntilKeyMaterialExpiration\",\n\t\t\t\t\tNamespace:  \"AWS/KMS\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"KeyId\", Value: 
\"12345678-1234-1234-1234-123456789012\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: kmsKey,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_lambda_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar lambdaFunction = &model.TaggedResource{\n\tARN:       \"arn:aws:lambda:us-east-2:123456789012:function:lambdaFunction\",\n\tNamespace: \"AWS/Lambda\",\n}\n\nvar lambdaResources = []*model.TaggedResource{lambdaFunction}\n\nfunc TestAssociatorLambda(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match with FunctionName dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/Lambda\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        lambdaResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"Invocations\",\n\t\t\t\t\tNamespace:  \"AWS/Lambda\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"FunctionName\", Value: 
\"lambdaFunction\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: lambdaFunction,\n\t\t},\n\t\t{\n\t\t\tname: \"should skip with unmatched FunctionName dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/Lambda\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        lambdaResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"Invocations\",\n\t\t\t\t\tNamespace:  \"AWS/Lambda\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"FunctionName\", Value: \"anotherLambdaFunction\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"should match with FunctionName and Resource dimensions\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/Lambda\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        lambdaResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"Invocations\",\n\t\t\t\t\tNamespace:  \"AWS/Lambda\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"FunctionName\", Value: \"lambdaFunction\"},\n\t\t\t\t\t\t{Name: \"Resource\", Value: \"lambdaFunction\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: lambdaFunction,\n\t\t},\n\t\t{\n\t\t\tname: \"should not skip when empty dimensions\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/Lambda\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        lambdaResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"Invocations\",\n\t\t\t\t\tNamespace:  \"AWS/Lambda\",\n\t\t\t\t\tDimensions: []model.Dimension{},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), 
tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_logging_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"bytes\"\n\t\"log/slog\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nfunc TestAssociatorLogging(t *testing.T) {\n\ttype testcase struct {\n\t\tlevel slog.Level\n\t}\n\tfor name, tc := range map[string]testcase{\n\t\t\"debug enabled\":  {level: slog.LevelDebug},\n\t\t\"debug disabled\": {level: slog.LevelInfo},\n\t} {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuf := &bytes.Buffer{}\n\t\t\tlogger := slog.New(slog.NewTextHandler(buf, &slog.HandlerOptions{\n\t\t\t\tLevel: tc.level,\n\t\t\t}))\n\t\t\tassociator := NewAssociator(logger, config.SupportedServices.GetService(\"AWS/Logs\").ToModelDimensionsRegexp(), logGroupResources)\n\t\t\tres, skip := associator.AssociateMetricToResource(&model.Metric{\n\t\t\t\tMetricName: \"DeliveryThrottling\",\n\t\t\t\tNamespace:  \"AWS/Logs\",\n\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t{Name: \"LogGroupName\", Value: \"/aws/lambda/log-group-1\"},\n\t\t\t\t},\n\t\t\t})\n\t\t\trequire.NotNil(t, res)\n\t\t\trequire.False(t, skip)\n\n\t\t\tassertion := require.NotContains\n\t\t\tif tc.level == slog.LevelDebug {\n\t\t\t\tassertion = require.Contains\n\t\t\t}\n\t\t\tassertion(t, buf.String(), 
\"found mapping\")\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_logs_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar logGroup1 = &model.TaggedResource{\n\tARN:       \"arn:aws:logs:eu-central-1:123456789012:log-group:/aws/lambda/log-group-1\",\n\tNamespace: \"AWS/Logs\",\n}\n\nvar logGroup2 = &model.TaggedResource{\n\tARN:       \"arn:aws:logs:eu-central-1:123456789012:log-group:/custom/log-group-2\",\n\tNamespace: \"AWS/Logs\",\n}\n\nvar logGroupResources = []*model.TaggedResource{\n\tlogGroup1,\n\tlogGroup2,\n}\n\nfunc TestAssociatorLogs(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match with log group one dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/Logs\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        logGroupResources,\n\t\t\t\tmetric: 
&model.Metric{\n\t\t\t\t\tMetricName: \"DeliveryThrottling\",\n\t\t\t\t\tNamespace:  \"AWS/Logs\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"LogGroupName\", Value: \"/aws/lambda/log-group-1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: logGroup1,\n\t\t},\n\t\t{\n\t\t\tname: \"should match with log group two dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/Logs\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        logGroupResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"IncomingBytes\",\n\t\t\t\t\tNamespace:  \"AWS/Logs\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"LogGroupName\", Value: \"/custom/log-group-2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: logGroup2,\n\t\t},\n\t\t{\n\t\t\tname: \"should not match with any log group\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/Logs\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        logGroupResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ForwardingLogEvents\",\n\t\t\t\t\tNamespace:  \"AWS/Logs\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"LogGroupName\", Value: \"/custom/nonexisting/log-group-3\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_mediaconvert_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar mediaConvertQueue = &model.TaggedResource{\n\tARN:       \"arn:aws:mediaconvert:eu-west-1:631611414237:queues/a-queue\",\n\tNamespace: \"AWS/MediaConvert\",\n}\n\nvar mediaConvertQueueTwo = &model.TaggedResource{\n\tARN:       \"arn:aws:mediaconvert:eu-west-1:631611414237:queues/a-second-queue\",\n\tNamespace: \"AWS/MediaConvert\",\n}\n\nvar mediaConvertResources = []*model.TaggedResource{\n\tmediaConvertQueue,\n\tmediaConvertQueueTwo,\n}\n\nfunc TestAssociatorMediaConvert(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match with mediaconvert queue one dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: 
config.SupportedServices.GetService(\"AWS/MediaConvert\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        mediaConvertResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"JobsCompletedCount\",\n\t\t\t\t\tNamespace:  \"AWS/MediaConvert\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Queue\", Value: \"arn:aws:mediaconvert:eu-west-1:631611414237:queues/a-queue\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: mediaConvertQueue,\n\t\t},\n\t\t{\n\t\t\tname: \"should match with mediaconvert queue two dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/MediaConvert\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        mediaConvertResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"JobsCompletedCount\",\n\t\t\t\t\tNamespace:  \"AWS/MediaConvert\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Queue\", Value: \"arn:aws:mediaconvert:eu-west-1:631611414237:queues/a-second-queue\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: mediaConvertQueueTwo,\n\t\t},\n\t\t{\n\t\t\tname: \"should not match with any mediaconvert queue\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/MediaConvert\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        mediaConvertResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"JobsCompletedCount\",\n\t\t\t\t\tNamespace:  \"AWS/MediaConvert\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Queue\", Value: \"arn:aws:mediaconvert:eu-west-1:631611414237:queues/a-non-existing-queue\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, 
tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_memorydb_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar memoryDBCluster1 = &model.TaggedResource{\n\tARN:       \"arn:aws:memorydb:us-east-1:123456789012:cluster/mycluster\",\n\tNamespace: \"AWS/MemoryDB\",\n}\n\nvar memoryDBCluster2 = &model.TaggedResource{\n\tARN:       \"arn:aws:memorydb:us-east-1:123456789012:cluster/othercluster\",\n\tNamespace: \"AWS/MemoryDB\",\n}\n\nvar memoryDBClusters = []*model.TaggedResource{\n\tmemoryDBCluster1,\n\tmemoryDBCluster2,\n}\n\nfunc TestAssociatorMemoryDB(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match with ClusterName dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/MemoryDB\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        
memoryDBClusters,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tNamespace:  \"AWS/MemoryDB\",\n\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"ClusterName\", Value: \"mycluster\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: memoryDBCluster1,\n\t\t},\n\t\t{\n\t\t\tname: \"should match another instance with ClusterName dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/MemoryDB\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        memoryDBClusters,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tNamespace:  \"AWS/MemoryDB\",\n\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"ClusterName\", Value: \"othercluster\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: memoryDBCluster2,\n\t\t},\n\t\t{\n\t\t\tname: \"should skip with unmatched ClusterName dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/MemoryDB\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        memoryDBClusters,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tNamespace:  \"AWS/MemoryDB\",\n\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"ClusterName\", Value: \"blahblah\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"should not skip when unmatching because of non-ARN dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/MemoryDB\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        memoryDBClusters,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tNamespace:  \"AWS/MemoryDB\",\n\t\t\t\t\tMetricName: \"BytesUsedForMemoryDB\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"OtherName\", Value: 
\"some-other-value\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_mq_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar rabbitMQBroker = &model.TaggedResource{\n\tARN:       \"arn:aws:mq:us-east-2:123456789012:broker:rabbitmq-broker:b-000-111-222-333\",\n\tNamespace: \"AWS/AmazonMQ\",\n}\n\nvar rabbitMQBrokerWithActiveStyleName = &model.TaggedResource{\n\tARN:       \"arn:aws:mq:us-east-2:123456789012:broker:rabbitmq-broker-0:b-000-111-222-333\",\n\tNamespace: \"AWS/AmazonMQ\",\n}\n\nvar activeMQBroker = &model.TaggedResource{\n\tARN:       \"arn:aws:mq:us-east-2:123456789012:broker:activemq-broker:b-000-111-222-333\",\n\tNamespace: \"AWS/AmazonMQ\",\n}\n\nfunc TestAssociatorMQ(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should match with Broker dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: 
config.SupportedServices.GetService(\"AWS/AmazonMQ\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        []*model.TaggedResource{rabbitMQBroker},\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ProducerCount\",\n\t\t\t\t\tNamespace:  \"AWS/AmazonMQ\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Broker\", Value: \"rabbitmq-broker\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: rabbitMQBroker,\n\t\t},\n\t\t{\n\t\t\tname: \"should match with Broker dimension when broker name has a number suffix and does match ARN\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/AmazonMQ\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        []*model.TaggedResource{rabbitMQBrokerWithActiveStyleName},\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ProducerCount\",\n\t\t\t\t\tNamespace:  \"AWS/AmazonMQ\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Broker\", Value: \"rabbitmq-broker-0\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: rabbitMQBrokerWithActiveStyleName,\n\t\t},\n\t\t{\n\t\t\t// ActiveMQ allows active/standby modes where the `Broker` dimension has values\n\t\t\t// like `brokername-1` and `brokername-2` which don't match the ARN (the dimension\n\t\t\t// regex will extract `Broker` as `brokername` from ARN)\n\t\t\tname: \"should match with Broker dimension when broker name has a number suffix and doesn't match ARN\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/AmazonMQ\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        []*model.TaggedResource{activeMQBroker},\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ProducerCount\",\n\t\t\t\t\tNamespace:  \"AWS/AmazonMQ\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Broker\", Value: 
\"activemq-broker-1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: activeMQBroker,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_qldb_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar validQldbInstance = &model.TaggedResource{\n\tARN:       \"arn:aws:qldb:us-east-1:123456789012:ledger/test1\",\n\tNamespace: \"AWS/QLDB\",\n}\n\nfunc TestAssociatorQLDB(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should skip with unmatched ledger name dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/QLDB\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        []*model.TaggedResource{validQldbInstance},\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tNamespace:  \"AWS/QLDB\",\n\t\t\t\t\tMetricName: \"JournalStorage\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"LedgerName\", Value: \"test2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:    
 true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"should match with ledger name dimension when QLDB arn is valid\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/QLDB\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        []*model.TaggedResource{validQldbInstance},\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tNamespace:  \"AWS/QLDB\",\n\t\t\t\t\tMetricName: \"JournalStorage\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"LedgerName\", Value: \"test1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: validQldbInstance,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_redshift_serverless_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar workgroup = &model.TaggedResource{\n\tARN:       \"arn:aws:redshift-serverless:us-east-1:123456789012:workgroup/my-workgroup1\",\n\tNamespace: \"AWS/Redshift-Serverless\",\n}\n\nvar namespace = &model.TaggedResource{\n\tARN:       \"arn:aws:redshift-serverless:us-east-1:123456789012:namespace/my-namespace1\",\n\tNamespace: \"AWS/Redshift-Serverless\",\n}\n\nvar redshiftResources = []*model.TaggedResource{\n\tworkgroup,\n\tnamespace,\n}\n\nfunc TestAssociatorRedshiftServerless(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"should not match nor skip with any workgroup none ARN dimension\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: 
config.SupportedServices.GetService(\"AWS/Redshift-Serverless\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        redshiftResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ComputeSeconds\",\n\t\t\t\t\tNamespace:  \"AWS/Redshift-Serverless\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Workgroup\", Value: \"my-nonexistent-workgroup-test1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_sagemaker_endpoint_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar sagemakerEndpointHealthOne = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:endpoint/example-endpoint-one\",\n\tNamespace: \"/aws/sagemaker/Endpoints\",\n}\n\nvar sagemakerEndpointHealthTwo = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:endpoint/example-endpoint-two\",\n\tNamespace: \"/aws/sagemaker/Endpoints\",\n}\n\nvar sagemakerHealthResources = []*model.TaggedResource{\n\tsagemakerEndpointHealthOne,\n\tsagemakerEndpointHealthTwo,\n}\n\nfunc TestAssociatorSagemakerEndpoint(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"2 dimensions should match\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: 
config.SupportedServices.GetService(\"/aws/sagemaker/Endpoints\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerHealthResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"MemoryUtilization\",\n\t\t\t\t\tNamespace:  \"/aws/sagemaker/Endpoints\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"EndpointName\", Value: \"example-endpoint-two\"},\n\t\t\t\t\t\t{Name: \"VariantName\", Value: \"example-endpoint-two-variant-one\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: sagemakerEndpointHealthTwo,\n\t\t},\n\t\t{\n\t\t\tname: \"2 dimensions should not match\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"/aws/sagemaker/Endpoints\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerHealthResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"MemoryUtilization\",\n\t\t\t\t\tNamespace:  \"/aws/sagemaker/Endpoints\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"EndpointName\", Value: \"example-endpoint-three\"},\n\t\t\t\t\t\t{Name: \"VariantName\", Value: \"example-endpoint-three-variant-one\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_sagemaker_inf_component_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar sagemakerInfComponentJobOne = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:inference-component/example-inference-component-one\",\n\tNamespace: \"/aws/sagemaker/InferenceComponents\",\n}\n\nvar sagemakerInfComponentJobResources = []*model.TaggedResource{\n\tsagemakerInfComponentJobOne,\n}\n\nfunc TestAssociatorSagemakerInfComponentJob(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"1 dimension should not match but not skip\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"/aws/sagemaker/InferenceComponents\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerInfComponentJobResources,\n\t\t\t\tmetric: 
&model.Metric{\n\t\t\t\t\tMetricName: \"CPUUtilizationNormalized\",\n\t\t\t\t\tNamespace:  \"/aws/sagemaker/InferenceComponents\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"InferenceComponentName\", Value: \"example-inference-component-one\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: sagemakerInfComponentJobOne,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_sagemaker_inf_rec_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar sagemakerInfRecJobOne = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:inference-recommendations-job/example-inf-rec-job-one\",\n\tNamespace: \"/aws/sagemaker/InferenceRecommendationsJobs\",\n}\n\nvar sagemakerInfRecJobResources = []*model.TaggedResource{\n\tsagemakerInfRecJobOne,\n}\n\nfunc TestAssociatorSagemakerInfRecJob(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"1 dimension should not match but not skip\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"/aws/sagemaker/InferenceRecommendationsJobs\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerInfRecJobResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: 
\"ClientInvocations\",\n\t\t\t\t\tNamespace:  \"/aws/sagemaker/InferenceRecommendationsJobs\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"JobName\", Value: \"example-inf-rec-job-one\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: sagemakerInfRecJobOne,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_sagemaker_pipeline_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar sagemakerPipelineOne = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:pipeline/example-pipeline-one\",\n\tNamespace: \"AWS/Sagemaker/ModelBuildingPipeline\",\n}\n\nvar sagemakerPipelineTwo = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:pipeline/example-pipeline-two\",\n\tNamespace: \"AWS/Sagemaker/ModelBuildingPipeline\",\n}\n\nvar sagemakerPipelineResources = []*model.TaggedResource{\n\tsagemakerPipelineOne,\n\tsagemakerPipelineTwo,\n}\n\nfunc TestAssociatorSagemakerPipeline(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"2 dimensions should match\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: 
config.SupportedServices.GetService(\"AWS/Sagemaker/ModelBuildingPipeline\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerPipelineResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ExecutionStarted\",\n\t\t\t\t\tNamespace:  \"AWS/Sagemaker/ModelBuildingPipeline\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"PipelineName\", Value: \"example-pipeline-one\"},\n\t\t\t\t\t\t{Name: \"StepName\", Value: \"example-pipeline-one-step-two\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: sagemakerPipelineOne,\n\t\t},\n\t\t{\n\t\t\tname: \"1 dimension should match\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/Sagemaker/ModelBuildingPipeline\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerPipelineResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ExecutionStarted\",\n\t\t\t\t\tNamespace:  \"AWS/Sagemaker/ModelBuildingPipeline\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"PipelineName\", Value: \"example-pipeline-two\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: sagemakerPipelineTwo,\n\t\t},\n\t\t{\n\t\t\tname: \"2 dimensions should not match\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/Sagemaker/ModelBuildingPipeline\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerPipelineResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ExecutionStarted\",\n\t\t\t\t\tNamespace:  \"AWS/Sagemaker/ModelBuildingPipeline\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"PipelineName\", Value: \"example-pipeline-three\"},\n\t\t\t\t\t\t{Name: \"StepName\", Value: \"example-pipeline-three-step-two\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases 
{\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_sagemaker_processing_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar sagemakerProcessingJobOne = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:processing-job/example-processing-job-one\",\n\tNamespace: \"/aws/sagemaker/ProcessingJobs\",\n}\n\nvar sagemakerProcessingJobResources = []*model.TaggedResource{\n\tsagemakerProcessingJobOne,\n}\n\nfunc TestAssociatorSagemakerProcessingJob(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"1 dimension should not match but not skip\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"/aws/sagemaker/ProcessingJobs\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerProcessingJobResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: 
\"CPUUtilization\",\n\t\t\t\t\tNamespace:  \"/aws/sagemaker/ProcessingJobs\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Host\", Value: \"example-processing-job-one/algo-1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_sagemaker_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar sagemakerEndpointInvocationOne = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:endpoint/example-endpoint-one\",\n\tNamespace: \"AWS/SageMaker\",\n}\n\nvar sagemakerEndpointInvocationTwo = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:endpoint/example-endpoint-two\",\n\tNamespace: \"AWS/SageMaker\",\n}\n\nvar sagemakerEndpointInvocationUpper = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:endpoint/example-endpoint-upper\",\n\tNamespace: \"AWS/SageMaker\",\n}\n\nvar sagemakerInferenceComponentInvocationOne = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:inference-component/example-inference-component-one\",\n\tNamespace: \"AWS/SageMaker\",\n}\n\nvar sagemakerInferenceComponentInvocationUpper = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:inference-component/example-inference-component-upper\",\n\tNamespace: \"AWS/SageMaker\",\n}\n\nvar sagemakerInvocationResources = 
[]*model.TaggedResource{\n\tsagemakerEndpointInvocationOne,\n\tsagemakerEndpointInvocationTwo,\n\tsagemakerEndpointInvocationUpper,\n\tsagemakerInferenceComponentInvocationOne,\n\tsagemakerInferenceComponentInvocationUpper,\n}\n\nfunc TestAssociatorSagemaker(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"3 dimensions should match\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/SageMaker\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerInvocationResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"Invocations\",\n\t\t\t\t\tNamespace:  \"AWS/SageMaker\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"EndpointName\", Value: \"example-endpoint-one\"},\n\t\t\t\t\t\t{Name: \"VariantName\", Value: \"example-endpoint-one-variant-one\"},\n\t\t\t\t\t\t{Name: \"EndpointConfigName\", Value: \"example-endpoint-one-endpoint-config\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: sagemakerEndpointInvocationOne,\n\t\t},\n\t\t{\n\t\t\tname: \"2 dimensions should match\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/SageMaker\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerInvocationResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"Invocations\",\n\t\t\t\t\tNamespace:  \"AWS/SageMaker\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"EndpointName\", Value: \"example-endpoint-two\"},\n\t\t\t\t\t\t{Name: \"VariantName\", Value: \"example-endpoint-two-variant-one\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:  
   false,\n\t\t\texpectedResource: sagemakerEndpointInvocationTwo,\n\t\t},\n\t\t{\n\t\t\tname: \"2 dimensions should not match\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/SageMaker\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerInvocationResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"Invocations\",\n\t\t\t\t\tNamespace:  \"AWS/SageMaker\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"EndpointName\", Value: \"example-endpoint-three\"},\n\t\t\t\t\t\t{Name: \"VariantName\", Value: \"example-endpoint-three-variant-one\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"2 dimensions should not match in Upper case\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/SageMaker\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerInvocationResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ModelLatency\",\n\t\t\t\t\tNamespace:  \"AWS/SageMaker\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"EndpointName\", Value: \"Example-Endpoint-Upper\"},\n\t\t\t\t\t\t{Name: \"VariantName\", Value: \"example-endpoint-two-variant-one\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     true,\n\t\t\texpectedResource: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"inference component match\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/SageMaker\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerInvocationResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ModelLatency\",\n\t\t\t\t\tNamespace:  \"AWS/SageMaker\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"InferenceComponentName\", Value: \"example-inference-component-one\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: 
sagemakerInferenceComponentInvocationOne,\n\t\t},\n\t\t{\n\t\t\tname: \"inference component match in Upper case\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"AWS/SageMaker\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerInvocationResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"ModelLatency\",\n\t\t\t\t\tNamespace:  \"AWS/SageMaker\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"InferenceComponentName\", Value: \"Example-Inference-Component-Upper\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: sagemakerInferenceComponentInvocationUpper,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_sagemaker_training_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar sagemakerTrainingJobOne = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:training-job/example-training-job-one\",\n\tNamespace: \"/aws/sagemaker/TrainingJobs\",\n}\n\nvar sagemakerTrainingJobResources = []*model.TaggedResource{\n\tsagemakerTrainingJobOne,\n}\n\nfunc TestAssociatorSagemakerTrainingJob(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"1 dimension should not skip\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"/aws/sagemaker/TrainingJobs\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerTrainingJobResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\tNamespace:  
\"/aws/sagemaker/TrainingJobs\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Host\", Value: \"example-training-job-one/algo-1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/maxdimassociator/associator_sagemaker_transform_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage maxdimassociator\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar sagemakerTransformJobOne = &model.TaggedResource{\n\tARN:       \"arn:aws:sagemaker:us-west-2:123456789012:transform-job/example-transform-job-one\",\n\tNamespace: \"/aws/sagemaker/TransformJobs\",\n}\n\nvar sagemakerTransformJobResources = []*model.TaggedResource{\n\tsagemakerTransformJobOne,\n}\n\nfunc TestAssociatorSagemakerTransformJob(t *testing.T) {\n\ttype args struct {\n\t\tdimensionRegexps []model.DimensionsRegexp\n\t\tresources        []*model.TaggedResource\n\t\tmetric           *model.Metric\n\t}\n\n\ttype testCase struct {\n\t\tname             string\n\t\targs             args\n\t\texpectedSkip     bool\n\t\texpectedResource *model.TaggedResource\n\t}\n\n\ttestcases := []testCase{\n\t\t{\n\t\t\tname: \"1 dimension should not match but not skip\",\n\t\t\targs: args{\n\t\t\t\tdimensionRegexps: config.SupportedServices.GetService(\"/aws/sagemaker/TransformJobs\").ToModelDimensionsRegexp(),\n\t\t\t\tresources:        sagemakerTransformJobResources,\n\t\t\t\tmetric: &model.Metric{\n\t\t\t\t\tMetricName: 
\"CPUUtilization\",\n\t\t\t\t\tNamespace:  \"/aws/sagemaker/TransformJobs\",\n\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t{Name: \"Host\", Value: \"example-transform-job-one/algo-1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedSkip:     false,\n\t\t\texpectedResource: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassociator := NewAssociator(promslog.NewNopLogger(), tc.args.dimensionRegexps, tc.args.resources)\n\t\t\tres, skip := associator.AssociateMetricToResource(tc.args.metric)\n\t\t\trequire.Equal(t, tc.expectedSkip, skip)\n\t\t\trequire.Equal(t, tc.expectedResource, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/resourcemetadata/resource.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage resourcemetadata\n\nimport (\n\t\"context\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype Resource struct {\n\t// Name is an identifiable value for the resource and is variable dependent on the match made\n\t//\tIt will be the AWS ARN (Amazon Resource Name) if a unique resource was found\n\t//  It will be \"global\" if a unique resource was not found\n\t//  CustomNamespaces will have the custom namespace Name\n\tName string\n\t// Tags is a set of tags associated to the resource\n\tTags []model.Tag\n}\n\ntype Resources struct {\n\tStaticResource      *Resource\n\tAssociatedResources []*Resource\n}\n\ntype MetricResourceEnricher interface {\n\tEnrich(ctx context.Context, metrics []*model.Metric) ([]*model.Metric, Resources)\n}\n"
  },
  {
    "path": "pkg/job/scrape.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage job\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"sync\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics\"\n\temconfig \"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/internal/enhancedmetrics/config\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/getmetricdata\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nfunc ScrapeAwsData(\n\tctx context.Context,\n\tlogger *slog.Logger,\n\tjobsCfg model.JobsConfig,\n\tfactory clients.Factory,\n\tmetricsPerQuery int,\n\tcloudwatchConcurrency cloudwatch.ConcurrencyConfig,\n\ttaggingAPIConcurrency int,\n) ([]model.TaggedResourceResult, []model.CloudwatchMetricResult) {\n\tmux := &sync.Mutex{}\n\tcwData := make([]model.CloudwatchMetricResult, 0)\n\tawsInfoData := make([]model.TaggedResourceResult, 0)\n\tvar wg sync.WaitGroup\n\n\tvar enhancedMetricsService *enhancedmetrics.Service\n\tvar enhancedMetricsInitFailed bool\n\n\tfor _, discoveryJob := range jobsCfg.DiscoveryJobs {\n\t\t// initialize enhanced 
metrics service only if:\n\t\t// - the current discovery job has enhanced metrics configured\n\t\t// - the enhanced metrics service is not already initialized\n\t\t// - a previous initialization attempt has not already failed\n\t\tif discoveryJob.HasEnhancedMetrics() && enhancedMetricsService == nil && !enhancedMetricsInitFailed {\n\t\t\tif configProvider, ok := factory.(emconfig.RegionalConfigProvider); ok {\n\t\t\t\tenhancedMetricsService = enhancedmetrics.NewService(\n\t\t\t\t\tconfigProvider,\n\t\t\t\t\tenhancedmetrics.DefaultEnhancedMetricServiceRegistry,\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\tenhancedMetricsInitFailed = true\n\t\t\t\tlogger.Warn(\"Couldn't initialize enhanced metrics service\", \"factory_type\", fmt.Sprintf(\"%T\", factory), \"err\", \"does not implement GetAWSRegionalConfig\")\n\t\t\t}\n\t\t}\n\n\t\tfor _, role := range discoveryJob.Roles {\n\t\t\tfor _, region := range discoveryJob.Regions {\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func(discoveryJob model.DiscoveryJob, region string, role model.Role) {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\tjobLogger := logger.With(\"namespace\", discoveryJob.Namespace, \"region\", region, \"arn\", role.RoleArn)\n\t\t\t\t\taccountID, err := factory.GetAccountClient(region, role).GetAccount(ctx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tjobLogger.Error(\"Couldn't get account Id\", \"err\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tjobLogger = jobLogger.With(\"account\", accountID)\n\n\t\t\t\t\taccountAlias, err := factory.GetAccountClient(region, role).GetAccountAlias(ctx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tjobLogger.Warn(\"Couldn't get account alias\", \"err\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tcloudwatchClient := factory.GetCloudwatchClient(region, role, cloudwatchConcurrency)\n\t\t\t\t\tgmdProcessor := getmetricdata.NewDefaultProcessor(logger, cloudwatchClient, metricsPerQuery, cloudwatchConcurrency.GetMetricData)\n\n\t\t\t\t\tresources, metrics := 
runDiscoveryJob(\n\t\t\t\t\t\tctx,\n\t\t\t\t\t\tjobLogger,\n\t\t\t\t\t\tdiscoveryJob,\n\t\t\t\t\t\tregion,\n\t\t\t\t\t\tfactory.GetTaggingClient(region, role, taggingAPIConcurrency),\n\t\t\t\t\t\tcloudwatchClient,\n\t\t\t\t\t\tgmdProcessor,\n\t\t\t\t\t\tenhancedMetricsService,\n\t\t\t\t\t\trole,\n\t\t\t\t\t)\n\n\t\t\t\t\taddDataToOutput := len(metrics) != 0\n\t\t\t\t\tif config.FlagsFromCtx(ctx).IsFeatureEnabled(config.AlwaysReturnInfoMetrics) {\n\t\t\t\t\t\taddDataToOutput = addDataToOutput || len(resources) != 0\n\t\t\t\t\t}\n\t\t\t\t\tif addDataToOutput {\n\t\t\t\t\t\tsc := &model.ScrapeContext{\n\t\t\t\t\t\t\tRegion:       region,\n\t\t\t\t\t\t\tAccountID:    accountID,\n\t\t\t\t\t\t\tAccountAlias: accountAlias,\n\t\t\t\t\t\t\tCustomTags:   discoveryJob.CustomTags,\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmetricResult := model.CloudwatchMetricResult{\n\t\t\t\t\t\t\tContext: sc,\n\t\t\t\t\t\t\tData:    metrics,\n\t\t\t\t\t\t}\n\t\t\t\t\t\tresourceResult := model.TaggedResourceResult{\n\t\t\t\t\t\t\tData: resources,\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif discoveryJob.IncludeContextOnInfoMetrics {\n\t\t\t\t\t\t\tresourceResult.Context = sc\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tmux.Lock()\n\t\t\t\t\t\tawsInfoData = append(awsInfoData, resourceResult)\n\t\t\t\t\t\tcwData = append(cwData, metricResult)\n\t\t\t\t\t\tmux.Unlock()\n\t\t\t\t\t}\n\t\t\t\t}(discoveryJob, region, role)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, staticJob := range jobsCfg.StaticJobs {\n\t\tfor _, role := range staticJob.Roles {\n\t\t\tfor _, region := range staticJob.Regions {\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func(staticJob model.StaticJob, region string, role model.Role) {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\tjobLogger := logger.With(\"static_job_name\", staticJob.Name, \"region\", region, \"arn\", role.RoleArn)\n\t\t\t\t\taccountID, err := factory.GetAccountClient(region, role).GetAccount(ctx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tjobLogger.Error(\"Couldn't get account Id\", \"err\", 
err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tjobLogger = jobLogger.With(\"account\", accountID)\n\n\t\t\t\t\taccountAlias, err := factory.GetAccountClient(region, role).GetAccountAlias(ctx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tjobLogger.Warn(\"Couldn't get account alias\", \"err\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tmetrics := runStaticJob(ctx, jobLogger, staticJob, factory.GetCloudwatchClient(region, role, cloudwatchConcurrency))\n\t\t\t\t\tmetricResult := model.CloudwatchMetricResult{\n\t\t\t\t\t\tContext: &model.ScrapeContext{\n\t\t\t\t\t\t\tRegion:       region,\n\t\t\t\t\t\t\tAccountID:    accountID,\n\t\t\t\t\t\t\tAccountAlias: accountAlias,\n\t\t\t\t\t\t\tCustomTags:   staticJob.CustomTags,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tData: metrics,\n\t\t\t\t\t}\n\t\t\t\t\tmux.Lock()\n\t\t\t\t\tcwData = append(cwData, metricResult)\n\t\t\t\t\tmux.Unlock()\n\t\t\t\t}(staticJob, region, role)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, customNamespaceJob := range jobsCfg.CustomNamespaceJobs {\n\t\tfor _, role := range customNamespaceJob.Roles {\n\t\t\tfor _, region := range customNamespaceJob.Regions {\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func(customNamespaceJob model.CustomNamespaceJob, region string, role model.Role) {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\tjobLogger := logger.With(\"custom_metric_namespace\", customNamespaceJob.Namespace, \"region\", region, \"arn\", role.RoleArn)\n\t\t\t\t\taccountID, err := factory.GetAccountClient(region, role).GetAccount(ctx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tjobLogger.Error(\"Couldn't get account Id\", \"err\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tjobLogger = jobLogger.With(\"account\", accountID)\n\n\t\t\t\t\taccountAlias, err := factory.GetAccountClient(region, role).GetAccountAlias(ctx)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tjobLogger.Warn(\"Couldn't get account alias\", \"err\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tcloudwatchClient := factory.GetCloudwatchClient(region, role, cloudwatchConcurrency)\n\t\t\t\t\tgmdProcessor := 
getmetricdata.NewDefaultProcessor(logger, cloudwatchClient, metricsPerQuery, cloudwatchConcurrency.GetMetricData)\n\t\t\t\t\tmetrics := runCustomNamespaceJob(ctx, jobLogger, customNamespaceJob, cloudwatchClient, gmdProcessor)\n\t\t\t\t\tmetricResult := model.CloudwatchMetricResult{\n\t\t\t\t\t\tContext: &model.ScrapeContext{\n\t\t\t\t\t\t\tRegion:       region,\n\t\t\t\t\t\t\tAccountID:    accountID,\n\t\t\t\t\t\t\tAccountAlias: accountAlias,\n\t\t\t\t\t\t\tCustomTags:   customNamespaceJob.CustomTags,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tData: metrics,\n\t\t\t\t\t}\n\t\t\t\t\tmux.Lock()\n\t\t\t\t\tcwData = append(cwData, metricResult)\n\t\t\t\t\tmux.Unlock()\n\t\t\t\t}(customNamespaceJob, region, role)\n\t\t\t}\n\t\t}\n\t}\n\twg.Wait()\n\treturn awsInfoData, cwData\n}\n"
  },
  {
    "path": "pkg/job/scraper.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage job\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"sync\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/cloudwatchrunner\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/account\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype Scraper struct {\n\tjobsCfg       model.JobsConfig\n\tlogger        *slog.Logger\n\trunnerFactory runnerFactory\n}\n\ntype runnerFactory interface {\n\tGetAccountClient(region string, role model.Role) account.Client\n\tNewResourceMetadataRunner(logger *slog.Logger, region string, role model.Role) ResourceMetadataRunner\n\tNewCloudWatchRunner(logger *slog.Logger, region string, role model.Role, job cloudwatchrunner.Job) CloudwatchRunner\n}\n\ntype ResourceMetadataRunner interface {\n\tRun(ctx context.Context, region string, job model.DiscoveryJob) ([]*model.TaggedResource, error)\n}\n\ntype CloudwatchRunner interface {\n\tRun(ctx context.Context) ([]*model.CloudwatchData, error)\n}\n\nfunc NewScraper(logger *slog.Logger,\n\tjobsCfg model.JobsConfig,\n\trunnerFactory runnerFactory,\n) *Scraper {\n\treturn &Scraper{\n\t\trunnerFactory: runnerFactory,\n\t\tlogger:        logger,\n\t\tjobsCfg:       jobsCfg,\n\t}\n}\n\ntype ErrorType string\n\nvar (\n\tAccountErr              ErrorType = \"Account for 
job was not found\"\n\tResourceMetadataErr     ErrorType = \"Failed to run resource metadata for job\"\n\tCloudWatchCollectionErr ErrorType = \"Failed to gather cloudwatch metrics for job\"\n)\n\ntype Account struct {\n\tID    string\n\tAlias string\n}\n\nfunc (s Scraper) Scrape(ctx context.Context) ([]model.TaggedResourceResult, []model.CloudwatchMetricResult, []Error) {\n\t// Setup so we only do one GetAccount call per region + role combo when running jobs\n\troleRegionToAccount := map[model.Role]map[string]func() (Account, error){}\n\tjobConfigVisitor(s.jobsCfg, func(_ any, role model.Role, region string) {\n\t\tif _, exists := roleRegionToAccount[role]; !exists {\n\t\t\troleRegionToAccount[role] = map[string]func() (Account, error){}\n\t\t}\n\t\troleRegionToAccount[role][region] = sync.OnceValues[Account, error](func() (Account, error) {\n\t\t\tclient := s.runnerFactory.GetAccountClient(region, role)\n\t\t\taccountID, err := client.GetAccount(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn Account{}, fmt.Errorf(\"failed to get Account: %w\", err)\n\t\t\t}\n\t\t\ta := Account{\n\t\t\t\tID: accountID,\n\t\t\t}\n\t\t\taccountAlias, err := client.GetAccountAlias(ctx)\n\t\t\tif err != nil {\n\t\t\t\ts.logger.Warn(\"Failed to get optional account alias from account\", \"err\", err, \"account_id\", accountID)\n\t\t\t} else {\n\t\t\t\ta.Alias = accountAlias\n\t\t\t}\n\t\t\treturn a, nil\n\t\t})\n\t})\n\n\tvar wg sync.WaitGroup\n\tmux := &sync.Mutex{}\n\tjobErrors := make([]Error, 0)\n\tmetricResults := make([]model.CloudwatchMetricResult, 0)\n\tresourceResults := make([]model.TaggedResourceResult, 0)\n\ts.logger.Debug(\"Starting job runs\")\n\n\tjobConfigVisitor(s.jobsCfg, func(job any, role model.Role, region string) {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tvar namespace string\n\t\t\tjobAction(s.logger, job, func(job model.DiscoveryJob) {\n\t\t\t\tnamespace = job.Namespace\n\t\t\t}, func(job model.CustomNamespaceJob) {\n\t\t\t\tnamespace = 
job.Namespace\n\t\t\t})\n\t\t\tjobContext := JobContext{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tRegion:    region,\n\t\t\t\tRoleARN:   role.RoleArn,\n\t\t\t}\n\t\t\tjobLogger := s.logger.With(\"namespace\", jobContext.Namespace, \"region\", jobContext.Region, \"arn\", jobContext.RoleARN)\n\n\t\t\taccount, err := roleRegionToAccount[role][region]()\n\t\t\tif err != nil {\n\t\t\t\tjobError := NewError(jobContext, AccountErr, err)\n\t\t\t\tmux.Lock()\n\t\t\t\tjobErrors = append(jobErrors, jobError)\n\t\t\t\tmux.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tjobContext.Account = account\n\t\t\tjobLogger = jobLogger.With(\"account_id\", jobContext.Account.ID)\n\n\t\t\tvar jobToRun cloudwatchrunner.Job\n\t\t\tjobAction(jobLogger, job,\n\t\t\t\tfunc(job model.DiscoveryJob) {\n\t\t\t\t\tjobLogger.Debug(\"Starting resource discovery\")\n\t\t\t\t\trmRunner := s.runnerFactory.NewResourceMetadataRunner(jobLogger, region, role)\n\t\t\t\t\tresources, err := rmRunner.Run(ctx, region, job)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tjobError := NewError(jobContext, ResourceMetadataErr, err)\n\t\t\t\t\t\tmux.Lock()\n\t\t\t\t\t\tjobErrors = append(jobErrors, jobError)\n\t\t\t\t\t\tmux.Unlock()\n\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif len(resources) > 0 {\n\t\t\t\t\t\tresult := model.TaggedResourceResult{\n\t\t\t\t\t\t\tContext: jobContext.ToScrapeContext(job.CustomTags),\n\t\t\t\t\t\t\tData:    resources,\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmux.Lock()\n\t\t\t\t\t\tresourceResults = append(resourceResults, result)\n\t\t\t\t\t\tmux.Unlock()\n\t\t\t\t\t} else {\n\t\t\t\t\t\tjobLogger.Debug(\"No tagged resources\")\n\t\t\t\t\t}\n\t\t\t\t\tjobLogger.Debug(\"Resource discovery finished\", \"number_of_discovered_resources\", len(resources))\n\n\t\t\t\t\tjobToRun = cloudwatchrunner.DiscoveryJob{Job: job, Resources: resources}\n\t\t\t\t}, func(job model.CustomNamespaceJob) {\n\t\t\t\t\tjobToRun = cloudwatchrunner.CustomNamespaceJob{Job: job}\n\t\t\t\t},\n\t\t\t)\n\t\t\tif jobToRun == nil 
{\n\t\t\t\tjobLogger.Debug(\"Ending job run early due to job error see job errors\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tjobLogger.Debug(\"Starting cloudwatch metrics runner\")\n\t\t\tcwRunner := s.runnerFactory.NewCloudWatchRunner(jobLogger, region, role, jobToRun)\n\t\t\tmetricResult, err := cwRunner.Run(ctx)\n\t\t\tif err != nil {\n\t\t\t\tjobError := NewError(jobContext, CloudWatchCollectionErr, err)\n\t\t\t\tmux.Lock()\n\t\t\t\tjobErrors = append(jobErrors, jobError)\n\t\t\t\tmux.Unlock()\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif len(metricResult) == 0 {\n\t\t\t\tjobLogger.Debug(\"No metrics data found\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tjobLogger.Debug(\"Job run finished\", \"number_of_metrics\", len(metricResult))\n\n\t\t\tresult := model.CloudwatchMetricResult{\n\t\t\t\tContext: jobContext.ToScrapeContext(jobToRun.CustomTags()),\n\t\t\t\tData:    metricResult,\n\t\t\t}\n\n\t\t\tmux.Lock()\n\t\t\tdefer mux.Unlock()\n\t\t\tmetricResults = append(metricResults, result)\n\t\t}()\n\t})\n\twg.Wait()\n\ts.logger.Debug(\"Finished job runs\", \"resource_results\", len(resourceResults), \"metric_results\", len(metricResults))\n\treturn resourceResults, metricResults, jobErrors\n}\n\n// Walk through each custom namespace and discovery jobs and take an action\nfunc jobConfigVisitor(jobsCfg model.JobsConfig, action func(job any, role model.Role, region string)) {\n\tfor _, job := range jobsCfg.DiscoveryJobs {\n\t\tfor _, role := range job.Roles {\n\t\t\tfor _, region := range job.Regions {\n\t\t\t\taction(job, role, region)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, job := range jobsCfg.CustomNamespaceJobs {\n\t\tfor _, role := range job.Roles {\n\t\t\tfor _, region := range job.Regions {\n\t\t\t\taction(job, role, region)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Take an action depending on the job type, only supports discovery and custom job types\nfunc jobAction(logger *slog.Logger, job any, discovery func(job model.DiscoveryJob), custom func(job model.CustomNamespaceJob)) {\n\t// Type 
switches are free https://stackoverflow.com/a/28027945\n\tswitch typedJob := job.(type) {\n\tcase model.DiscoveryJob:\n\t\tdiscovery(typedJob)\n\tcase model.CustomNamespaceJob:\n\t\tcustom(typedJob)\n\tdefault:\n\t\tlogger.Error(\"Unexpected job type\", \"err\", fmt.Errorf(\"config type of %T is not supported\", typedJob))\n\t\treturn\n\t}\n}\n\n// JobContext exists to track data we want for logging, errors, or other output context that's learned as the job runs\n// This makes it easier to track the data additively and morph it to the final shape necessary be it a model.ScrapeContext\n// or an Error. It's an exported type for tests but is not part of the public interface\ntype JobContext struct { //nolint:revive\n\tAccount   Account\n\tNamespace string\n\tRegion    string\n\tRoleARN   string\n}\n\nfunc (jc JobContext) ToScrapeContext(customTags []model.Tag) *model.ScrapeContext {\n\treturn &model.ScrapeContext{\n\t\tAccountID:    jc.Account.ID,\n\t\tRegion:       jc.Region,\n\t\tCustomTags:   customTags,\n\t\tAccountAlias: jc.Account.Alias,\n\t}\n}\n\ntype Error struct {\n\tJobContext\n\tErrorType ErrorType\n\tErr       error\n}\n\nfunc NewError(context JobContext, errorType ErrorType, err error) Error {\n\treturn Error{\n\t\tJobContext: context,\n\t\tErrorType:  errorType,\n\t\tErr:        err,\n\t}\n}\n\nfunc (e Error) ToLoggerKeyVals() []interface{} {\n\treturn []interface{}{\n\t\t\"account_id\", e.Account.ID,\n\t\t\"namespace\", e.Namespace,\n\t\t\"region\", e.Region,\n\t\t\"role_arn\", e.RoleARN,\n\t}\n}\n"
  },
  {
    "path": "pkg/job/scraper_test.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage job_test\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"log/slog\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/r3labs/diff/v3\"\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/account\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/job/cloudwatchrunner\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\ntype testRunnerFactory struct {\n\tGetAccountAliasFunc func() (string, error)\n\tGetAccountFunc      func() (string, error)\n\tMetadataRunFunc     func(ctx context.Context, region string, job model.DiscoveryJob) ([]*model.TaggedResource, error)\n\tCloudwatchRunFunc   func(ctx context.Context, job cloudwatchrunner.Job) ([]*model.CloudwatchData, error)\n}\n\nfunc (t *testRunnerFactory) GetAccountAlias(context.Context) (string, error) {\n\treturn t.GetAccountAliasFunc()\n}\n\nfunc (t *testRunnerFactory) GetAccount(context.Context) (string, error) {\n\treturn t.GetAccountFunc()\n}\n\nfunc (t *testRunnerFactory) Run(ctx context.Context, region string, job model.DiscoveryJob) ([]*model.TaggedResource, error) {\n\treturn t.MetadataRunFunc(ctx, 
region, job)\n}\n\nfunc (t *testRunnerFactory) GetAccountClient(string, model.Role) account.Client {\n\treturn t\n}\n\nfunc (t *testRunnerFactory) NewResourceMetadataRunner(*slog.Logger, string, model.Role) job.ResourceMetadataRunner {\n\treturn &testMetadataRunner{RunFunc: t.MetadataRunFunc}\n}\n\nfunc (t *testRunnerFactory) NewCloudWatchRunner(_ *slog.Logger, _ string, _ model.Role, job cloudwatchrunner.Job) job.CloudwatchRunner {\n\treturn &testCloudwatchRunner{Job: job, RunFunc: t.CloudwatchRunFunc}\n}\n\ntype testMetadataRunner struct {\n\tRunFunc func(ctx context.Context, region string, job model.DiscoveryJob) ([]*model.TaggedResource, error)\n}\n\nfunc (t testMetadataRunner) Run(ctx context.Context, region string, job model.DiscoveryJob) ([]*model.TaggedResource, error) {\n\treturn t.RunFunc(ctx, region, job)\n}\n\ntype testCloudwatchRunner struct {\n\tRunFunc func(ctx context.Context, job cloudwatchrunner.Job) ([]*model.CloudwatchData, error)\n\tJob     cloudwatchrunner.Job\n}\n\nfunc (t testCloudwatchRunner) Run(ctx context.Context) ([]*model.CloudwatchData, error) {\n\treturn t.RunFunc(ctx, t.Job)\n}\n\nfunc TestScrapeRunner_Run(t *testing.T) {\n\ttests := []struct {\n\t\tname                string\n\t\tjobsCfg             model.JobsConfig\n\t\tgetAccountFunc      func() (string, error)\n\t\tgetAccountAliasFunc func() (string, error)\n\t\tmetadataRunFunc     func(ctx context.Context, region string, job model.DiscoveryJob) ([]*model.TaggedResource, error)\n\t\tcloudwatchRunFunc   func(ctx context.Context, job cloudwatchrunner.Job) ([]*model.CloudwatchData, error)\n\t\texpectedResources   []model.TaggedResourceResult\n\t\texpectedMetrics     []model.CloudwatchMetricResult\n\t\texpectedErrs        []job.Error\n\t}{\n\t\t{\n\t\t\tname: \"can run a discovery job\",\n\t\t\tjobsCfg: model.JobsConfig{\n\t\t\t\tDiscoveryJobs: []model.DiscoveryJob{\n\t\t\t\t\t{\n\t\t\t\t\t\tRegions:   []string{\"us-east-1\"},\n\t\t\t\t\t\tNamespace: 
\"aws-namespace\",\n\t\t\t\t\t\tRoles: []model.Role{\n\t\t\t\t\t\t\t{RoleArn: \"aws-arn-1\", ExternalID: \"external-id-1\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgetAccountFunc: func() (string, error) {\n\t\t\t\treturn \"aws-account-1\", nil\n\t\t\t},\n\t\t\tgetAccountAliasFunc: func() (string, error) {\n\t\t\t\treturn \"my-aws-account\", nil\n\t\t\t},\n\t\t\tmetadataRunFunc: func(_ context.Context, _ string, _ model.DiscoveryJob) ([]*model.TaggedResource, error) {\n\t\t\t\treturn []*model.TaggedResource{{\n\t\t\t\t\tARN: \"resource-1\", Namespace: \"aws-namespace\", Region: \"us-east-1\", Tags: []model.Tag{{Key: \"tag1\", Value: \"value1\"}},\n\t\t\t\t}}, nil\n\t\t\t},\n\t\t\tcloudwatchRunFunc: func(_ context.Context, _ cloudwatchrunner.Job) ([]*model.CloudwatchData, error) {\n\t\t\t\treturn []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName:          \"metric-1\",\n\t\t\t\t\t\tResourceName:        \"resource-1\",\n\t\t\t\t\t\tNamespace:           \"aws-namespace\",\n\t\t\t\t\t\tTags:                []model.Tag{{Key: \"tag1\", Value: \"value1\"}},\n\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension1\", Value: \"value1\"}},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Maximum\", DataPoints: []model.DataPoint{{Value: aws.Float64(1.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t},\n\t\t\t\t}, nil\n\t\t\t},\n\t\t\texpectedResources: []model.TaggedResourceResult{\n\t\t\t\t{\n\t\t\t\t\tContext: &model.ScrapeContext{Region: \"us-east-1\", AccountID: \"aws-account-1\", AccountAlias: \"my-aws-account\"},\n\t\t\t\t\tData: []*model.TaggedResource{\n\t\t\t\t\t\t{ARN: \"resource-1\", Namespace: \"aws-namespace\", Region: \"us-east-1\", Tags: []model.Tag{{Key: \"tag1\", Value: \"value1\"}}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedMetrics: []model.CloudwatchMetricResult{\n\t\t\t\t{\n\t\t\t\t\tContext: &model.ScrapeContext{Region: \"us-east-1\", AccountID: \"aws-account-1\", AccountAlias: 
\"my-aws-account\"},\n\t\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMetricName:          \"metric-1\",\n\t\t\t\t\t\t\tResourceName:        \"resource-1\",\n\t\t\t\t\t\t\tNamespace:           \"aws-namespace\",\n\t\t\t\t\t\t\tTags:                []model.Tag{{Key: \"tag1\", Value: \"value1\"}},\n\t\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension1\", Value: \"value1\"}},\n\t\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Maximum\", DataPoints: []model.DataPoint{{Value: aws.Float64(1.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"can run a custom namespace job\",\n\t\t\tjobsCfg: model.JobsConfig{\n\t\t\t\tCustomNamespaceJobs: []model.CustomNamespaceJob{\n\t\t\t\t\t{\n\t\t\t\t\t\tRegions:   []string{\"us-east-2\"},\n\t\t\t\t\t\tName:      \"my-custom-job\",\n\t\t\t\t\t\tNamespace: \"custom-namespace\",\n\t\t\t\t\t\tRoles: []model.Role{\n\t\t\t\t\t\t\t{RoleArn: \"aws-arn-2\", ExternalID: \"external-id-2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgetAccountFunc: func() (string, error) {\n\t\t\t\treturn \"aws-account-1\", nil\n\t\t\t},\n\t\t\tgetAccountAliasFunc: func() (string, error) {\n\t\t\t\treturn \"my-aws-account\", nil\n\t\t\t},\n\t\t\tcloudwatchRunFunc: func(_ context.Context, _ cloudwatchrunner.Job) ([]*model.CloudwatchData, error) {\n\t\t\t\treturn []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName:          \"metric-2\",\n\t\t\t\t\t\tResourceName:        \"resource-2\",\n\t\t\t\t\t\tNamespace:           \"custom-namespace\",\n\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension2\", Value: \"value2\"}},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Minimum\", DataPoints: []model.DataPoint{{Value: aws.Float64(2.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t},\n\t\t\t\t}, nil\n\t\t\t},\n\t\t\texpectedMetrics: 
[]model.CloudwatchMetricResult{\n\t\t\t\t{\n\t\t\t\t\tContext: &model.ScrapeContext{Region: \"us-east-2\", AccountID: \"aws-account-1\", AccountAlias: \"my-aws-account\"},\n\t\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMetricName:          \"metric-2\",\n\t\t\t\t\t\t\tResourceName:        \"resource-2\",\n\t\t\t\t\t\t\tNamespace:           \"custom-namespace\",\n\t\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension2\", Value: \"value2\"}},\n\t\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Minimum\", DataPoints: []model.DataPoint{{Value: aws.Float64(2.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"can run a discovery and custom namespace job\",\n\t\t\tjobsCfg: model.JobsConfig{\n\t\t\t\tDiscoveryJobs: []model.DiscoveryJob{\n\t\t\t\t\t{\n\t\t\t\t\t\tRegions:   []string{\"us-east-1\"},\n\t\t\t\t\t\tNamespace: \"aws-namespace\",\n\t\t\t\t\t\tRoles: []model.Role{\n\t\t\t\t\t\t\t{RoleArn: \"aws-arn-1\", ExternalID: \"external-id-1\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCustomNamespaceJobs: []model.CustomNamespaceJob{\n\t\t\t\t\t{\n\t\t\t\t\t\tRegions:   []string{\"us-east-2\"},\n\t\t\t\t\t\tName:      \"my-custom-job\",\n\t\t\t\t\t\tNamespace: \"custom-namespace\",\n\t\t\t\t\t\tRoles: []model.Role{\n\t\t\t\t\t\t\t{RoleArn: \"aws-arn-2\", ExternalID: \"external-id-2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgetAccountFunc: func() (string, error) {\n\t\t\t\treturn \"aws-account-1\", nil\n\t\t\t},\n\t\t\tgetAccountAliasFunc: func() (string, error) {\n\t\t\t\treturn \"my-aws-account\", nil\n\t\t\t},\n\t\t\tmetadataRunFunc: func(_ context.Context, _ string, _ model.DiscoveryJob) ([]*model.TaggedResource, error) {\n\t\t\t\treturn []*model.TaggedResource{{\n\t\t\t\t\tARN: \"resource-1\", Namespace: \"aws-namespace\", Region: \"us-east-1\", Tags: []model.Tag{{Key: \"tag1\", Value: \"value1\"}},\n\t\t\t\t}}, 
nil\n\t\t\t},\n\t\t\tcloudwatchRunFunc: func(_ context.Context, job cloudwatchrunner.Job) ([]*model.CloudwatchData, error) {\n\t\t\t\tif job.Namespace() == \"custom-namespace\" {\n\t\t\t\t\treturn []*model.CloudwatchData{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMetricName:          \"metric-2\",\n\t\t\t\t\t\t\tResourceName:        \"resource-2\",\n\t\t\t\t\t\t\tNamespace:           \"custom-namespace\",\n\t\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension2\", Value: \"value2\"}},\n\t\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Minimum\", DataPoints: []model.DataPoint{{Value: aws.Float64(2.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil\n\t\t\t\t}\n\t\t\t\treturn []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName:          \"metric-1\",\n\t\t\t\t\t\tResourceName:        \"resource-1\",\n\t\t\t\t\t\tNamespace:           \"aws-namespace\",\n\t\t\t\t\t\tTags:                []model.Tag{{Key: \"tag1\", Value: \"value1\"}},\n\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension1\", Value: \"value1\"}},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Maximum\", DataPoints: []model.DataPoint{{Value: aws.Float64(1.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t},\n\t\t\t\t}, nil\n\t\t\t},\n\t\t\texpectedResources: []model.TaggedResourceResult{\n\t\t\t\t{\n\t\t\t\t\tContext: &model.ScrapeContext{Region: \"us-east-1\", AccountID: \"aws-account-1\", AccountAlias: \"my-aws-account\"},\n\t\t\t\t\tData: []*model.TaggedResource{\n\t\t\t\t\t\t{ARN: \"resource-1\", Namespace: \"aws-namespace\", Region: \"us-east-1\", Tags: []model.Tag{{Key: \"tag1\", Value: \"value1\"}}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedMetrics: []model.CloudwatchMetricResult{\n\t\t\t\t{\n\t\t\t\t\tContext: &model.ScrapeContext{Region: \"us-east-1\", AccountID: \"aws-account-1\", AccountAlias: \"my-aws-account\"},\n\t\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMetricName: 
         \"metric-1\",\n\t\t\t\t\t\t\tResourceName:        \"resource-1\",\n\t\t\t\t\t\t\tNamespace:           \"aws-namespace\",\n\t\t\t\t\t\t\tTags:                []model.Tag{{Key: \"tag1\", Value: \"value1\"}},\n\t\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension1\", Value: \"value1\"}},\n\t\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Maximum\", DataPoints: []model.DataPoint{{Value: aws.Float64(1.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tContext: &model.ScrapeContext{Region: \"us-east-2\", AccountID: \"aws-account-1\", AccountAlias: \"my-aws-account\"},\n\t\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMetricName:          \"metric-2\",\n\t\t\t\t\t\t\tResourceName:        \"resource-2\",\n\t\t\t\t\t\t\tNamespace:           \"custom-namespace\",\n\t\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension2\", Value: \"value2\"}},\n\t\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Minimum\", DataPoints: []model.DataPoint{{Value: aws.Float64(2.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"returns errors from GetAccounts\",\n\t\t\tjobsCfg: model.JobsConfig{\n\t\t\t\tDiscoveryJobs: []model.DiscoveryJob{\n\t\t\t\t\t{\n\t\t\t\t\t\tRegions:   []string{\"us-east-1\"},\n\t\t\t\t\t\tNamespace: \"aws-namespace\",\n\t\t\t\t\t\tRoles: []model.Role{\n\t\t\t\t\t\t\t{RoleArn: \"aws-arn-1\", ExternalID: \"external-id-1\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCustomNamespaceJobs: []model.CustomNamespaceJob{\n\t\t\t\t\t{\n\t\t\t\t\t\tRegions:   []string{\"us-east-2\"},\n\t\t\t\t\t\tName:      \"my-custom-job\",\n\t\t\t\t\t\tNamespace: \"custom-namespace\",\n\t\t\t\t\t\tRoles: []model.Role{\n\t\t\t\t\t\t\t{RoleArn: \"aws-arn-2\", ExternalID: \"external-id-2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgetAccountFunc: func() 
(string, error) {\n\t\t\t\treturn \"\", errors.New(\"failed to get account\")\n\t\t\t},\n\t\t\texpectedErrs: []job.Error{\n\t\t\t\t{JobContext: job.JobContext{Account: job.Account{}, Namespace: \"aws-namespace\", Region: \"us-east-1\", RoleARN: \"aws-arn-1\"}, ErrorType: job.AccountErr},\n\t\t\t\t{JobContext: job.JobContext{Account: job.Account{}, Namespace: \"custom-namespace\", Region: \"us-east-2\", RoleARN: \"aws-arn-2\"}, ErrorType: job.AccountErr},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ignores errors from GetAccountAlias\",\n\t\t\tjobsCfg: model.JobsConfig{\n\t\t\t\tDiscoveryJobs: []model.DiscoveryJob{\n\t\t\t\t\t{\n\t\t\t\t\t\tRegions:   []string{\"us-east-1\"},\n\t\t\t\t\t\tNamespace: \"aws-namespace\",\n\t\t\t\t\t\tRoles: []model.Role{\n\t\t\t\t\t\t\t{RoleArn: \"aws-arn-1\", ExternalID: \"external-id-1\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgetAccountFunc: func() (string, error) {\n\t\t\t\treturn \"aws-account-1\", nil\n\t\t\t},\n\t\t\tgetAccountAliasFunc: func() (string, error) { return \"\", errors.New(\"No alias here\") },\n\t\t\tmetadataRunFunc: func(_ context.Context, _ string, _ model.DiscoveryJob) ([]*model.TaggedResource, error) {\n\t\t\t\treturn []*model.TaggedResource{{\n\t\t\t\t\tARN: \"resource-1\", Namespace: \"aws-namespace\", Region: \"us-east-1\", Tags: []model.Tag{{Key: \"tag1\", Value: \"value1\"}},\n\t\t\t\t}}, nil\n\t\t\t},\n\t\t\tcloudwatchRunFunc: func(_ context.Context, _ cloudwatchrunner.Job) ([]*model.CloudwatchData, error) {\n\t\t\t\treturn []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName:          \"metric-1\",\n\t\t\t\t\t\tResourceName:        \"resource-1\",\n\t\t\t\t\t\tNamespace:           \"aws-namespace\",\n\t\t\t\t\t\tTags:                []model.Tag{{Key: \"tag1\", Value: \"value1\"}},\n\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension1\", Value: \"value1\"}},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Maximum\", DataPoints: 
[]model.DataPoint{{Value: aws.Float64(1.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t},\n\t\t\t\t}, nil\n\t\t\t},\n\t\t\texpectedResources: []model.TaggedResourceResult{\n\t\t\t\t{\n\t\t\t\t\tContext: &model.ScrapeContext{Region: \"us-east-1\", AccountID: \"aws-account-1\", AccountAlias: \"\"},\n\t\t\t\t\tData: []*model.TaggedResource{\n\t\t\t\t\t\t{ARN: \"resource-1\", Namespace: \"aws-namespace\", Region: \"us-east-1\", Tags: []model.Tag{{Key: \"tag1\", Value: \"value1\"}}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedMetrics: []model.CloudwatchMetricResult{\n\t\t\t\t{\n\t\t\t\t\tContext: &model.ScrapeContext{Region: \"us-east-1\", AccountID: \"aws-account-1\", AccountAlias: \"\"},\n\t\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMetricName:          \"metric-1\",\n\t\t\t\t\t\t\tResourceName:        \"resource-1\",\n\t\t\t\t\t\t\tNamespace:           \"aws-namespace\",\n\t\t\t\t\t\t\tTags:                []model.Tag{{Key: \"tag1\", Value: \"value1\"}},\n\t\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension1\", Value: \"value1\"}},\n\t\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Maximum\", DataPoints: []model.DataPoint{{Value: aws.Float64(1.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"returns errors from resource discovery without failing scrape\",\n\t\t\tjobsCfg: model.JobsConfig{\n\t\t\t\tDiscoveryJobs: []model.DiscoveryJob{\n\t\t\t\t\t{\n\t\t\t\t\t\tRegions:   []string{\"us-east-1\"},\n\t\t\t\t\t\tNamespace: \"aws-namespace\",\n\t\t\t\t\t\tRoles: []model.Role{\n\t\t\t\t\t\t\t{RoleArn: \"aws-arn-1\", ExternalID: \"external-id-1\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCustomNamespaceJobs: []model.CustomNamespaceJob{\n\t\t\t\t\t{\n\t\t\t\t\t\tRegions:   []string{\"us-east-2\"},\n\t\t\t\t\t\tName:      \"my-custom-job\",\n\t\t\t\t\t\tNamespace: \"custom-namespace\",\n\t\t\t\t\t\tRoles: 
[]model.Role{\n\t\t\t\t\t\t\t{RoleArn: \"aws-arn-2\", ExternalID: \"external-id-2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgetAccountFunc: func() (string, error) {\n\t\t\t\treturn \"aws-account-1\", nil\n\t\t\t},\n\t\t\tgetAccountAliasFunc: func() (string, error) {\n\t\t\t\treturn \"my-aws-account\", nil\n\t\t\t},\n\t\t\tmetadataRunFunc: func(_ context.Context, _ string, _ model.DiscoveryJob) ([]*model.TaggedResource, error) {\n\t\t\t\treturn nil, errors.New(\"I failed you\")\n\t\t\t},\n\t\t\tcloudwatchRunFunc: func(_ context.Context, _ cloudwatchrunner.Job) ([]*model.CloudwatchData, error) {\n\t\t\t\treturn []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName:          \"metric-2\",\n\t\t\t\t\t\tResourceName:        \"resource-2\",\n\t\t\t\t\t\tNamespace:           \"custom-namespace\",\n\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension2\", Value: \"value2\"}},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Minimum\", DataPoints: []model.DataPoint{{Value: aws.Float64(2.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t},\n\t\t\t\t}, nil\n\t\t\t},\n\t\t\texpectedMetrics: []model.CloudwatchMetricResult{\n\t\t\t\t{\n\t\t\t\t\tContext: &model.ScrapeContext{Region: \"us-east-2\", AccountID: \"aws-account-1\", AccountAlias: \"my-aws-account\"},\n\t\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMetricName:          \"metric-2\",\n\t\t\t\t\t\t\tResourceName:        \"resource-2\",\n\t\t\t\t\t\t\tNamespace:           \"custom-namespace\",\n\t\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension2\", Value: \"value2\"}},\n\t\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Minimum\", DataPoints: []model.DataPoint{{Value: aws.Float64(2.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrs: []job.Error{\n\t\t\t\t{\n\t\t\t\t\tJobContext: job.JobContext{\n\t\t\t\t\t\tAccount:   job.Account{ID: 
\"aws-account-1\", Alias: \"my-aws-account\"},\n\t\t\t\t\t\tNamespace: \"aws-namespace\",\n\t\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\t\tRoleARN:   \"aws-arn-1\",\n\t\t\t\t\t},\n\t\t\t\t\tErrorType: job.ResourceMetadataErr,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"returns errors from cloudwatch metrics runner without failing scrape\",\n\t\t\tjobsCfg: model.JobsConfig{\n\t\t\t\tDiscoveryJobs: []model.DiscoveryJob{\n\t\t\t\t\t{\n\t\t\t\t\t\tRegions:   []string{\"us-east-1\"},\n\t\t\t\t\t\tNamespace: \"aws-namespace\",\n\t\t\t\t\t\tRoles: []model.Role{\n\t\t\t\t\t\t\t{RoleArn: \"aws-arn-1\", ExternalID: \"external-id-1\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCustomNamespaceJobs: []model.CustomNamespaceJob{\n\t\t\t\t\t{\n\t\t\t\t\t\tRegions:   []string{\"us-east-2\"},\n\t\t\t\t\t\tName:      \"my-custom-job\",\n\t\t\t\t\t\tNamespace: \"custom-namespace\",\n\t\t\t\t\t\tRoles: []model.Role{\n\t\t\t\t\t\t\t{RoleArn: \"aws-arn-2\", ExternalID: \"external-id-2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tgetAccountFunc: func() (string, error) {\n\t\t\t\treturn \"aws-account-1\", nil\n\t\t\t},\n\t\t\tgetAccountAliasFunc: func() (string, error) {\n\t\t\t\treturn \"my-aws-account\", nil\n\t\t\t},\n\t\t\tmetadataRunFunc: func(_ context.Context, _ string, _ model.DiscoveryJob) ([]*model.TaggedResource, error) {\n\t\t\t\treturn []*model.TaggedResource{{\n\t\t\t\t\tARN: \"resource-1\", Namespace: \"aws-namespace\", Region: \"us-east-1\", Tags: []model.Tag{{Key: \"tag1\", Value: \"value1\"}},\n\t\t\t\t}}, nil\n\t\t\t},\n\t\t\tcloudwatchRunFunc: func(_ context.Context, job cloudwatchrunner.Job) ([]*model.CloudwatchData, error) {\n\t\t\t\tif job.Namespace() == \"custom-namespace\" {\n\t\t\t\t\treturn nil, errors.New(\"I failed you\")\n\t\t\t\t}\n\t\t\t\treturn []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName:          \"metric-1\",\n\t\t\t\t\t\tResourceName:        \"resource-1\",\n\t\t\t\t\t\tNamespace:           
\"aws-namespace\",\n\t\t\t\t\t\tTags:                []model.Tag{{Key: \"tag1\", Value: \"value1\"}},\n\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension1\", Value: \"value1\"}},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Maximum\", DataPoints: []model.DataPoint{{Value: aws.Float64(1.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t},\n\t\t\t\t}, nil\n\t\t\t},\n\t\t\texpectedResources: []model.TaggedResourceResult{\n\t\t\t\t{\n\t\t\t\t\tContext: &model.ScrapeContext{Region: \"us-east-1\", AccountID: \"aws-account-1\", AccountAlias: \"my-aws-account\"},\n\t\t\t\t\tData: []*model.TaggedResource{\n\t\t\t\t\t\t{ARN: \"resource-1\", Namespace: \"aws-namespace\", Region: \"us-east-1\", Tags: []model.Tag{{Key: \"tag1\", Value: \"value1\"}}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedMetrics: []model.CloudwatchMetricResult{\n\t\t\t\t{\n\t\t\t\t\tContext: &model.ScrapeContext{Region: \"us-east-1\", AccountID: \"aws-account-1\", AccountAlias: \"my-aws-account\"},\n\t\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMetricName:          \"metric-1\",\n\t\t\t\t\t\t\tResourceName:        \"resource-1\",\n\t\t\t\t\t\t\tNamespace:           \"aws-namespace\",\n\t\t\t\t\t\t\tTags:                []model.Tag{{Key: \"tag1\", Value: \"value1\"}},\n\t\t\t\t\t\t\tDimensions:          []model.Dimension{{Name: \"dimension1\", Value: \"value1\"}},\n\t\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{Statistic: \"Maximum\", DataPoints: []model.DataPoint{{Value: aws.Float64(1.0), Timestamp: time.Time{}}}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrs: []job.Error{\n\t\t\t\t{\n\t\t\t\t\tJobContext: job.JobContext{\n\t\t\t\t\t\tAccount:   job.Account{ID: \"aws-account-1\", Alias: \"my-aws-account\"},\n\t\t\t\t\t\tNamespace: \"custom-namespace\",\n\t\t\t\t\t\tRegion:    \"us-east-2\",\n\t\t\t\t\t\tRoleARN:   \"aws-arn-2\",\n\t\t\t\t\t},\n\t\t\t\t\tErrorType: 
job.CloudWatchCollectionErr,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\trf := testRunnerFactory{\n\t\t\t\tGetAccountFunc:      tc.getAccountFunc,\n\t\t\t\tGetAccountAliasFunc: tc.getAccountAliasFunc,\n\t\t\t\tMetadataRunFunc:     tc.metadataRunFunc,\n\t\t\t\tCloudwatchRunFunc:   tc.cloudwatchRunFunc,\n\t\t\t}\n\t\t\tlvl := promslog.NewLevel()\n\t\t\t_ = lvl.Set(\"debug\")\n\t\t\tsr := job.NewScraper(promslog.New(&promslog.Config{Level: lvl}), tc.jobsCfg, &rf)\n\t\t\tresources, metrics, errs := sr.Scrape(context.Background())\n\n\t\t\tchangelog, err := diff.Diff(tc.expectedResources, resources)\n\t\t\tassert.NoError(t, err, \"failed to diff resources\")\n\t\t\tassert.Len(t, changelog, 0, changelog)\n\n\t\t\tchangelog, err = diff.Diff(tc.expectedMetrics, metrics)\n\t\t\tassert.NoError(t, err, \"failed to diff metrics\")\n\t\t\tassert.Len(t, changelog, 0, changelog)\n\n\t\t\t// We don't want to check the exact error just the message\n\t\t\tchangelog, err = diff.Diff(tc.expectedErrs, errs, diff.Filter(func(_ []string, _ reflect.Type, field reflect.StructField) bool {\n\t\t\t\treturn field.Name != \"Err\"\n\t\t\t}))\n\t\t\tassert.NoError(t, err, \"failed to diff errs\")\n\t\t\tassert.Len(t, changelog, 0, changelog)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/job/static.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage job\n\nimport (\n\t\"context\"\n\t\"log/slog\"\n\t\"sync\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/clients/cloudwatch\"\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nfunc runStaticJob(\n\tctx context.Context,\n\tlogger *slog.Logger,\n\tresource model.StaticJob,\n\tclientCloudwatch cloudwatch.Client,\n) []*model.CloudwatchData {\n\tcw := []*model.CloudwatchData{}\n\tmux := &sync.Mutex{}\n\tvar wg sync.WaitGroup\n\n\tfor j := range resource.Metrics {\n\t\tmetric := resource.Metrics[j]\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tdata := model.CloudwatchData{\n\t\t\t\tMetricName:   metric.Name,\n\t\t\t\tResourceName: resource.Name,\n\t\t\t\tNamespace:    resource.Namespace,\n\t\t\t\tDimensions:   createStaticDimensions(resource.Dimensions),\n\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\tNilToZero:              metric.NilToZero,\n\t\t\t\t\tAddCloudwatchTimestamp: metric.AddCloudwatchTimestamp,\n\t\t\t\t},\n\t\t\t\tTags:                          nil,\n\t\t\t\tGetMetricDataProcessingParams: nil,\n\t\t\t\tGetMetricDataResult:           nil,\n\t\t\t\tGetMetricStatisticsResult:     nil,\n\t\t\t}\n\n\t\t\tdata.GetMetricStatisticsResult = &model.GetMetricStatisticsResult{\n\t\t\t\tResults:    clientCloudwatch.GetMetricStatistics(ctx, 
logger, data.Dimensions, resource.Namespace, metric),\n\t\t\t\tStatistics: metric.Statistics,\n\t\t\t}\n\n\t\t\tif data.GetMetricStatisticsResult.Results != nil {\n\t\t\t\tmux.Lock()\n\t\t\t\tcw = append(cw, &data)\n\t\t\t\tmux.Unlock()\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\treturn cw\n}\n\nfunc createStaticDimensions(dimensions []model.Dimension) []model.Dimension {\n\tout := make([]model.Dimension, 0, len(dimensions))\n\tfor _, d := range dimensions {\n\t\tout = append(out, model.Dimension{\n\t\t\tName:  d.Name,\n\t\t\tValue: d.Value,\n\t\t})\n\t}\n\n\treturn out\n}\n"
  },
  {
    "path": "pkg/model/model.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage model\n\nimport (\n\t\"time\"\n\n\t\"github.com/grafana/regexp\"\n)\n\nconst (\n\tDefaultPeriodSeconds = int64(300)\n\tDefaultLengthSeconds = int64(300)\n)\n\ntype JobsConfig struct {\n\tStsRegion           string\n\tDiscoveryJobs       []DiscoveryJob\n\tStaticJobs          []StaticJob\n\tCustomNamespaceJobs []CustomNamespaceJob\n}\n\ntype DiscoveryJob struct {\n\tRegions                     []string\n\tNamespace                   string\n\tRoles                       []Role\n\tSearchTags                  []SearchTag\n\tCustomTags                  []Tag\n\tDimensionNameRequirements   []string\n\tMetrics                     []*MetricConfig\n\tRoundingPeriod              *int64\n\tRecentlyActiveOnly          bool\n\tExportedTagsOnMetrics       []string\n\tIncludeContextOnInfoMetrics bool\n\tDimensionsRegexps           []DimensionsRegexp\n\n\t// EnhancedMetrics holds configuration for enhanced metrics in discovery jobs. 
It contains a configuration for the non-CloudWatch metrics to collect.\n\tEnhancedMetrics []*EnhancedMetricConfig\n}\n\nfunc (d *DiscoveryJob) HasEnhancedMetrics() bool {\n\treturn len(d.EnhancedMetrics) > 0\n}\n\ntype EnhancedMetricConfig struct {\n\tName string\n}\n\ntype StaticJob struct {\n\tName       string\n\tRegions    []string\n\tRoles      []Role\n\tNamespace  string\n\tCustomTags []Tag\n\tDimensions []Dimension\n\tMetrics    []*MetricConfig\n}\n\ntype CustomNamespaceJob struct {\n\tRegions                   []string\n\tName                      string\n\tNamespace                 string\n\tRoundingPeriod            *int64\n\tRecentlyActiveOnly        bool\n\tRoles                     []Role\n\tMetrics                   []*MetricConfig\n\tCustomTags                []Tag\n\tDimensionNameRequirements []string\n}\n\ntype Role struct {\n\tRoleArn    string\n\tExternalID string\n}\n\ntype MetricConfig struct {\n\tName                   string\n\tStatistics             []string\n\tPeriod                 int64\n\tLength                 int64\n\tDelay                  int64\n\tNilToZero              bool\n\tAddCloudwatchTimestamp bool\n\tExportAllDataPoints    bool\n}\n\ntype DimensionsRegexp struct {\n\tRegexp          *regexp.Regexp\n\tDimensionsNames []string\n}\n\ntype LabelSet map[string]struct{}\n\ntype Tag struct {\n\tKey   string\n\tValue string\n}\n\ntype SearchTag struct {\n\tKey   string\n\tValue *regexp.Regexp\n}\n\ntype Dimension struct {\n\tName  string\n\tValue string\n}\n\ntype Metric struct {\n\t// The dimensions for the metric.\n\tDimensions []Dimension\n\tMetricName string\n\tNamespace  string\n}\n\ntype CloudwatchMetricResult struct {\n\tContext *ScrapeContext\n\tData    []*CloudwatchData\n}\n\ntype TaggedResourceResult struct {\n\tContext *ScrapeContext\n\tData    []*TaggedResource\n}\n\ntype ScrapeContext struct {\n\tRegion       string\n\tAccountID    string\n\tAccountAlias string\n\tCustomTags   []Tag\n}\n\n// CloudwatchData is an internal 
representation of a CloudWatch\n// metric with attached data points, metric and resource information.\ntype CloudwatchData struct {\n\tMetricName string\n\t// ResourceName will have different values depending on the job type\n\t// DiscoveryJob = Resource ARN associated with the metric or global when it could not be associated but shouldn't be dropped\n\t// StaticJob = Resource Name from static job config\n\t// CustomNamespace = Custom Namespace job name\n\tResourceName string\n\tNamespace    string\n\tTags         []Tag\n\tDimensions   []Dimension\n\t// GetMetricDataProcessingParams includes necessary fields to run GetMetricData\n\tGetMetricDataProcessingParams *GetMetricDataProcessingParams\n\n\t// MetricMigrationParams holds configuration values necessary when migrating the resulting metrics\n\tMetricMigrationParams MetricMigrationParams\n\n\t// GetMetricsDataResult is an optional field and will be non-nil when metric data was populated from the GetMetricsData API (Discovery and CustomNamespace jobs)\n\tGetMetricDataResult *GetMetricDataResult\n\n\t// GetMetricStatisticsResult is an optional field and will be non-nil when metric data was populated from the GetMetricStatistics API (static jobs)\n\tGetMetricStatisticsResult *GetMetricStatisticsResult\n}\n\ntype GetMetricStatisticsResult struct {\n\tResults    []*MetricStatisticsResult\n\tStatistics []string\n}\n\ntype MetricStatisticsResult struct {\n\t// The average of the metric values that correspond to the data point.\n\tAverage *float64\n\n\t// The percentile statistic for the data point.\n\tExtendedStatistics map[string]*float64\n\n\t// The maximum metric value for the data point.\n\tMaximum *float64\n\n\t// The minimum metric value for the data point.\n\tMinimum *float64\n\n\t// The number of metric values that contributed to the aggregate value of this\n\t// data point.\n\tSampleCount *float64\n\n\t// The sum of the metric values for the data point.\n\tSum *float64\n\n\t// The time stamp used for the data 
point.\n\tTimestamp *time.Time\n}\n\ntype GetMetricDataProcessingParams struct {\n\t// QueryID is a value internal to processing used for mapping results from GetMetricData their original request\n\tQueryID string\n\n\t// The statistic to be used to call GetMetricData\n\tStatistic string\n\n\t// Fields which impact the start and endtime for\n\tPeriod int64\n\tLength int64\n\tDelay  int64\n}\n\ntype MetricMigrationParams struct {\n\tNilToZero              bool\n\tAddCloudwatchTimestamp bool\n\tExportAllDataPoints    bool\n}\n\ntype GetMetricDataResult struct {\n\tStatistic  string\n\tDataPoints []DataPoint\n}\n\ntype DataPoint struct {\n\tValue     *float64\n\tTimestamp time.Time\n}\n\n// TaggedResource is an AWS resource with tags\ntype TaggedResource struct {\n\t// ARN is the unique AWS ARN (Amazon Resource Name) of the resource\n\tARN string\n\n\t// Namespace identifies the resource type (e.g. EC2)\n\tNamespace string\n\n\t// Region is the AWS regions that the resource belongs to\n\tRegion string\n\n\t// Tags is a set of tags associated to the resource\n\tTags []Tag\n}\n\n// FilterThroughTags returns true if all filterTags match\n// with tags of the TaggedResource, returns false otherwise.\nfunc (r TaggedResource) FilterThroughTags(filterTags []SearchTag) bool {\n\tif len(filterTags) == 0 {\n\t\treturn true\n\t}\n\n\ttagFilterMatches := 0\n\n\tfor _, resourceTag := range r.Tags {\n\t\tfor _, filterTag := range filterTags {\n\t\t\tif resourceTag.Key == filterTag.Key {\n\t\t\t\tif !filterTag.Value.MatchString(resourceTag.Value) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\t// A resource needs to match all SearchTags to be returned, so we track the number of tag filter\n\t\t\t\t// matches to ensure it matches the number of tag filters at the end\n\t\t\t\ttagFilterMatches++\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tagFilterMatches == len(filterTags)\n}\n\n// MetricTags returns a list of tags built from the tags of\n// TaggedResource, if exportedTags is not empty.\n//\n// 
Returned tags have as key the key from exportedTags, and\n// as value the value from the corresponding tag of the resource,\n// if it exists (otherwise an empty string).\nfunc (r TaggedResource) MetricTags(exportedTags []string) []Tag {\n\tif len(exportedTags) == 0 {\n\t\treturn []Tag{}\n\t}\n\n\ttags := make([]Tag, 0, len(exportedTags))\n\tfor _, tagName := range exportedTags {\n\t\ttag := Tag{\n\t\t\tKey: tagName,\n\t\t}\n\t\tfor _, resourceTag := range r.Tags {\n\t\t\tif resourceTag.Key == tagName {\n\t\t\t\ttag.Value = resourceTag.Value\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Always add the tag, even if it's empty, to ensure the same labels are present on all metrics for a single service\n\t\ttags = append(tags, tag)\n\t}\n\treturn tags\n}\n"
  },
  {
    "path": "pkg/model/model_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage model\n\nimport (\n\t\"testing\"\n\n\t\"github.com/grafana/regexp\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc Test_FilterThroughTags(t *testing.T) {\n\ttestCases := []struct {\n\t\ttestName     string\n\t\tresourceTags []Tag\n\t\tfilterTags   []SearchTag\n\t\tresult       bool\n\t}{\n\t\t{\n\t\t\ttestName: \"exactly matching tags\",\n\t\t\tresourceTags: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tfilterTags: []SearchTag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: regexp.MustCompile(\"v1\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\t\t{\n\t\t\ttestName: \"unmatching tags\",\n\t\t\tresourceTags: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tfilterTags: []SearchTag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k2\",\n\t\t\t\t\tValue: regexp.MustCompile(\"v2\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t\t{\n\t\t\ttestName: \"resource has more tags\",\n\t\t\tresourceTags: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"v1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k2\",\n\t\t\t\t\tValue: \"v2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tfilterTags: []SearchTag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: regexp.MustCompile(\"v1\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tresult: 
true,\n\t\t},\n\t\t{\n\t\t\ttestName: \"filter has more tags\",\n\t\t\tresourceTags: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tfilterTags: []SearchTag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: regexp.MustCompile(\"v1\"),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k2\",\n\t\t\t\t\tValue: regexp.MustCompile(\"v2\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t\t{\n\t\t\ttestName: \"unmatching tag key\",\n\t\t\tresourceTags: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tfilterTags: []SearchTag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k2\",\n\t\t\t\t\tValue: regexp.MustCompile(\"v1\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t\t{\n\t\t\ttestName: \"unmatching tag value\",\n\t\t\tresourceTags: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tfilterTags: []SearchTag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: regexp.MustCompile(\"v2\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t\t{\n\t\t\ttestName:     \"resource without tags\",\n\t\t\tresourceTags: []Tag{},\n\t\t\tfilterTags: []SearchTag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: regexp.MustCompile(\"v2\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t\t{\n\t\t\ttestName: \"empty filter tags\",\n\t\t\tresourceTags: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tfilterTags: []SearchTag{},\n\t\t\tresult:     true,\n\t\t},\n\t\t{\n\t\t\ttestName: \"filter with value regex\",\n\t\t\tresourceTags: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tfilterTags: []SearchTag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: regexp.MustCompile(\"v.*\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.testName, func(t *testing.T) 
{\n\t\t\tres := TaggedResource{\n\t\t\t\tARN:       \"aws::arn\",\n\t\t\t\tNamespace: \"AWS/Service\",\n\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\tTags:      tc.resourceTags,\n\t\t\t}\n\t\t\trequire.Equal(t, tc.result, res.FilterThroughTags(tc.filterTags))\n\t\t})\n\t}\n}\n\nfunc Test_MetricTags(t *testing.T) {\n\ttestCases := []struct {\n\t\ttestName     string\n\t\tresourceTags []Tag\n\t\texportedTags []string\n\t\tresult       []Tag\n\t}{\n\t\t{\n\t\t\ttestName: \"empty exported tag\",\n\t\t\tresourceTags: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texportedTags: []string{},\n\t\t\tresult:       []Tag{},\n\t\t},\n\t\t{\n\t\t\ttestName: \"single exported tag\",\n\t\t\tresourceTags: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texportedTags: []string{\"k1\"},\n\t\t\tresult: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttestName: \"multiple exported tags\",\n\t\t\tresourceTags: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texportedTags: []string{\"k1\", \"k2\"},\n\t\t\tresult: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"v1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k2\",\n\t\t\t\t\tValue: \"\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttestName:     \"resource without tags\",\n\t\t\tresourceTags: []Tag{},\n\t\t\texportedTags: []string{\"k1\"},\n\t\t\tresult: []Tag{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"k1\",\n\t\t\t\t\tValue: \"\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.testName, func(t *testing.T) {\n\t\t\tres := TaggedResource{\n\t\t\t\tARN:       \"aws::arn\",\n\t\t\t\tNamespace: \"AWS/Service\",\n\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\tTags:      tc.resourceTags,\n\t\t\t}\n\n\t\t\trequire.Equal(t, tc.result, res.MetricTags(tc.exportedTags))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/promutil/migrate.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage promutil\n\nimport (\n\t\"fmt\"\n\t\"log/slog\"\n\t\"maps\"\n\t\"math\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/grafana/regexp\"\n\tprom_model \"github.com/prometheus/common/model\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nvar Percentile = regexp.MustCompile(`^p(\\d{1,2}(\\.\\d{0,2})?|100)$`)\n\nfunc BuildMetricName(namespace, metricName, statistic string) string {\n\tsb := strings.Builder{}\n\n\t// Some namespaces have a leading forward slash like\n\t// /aws/sagemaker/TrainingJobs, which should be removed.\n\tvar promNs string\n\tif strings.HasPrefix(namespace, \"/\") {\n\t\tpromNs = PromString(strings.ToLower(namespace[1:]))\n\t} else {\n\t\tpromNs = PromString(strings.ToLower(namespace))\n\t}\n\n\tif !strings.HasPrefix(promNs, \"aws\") {\n\t\tsb.WriteString(\"aws_\")\n\t}\n\tsb.WriteString(promNs)\n\n\tsb.WriteString(\"_\")\n\n\tpromMetricName := PromString(metricName)\n\t// Some metric names duplicate parts of the namespace as a prefix,\n\t// For example, the `Glue` namespace metrics have names prefixed also by `glue``\n\tskip := 0\n\tfor _, part := range strings.Split(promNs, \"_\") {\n\t\tif strings.HasPrefix(promMetricName[skip:], part) {\n\t\t\tskip = len(part)\n\t\t}\n\t}\n\tpromMetricName = strings.TrimPrefix(promMetricName[skip:], 
\"_\")\n\n\tsb.WriteString(promMetricName)\n\tif statistic != \"\" {\n\t\tsb.WriteString(\"_\")\n\t\tPromStringToBuilder(statistic, &sb)\n\t}\n\treturn sb.String()\n}\n\nfunc BuildNamespaceInfoMetrics(tagData []model.TaggedResourceResult, metrics []*PrometheusMetric, observedMetricLabels map[string]model.LabelSet, labelsSnakeCase bool, logger *slog.Logger) ([]*PrometheusMetric, map[string]model.LabelSet) {\n\tfor _, tagResult := range tagData {\n\t\tcontextLabels := contextToLabels(tagResult.Context, labelsSnakeCase, logger)\n\t\tfor _, d := range tagResult.Data {\n\t\t\tmetricName := BuildMetricName(d.Namespace, \"info\", \"\")\n\n\t\t\tpromLabels := make(map[string]string, len(d.Tags)+len(contextLabels)+1)\n\t\t\tmaps.Copy(promLabels, contextLabels)\n\t\t\tpromLabels[\"name\"] = d.ARN\n\t\t\tfor _, tag := range d.Tags {\n\t\t\t\tok, promTag := PromStringTag(tag.Key, labelsSnakeCase)\n\t\t\t\tif !ok {\n\t\t\t\t\tlogger.Warn(\"tag name is an invalid prometheus label name\", \"tag\", tag.Key)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tlabelName := \"tag_\" + promTag\n\t\t\t\tpromLabels[labelName] = tag.Value\n\t\t\t}\n\n\t\t\tobservedMetricLabels = recordLabelsForMetric(metricName, promLabels, observedMetricLabels)\n\t\t\tmetrics = append(metrics, &PrometheusMetric{\n\t\t\t\tName:   metricName,\n\t\t\t\tLabels: promLabels,\n\t\t\t\tValue:  0,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn metrics, observedMetricLabels\n}\n\nfunc BuildMetrics(results []model.CloudwatchMetricResult, labelsSnakeCase bool, logger *slog.Logger) ([]*PrometheusMetric, map[string]model.LabelSet, error) {\n\toutput := make([]*PrometheusMetric, 0)\n\tobservedMetricLabels := make(map[string]model.LabelSet)\n\n\tfor _, result := range results {\n\t\tcontextLabels := contextToLabels(result.Context, labelsSnakeCase, logger)\n\t\tfor _, metric := range result.Data {\n\t\t\t// This should not be possible but check just in case\n\t\t\tif metric.GetMetricStatisticsResult == nil && metric.GetMetricDataResult == nil 
{\n\t\t\t\tlogger.Warn(\"Attempted to migrate metric with no result\", \"namespace\", metric.Namespace, \"metric_name\", metric.MetricName, \"resource_name\", metric.ResourceName)\n\t\t\t}\n\n\t\t\tfor _, statistic := range statisticsInCloudwatchData(metric) {\n\t\t\t\tdataPoints, err := getDataPoints(metric, statistic)\n\t\t\t\tfor _, dataPoint := range dataPoints {\n\t\t\t\t\tts := dataPoint.Timestamp\n\t\t\t\t\tdataPoint := dataPoint.Value\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, nil, err\n\t\t\t\t\t}\n\t\t\t\t\tvar exportedDatapoint float64\n\t\t\t\t\tif dataPoint == nil && metric.MetricMigrationParams.AddCloudwatchTimestamp {\n\t\t\t\t\t\t// If we did not get a datapoint then the timestamp is a default value making it unusable in the\n\t\t\t\t\t\t// exported metric. Attempting to put a fake timestamp on the metric will likely conflict with\n\t\t\t\t\t\t// future CloudWatch timestamps which are always in the past.\n\t\t\t\t\t\tif metric.MetricMigrationParams.ExportAllDataPoints {\n\t\t\t\t\t\t\t// If we're exporting all data points, we can skip this one and check for a historical datapoint\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// If we are not exporting all data points, we better have nothing exported\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif dataPoint == nil {\n\t\t\t\t\t\texportedDatapoint = math.NaN()\n\t\t\t\t\t} else {\n\t\t\t\t\t\texportedDatapoint = *dataPoint\n\t\t\t\t\t}\n\n\t\t\t\t\tif metric.MetricMigrationParams.NilToZero && math.IsNaN(exportedDatapoint) {\n\t\t\t\t\t\texportedDatapoint = 0\n\t\t\t\t\t}\n\n\t\t\t\t\tname := BuildMetricName(metric.Namespace, metric.MetricName, statistic)\n\n\t\t\t\t\tpromLabels := createPrometheusLabels(metric, labelsSnakeCase, contextLabels, logger)\n\t\t\t\t\tobservedMetricLabels = recordLabelsForMetric(name, promLabels, observedMetricLabels)\n\n\t\t\t\t\tif !metric.MetricMigrationParams.AddCloudwatchTimestamp {\n\t\t\t\t\t\t// if we're not adding the original timestamp, we have to zero 
it so we can validate the data in the exporter via EnsureLabelConsistencyAndRemoveDuplicates\n\t\t\t\t\t\tts = time.Time{}\n\t\t\t\t\t}\n\n\t\t\t\t\toutput = append(output, &PrometheusMetric{\n\t\t\t\t\t\tName:             name,\n\t\t\t\t\t\tLabels:           promLabels,\n\t\t\t\t\t\tValue:            exportedDatapoint,\n\t\t\t\t\t\tTimestamp:        ts,\n\t\t\t\t\t\tIncludeTimestamp: metric.MetricMigrationParams.AddCloudwatchTimestamp,\n\t\t\t\t\t})\n\n\t\t\t\t\tif !metric.MetricMigrationParams.ExportAllDataPoints {\n\t\t\t\t\t\t// If we're not exporting all data points, we can skip the rest of the data points for this metric\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn output, observedMetricLabels, nil\n}\n\nfunc statisticsInCloudwatchData(d *model.CloudwatchData) []string {\n\tif d.GetMetricDataResult != nil {\n\t\treturn []string{d.GetMetricDataResult.Statistic}\n\t}\n\tif d.GetMetricStatisticsResult != nil {\n\t\treturn d.GetMetricStatisticsResult.Statistics\n\t}\n\treturn []string{}\n}\n\nfunc getDataPoints(cwd *model.CloudwatchData, statistic string) ([]model.DataPoint, error) {\n\t// Not possible but for sanity\n\tif cwd.GetMetricStatisticsResult == nil && cwd.GetMetricDataResult == nil {\n\t\treturn nil, fmt.Errorf(\"cannot map a data point with no results on %s\", cwd.MetricName)\n\t}\n\n\tif cwd.GetMetricDataResult != nil {\n\t\t// If we have no dataPoints, we should return a single nil datapoint, which is then either dropped or converted to 0\n\t\tif len(cwd.GetMetricDataResult.DataPoints) == 0 && !cwd.MetricMigrationParams.AddCloudwatchTimestamp {\n\t\t\treturn []model.DataPoint{{\n\t\t\t\tValue:     nil,\n\t\t\t\tTimestamp: time.Time{},\n\t\t\t}}, nil\n\t\t}\n\n\t\treturn cwd.GetMetricDataResult.DataPoints, nil\n\t}\n\n\tvar averageDataPoints []*model.MetricStatisticsResult\n\n\t// sorting by timestamps so we can consistently export the most updated datapoint\n\t// assuming Timestamp field in cloudwatch.Value struct is 
never nil\n\tfor _, datapoint := range sortByTimestamp(cwd.GetMetricStatisticsResult.Results) {\n\t\tswitch {\n\t\tcase statistic == \"Maximum\":\n\t\t\tif datapoint.Maximum != nil {\n\t\t\t\treturn []model.DataPoint{{Value: datapoint.Maximum, Timestamp: *datapoint.Timestamp}}, nil\n\t\t\t}\n\t\tcase statistic == \"Minimum\":\n\t\t\tif datapoint.Minimum != nil {\n\t\t\t\treturn []model.DataPoint{{Value: datapoint.Minimum, Timestamp: *datapoint.Timestamp}}, nil\n\t\t\t}\n\t\tcase statistic == \"Sum\":\n\t\t\tif datapoint.Sum != nil {\n\t\t\t\treturn []model.DataPoint{{Value: datapoint.Sum, Timestamp: *datapoint.Timestamp}}, nil\n\t\t\t}\n\t\tcase statistic == \"SampleCount\":\n\t\t\tif datapoint.SampleCount != nil {\n\t\t\t\treturn []model.DataPoint{{Value: datapoint.SampleCount, Timestamp: *datapoint.Timestamp}}, nil\n\t\t\t}\n\t\tcase statistic == \"Average\":\n\t\t\tif datapoint.Average != nil {\n\t\t\t\taverageDataPoints = append(averageDataPoints, datapoint)\n\t\t\t}\n\t\tcase Percentile.MatchString(statistic):\n\t\t\tif data, ok := datapoint.ExtendedStatistics[statistic]; ok {\n\t\t\t\treturn []model.DataPoint{{Value: data, Timestamp: *datapoint.Timestamp}}, nil\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid statistic requested on metric %s: %s\", cwd.MetricName, statistic)\n\t\t}\n\t}\n\n\tif len(averageDataPoints) > 0 {\n\t\tvar total float64\n\t\tvar timestamp time.Time\n\n\t\tfor _, p := range averageDataPoints {\n\t\t\tif p.Timestamp.After(timestamp) {\n\t\t\t\ttimestamp = *p.Timestamp\n\t\t\t}\n\t\t\ttotal += *p.Average\n\t\t}\n\t\taverage := total / float64(len(averageDataPoints))\n\t\treturn []model.DataPoint{{Value: &average, Timestamp: timestamp}}, nil\n\t}\n\treturn nil, nil\n}\n\nfunc sortByTimestamp(dataPoints []*model.MetricStatisticsResult) []*model.MetricStatisticsResult {\n\tsort.Slice(dataPoints, func(i, j int) bool {\n\t\tjTimestamp := *dataPoints[j].Timestamp\n\t\treturn 
dataPoints[i].Timestamp.After(jTimestamp)\n\t})\n\treturn dataPoints\n}\n\nfunc createPrometheusLabels(cwd *model.CloudwatchData, labelsSnakeCase bool, contextLabels map[string]string, logger *slog.Logger) map[string]string {\n\tlabels := make(map[string]string, len(cwd.Dimensions)+len(cwd.Tags)+len(contextLabels))\n\tlabels[\"name\"] = cwd.ResourceName\n\n\t// Inject the sfn name back as a label\n\tfor _, dimension := range cwd.Dimensions {\n\t\tok, promTag := PromStringTag(dimension.Name, labelsSnakeCase)\n\t\tif !ok {\n\t\t\tlogger.Warn(\"dimension name is an invalid prometheus label name\", \"dimension\", dimension.Name)\n\t\t\tcontinue\n\t\t}\n\t\tlabels[\"dimension_\"+promTag] = dimension.Value\n\t}\n\n\tfor _, tag := range cwd.Tags {\n\t\tok, promTag := PromStringTag(tag.Key, labelsSnakeCase)\n\t\tif !ok {\n\t\t\tlogger.Warn(\"metric tag name is an invalid prometheus label name\", \"tag\", tag.Key)\n\t\t\tcontinue\n\t\t}\n\t\tlabels[\"tag_\"+promTag] = tag.Value\n\t}\n\n\tmaps.Copy(labels, contextLabels)\n\n\treturn labels\n}\n\nfunc contextToLabels(context *model.ScrapeContext, labelsSnakeCase bool, logger *slog.Logger) map[string]string {\n\tif context == nil {\n\t\treturn map[string]string{}\n\t}\n\n\tlabels := make(map[string]string, 2+len(context.CustomTags))\n\tlabels[\"region\"] = context.Region\n\tlabels[\"account_id\"] = context.AccountID\n\t// If there's no account alias, omit adding an extra label in the series, it will work either way query wise\n\tif context.AccountAlias != \"\" {\n\t\tlabels[\"account_alias\"] = context.AccountAlias\n\t}\n\n\tfor _, label := range context.CustomTags {\n\t\tok, promTag := PromStringTag(label.Key, labelsSnakeCase)\n\t\tif !ok {\n\t\t\tlogger.Warn(\"custom tag name is an invalid prometheus label name\", \"tag\", label.Key)\n\t\t\tcontinue\n\t\t}\n\t\tlabels[\"custom_tag_\"+promTag] = label.Value\n\t}\n\n\treturn labels\n}\n\n// recordLabelsForMetric adds any missing labels from promLabels in to the LabelSet for 
the metric name and returns\n// the updated observedMetricLabels\nfunc recordLabelsForMetric(metricName string, promLabels map[string]string, observedMetricLabels map[string]model.LabelSet) map[string]model.LabelSet {\n\tif _, ok := observedMetricLabels[metricName]; !ok {\n\t\tobservedMetricLabels[metricName] = make(model.LabelSet, len(promLabels))\n\t}\n\tfor label := range promLabels {\n\t\tif _, ok := observedMetricLabels[metricName][label]; !ok {\n\t\t\tobservedMetricLabels[metricName][label] = struct{}{}\n\t\t}\n\t}\n\n\treturn observedMetricLabels\n}\n\n// EnsureLabelConsistencyAndRemoveDuplicates ensures that every metric has the same set of labels based on the data\n// in observedMetricLabels and that there are no duplicate metrics.\n// Prometheus requires that all metrics with the same name have the same set of labels and that no duplicates are registered\nfunc EnsureLabelConsistencyAndRemoveDuplicates(metrics []*PrometheusMetric, observedMetricLabels map[string]model.LabelSet) []*PrometheusMetric {\n\tmetricKeys := make(map[string]struct{}, len(metrics))\n\toutput := make([]*PrometheusMetric, 0, len(metrics))\n\n\tfor _, metric := range metrics {\n\t\tfor observedLabels := range observedMetricLabels[metric.Name] {\n\t\t\tif _, ok := metric.Labels[observedLabels]; !ok {\n\t\t\t\tmetric.Labels[observedLabels] = \"\"\n\t\t\t}\n\t\t}\n\n\t\t// We are including the timestamp in the metric key to ensure that we don't have duplicate metrics\n\t\t// if we have AddCloudwatchTimestamp enabled its the real timestamp, otherwise its a zero value\n\t\t// the timestamp is needed to ensure valid date created by ExportAllDataPoints\n\t\tmetricKey := fmt.Sprintf(\"%s-%d-%d\", metric.Name, prom_model.LabelsToSignature(metric.Labels), metric.Timestamp.Unix())\n\t\tif _, exists := metricKeys[metricKey]; !exists {\n\t\t\tmetricKeys[metricKey] = struct{}{}\n\t\t\toutput = append(output, metric)\n\t\t} else {\n\t\t\tDuplicateMetricsFilteredCounter.Inc()\n\t\t}\n\t}\n\n\treturn 
output\n}\n"
  },
  {
    "path": "pkg/promutil/migrate_test.go",
    "content": "// Copyright The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage promutil\n\nimport (\n\t\"math\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/prometheus/common/promslog\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/prometheus-community/yet-another-cloudwatch-exporter/pkg/model\"\n)\n\nfunc TestBuildNamespaceInfoMetrics(t *testing.T) {\n\ttype testCase struct {\n\t\tname                 string\n\t\tresources            []model.TaggedResourceResult\n\t\tmetrics              []*PrometheusMetric\n\t\tobservedMetricLabels map[string]model.LabelSet\n\t\tlabelsSnakeCase      bool\n\t\texpectedMetrics      []*PrometheusMetric\n\t\texpectedLabels       map[string]model.LabelSet\n\t}\n\ttestCases := []testCase{\n\t\t{\n\t\t\tname: \"metric with tag\",\n\t\t\tresources: []model.TaggedResourceResult{\n\t\t\t\t{\n\t\t\t\t\tContext: nil,\n\t\t\t\t\tData: []*model.TaggedResource{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tARN:       \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   \"CustomTag\",\n\t\t\t\t\t\t\t\t\tValue: \"tag_Value\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetrics:              
[]*PrometheusMetric{},\n\t\t\tobservedMetricLabels: map[string]model.LabelSet{},\n\t\t\tlabelsSnakeCase:      false,\n\t\t\texpectedMetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName: \"aws_elasticache_info\",\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"name\":          \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"tag_CustomTag\": \"tag_Value\",\n\t\t\t\t\t},\n\t\t\t\t\tValue: 0,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_elasticache_info\": map[string]struct{}{\n\t\t\t\t\t\"name\":          {},\n\t\t\t\t\t\"tag_CustomTag\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"label snake case\",\n\t\t\tresources: []model.TaggedResourceResult{\n\t\t\t\t{\n\t\t\t\t\tContext: nil,\n\t\t\t\t\tData: []*model.TaggedResource{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tARN:       \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   \"CustomTag\",\n\t\t\t\t\t\t\t\t\tValue: \"tag_Value\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetrics:              []*PrometheusMetric{},\n\t\t\tobservedMetricLabels: map[string]model.LabelSet{},\n\t\t\tlabelsSnakeCase:      true,\n\t\t\texpectedMetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName: \"aws_elasticache_info\",\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"name\":           \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"tag_custom_tag\": \"tag_Value\",\n\t\t\t\t\t},\n\t\t\t\t\tValue: 0,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_elasticache_info\": map[string]struct{}{\n\t\t\t\t\t\"name\":           {},\n\t\t\t\t\t\"tag_custom_tag\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"with observed metrics and 
labels\",\n\t\t\tresources: []model.TaggedResourceResult{\n\t\t\t\t{\n\t\t\t\t\tContext: nil,\n\t\t\t\t\tData: []*model.TaggedResource{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tARN:       \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   \"CustomTag\",\n\t\t\t\t\t\t\t\t\tValue: \"tag_Value\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName: \"aws_ec2_cpuutilization_maximum\",\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"name\":                 \"arn:aws:ec2:us-east-1:123456789012:instance/i-abc123\",\n\t\t\t\t\t\t\"dimension_InstanceId\": \"i-abc123\",\n\t\t\t\t\t},\n\t\t\t\t\tValue: 0,\n\t\t\t\t},\n\t\t\t},\n\t\t\tobservedMetricLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_ec2_cpuutilization_maximum\": map[string]struct{}{\n\t\t\t\t\t\"name\":                 {},\n\t\t\t\t\t\"dimension_InstanceId\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t\tlabelsSnakeCase: true,\n\t\t\texpectedMetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName: \"aws_ec2_cpuutilization_maximum\",\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"name\":                 \"arn:aws:ec2:us-east-1:123456789012:instance/i-abc123\",\n\t\t\t\t\t\t\"dimension_InstanceId\": \"i-abc123\",\n\t\t\t\t\t},\n\t\t\t\t\tValue: 0,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"aws_elasticache_info\",\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"name\":           \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"tag_custom_tag\": \"tag_Value\",\n\t\t\t\t\t},\n\t\t\t\t\tValue: 0,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_ec2_cpuutilization_maximum\": map[string]struct{}{\n\t\t\t\t\t\"name\":                 {},\n\t\t\t\t\t\"dimension_InstanceId\": 
{},\n\t\t\t\t},\n\t\t\t\t\"aws_elasticache_info\": map[string]struct{}{\n\t\t\t\t\t\"name\":           {},\n\t\t\t\t\t\"tag_custom_tag\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"context on info metrics\",\n\t\t\tresources: []model.TaggedResourceResult{\n\t\t\t\t{\n\t\t\t\t\tContext: &model.ScrapeContext{\n\t\t\t\t\t\tRegion:    \"us-east-2\",\n\t\t\t\t\t\tAccountID: \"12345\",\n\t\t\t\t\t\tCustomTags: []model.Tag{{\n\t\t\t\t\t\t\tKey:   \"billable-to\",\n\t\t\t\t\t\t\tValue: \"api\",\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t\tData: []*model.TaggedResource{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tARN:       \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   \"cache_name\",\n\t\t\t\t\t\t\t\t\tValue: \"cache_instance_1\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetrics:              []*PrometheusMetric{},\n\t\t\tobservedMetricLabels: map[string]model.LabelSet{},\n\t\t\tlabelsSnakeCase:      true,\n\t\t\texpectedMetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName: \"aws_elasticache_info\",\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"name\":                   \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"tag_cache_name\":         \"cache_instance_1\",\n\t\t\t\t\t\t\"account_id\":             \"12345\",\n\t\t\t\t\t\t\"region\":                 \"us-east-2\",\n\t\t\t\t\t\t\"custom_tag_billable_to\": \"api\",\n\t\t\t\t\t},\n\t\t\t\t\tValue: 0,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_elasticache_info\": map[string]struct{}{\n\t\t\t\t\t\"name\":                   {},\n\t\t\t\t\t\"tag_cache_name\":         {},\n\t\t\t\t\t\"account_id\":             {},\n\t\t\t\t\t\"region\":                 {},\n\t\t\t\t\t\"custom_tag_billable_to\": 
{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"metric with nonstandard namespace\",\n\t\t\tresources: []model.TaggedResourceResult{\n\t\t\t\t{\n\t\t\t\t\tContext: nil,\n\t\t\t\t\tData: []*model.TaggedResource{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tARN:       \"arn:aws:sagemaker:us-east-1:123456789012:training-job/sagemaker-xgboost\",\n\t\t\t\t\t\t\tNamespace: \"/aws/sagemaker/TrainingJobs\",\n\t\t\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\t\t\tTags: []model.Tag{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   \"CustomTag\",\n\t\t\t\t\t\t\t\t\tValue: \"tag_Value\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tmetrics:              []*PrometheusMetric{},\n\t\t\tobservedMetricLabels: map[string]model.LabelSet{},\n\t\t\tlabelsSnakeCase:      false,\n\t\t\texpectedMetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName: \"aws_sagemaker_trainingjobs_info\",\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"name\":          \"arn:aws:sagemaker:us-east-1:123456789012:training-job/sagemaker-xgboost\",\n\t\t\t\t\t\t\"tag_CustomTag\": \"tag_Value\",\n\t\t\t\t\t},\n\t\t\t\t\tValue: 0,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_sagemaker_trainingjobs_info\": map[string]struct{}{\n\t\t\t\t\t\"name\":          {},\n\t\t\t\t\t\"tag_CustomTag\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tmetrics, labels := BuildNamespaceInfoMetrics(tc.resources, tc.metrics, tc.observedMetricLabels, tc.labelsSnakeCase, promslog.NewNopLogger())\n\t\t\trequire.Equal(t, tc.expectedMetrics, metrics)\n\t\t\trequire.Equal(t, tc.expectedLabels, labels)\n\t\t})\n\t}\n}\n\nfunc TestBuildMetrics(t *testing.T) {\n\tts := time.Date(2024, time.January, 1, 0, 0, 0, 0, time.UTC)\n\tnullTs := time.Time{}\n\n\ttype testCase struct {\n\t\tname            string\n\t\tdata            []model.CloudwatchMetricResult\n\t\tlabelsSnakeCase 
bool\n\t\texpectedMetrics []*PrometheusMetric\n\t\texpectedLabels  map[string]model.LabelSet\n\t\texpectedErr     error\n\t}\n\n\ttestCases := []testCase{\n\t\t{\n\t\t\tname: \"metric with GetMetricDataResult and non-nil datapoint\",\n\t\t\tdata: []model.CloudwatchMetricResult{{\n\t\t\t\tContext: &model.ScrapeContext{\n\t\t\t\t\tRegion:     \"us-east-1\",\n\t\t\t\t\tAccountID:  \"123456789012\",\n\t\t\t\t\tCustomTags: nil,\n\t\t\t\t},\n\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              true,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(1), Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"FreeableMemory\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(2), Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: 
\"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"NetworkBytesIn\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              true,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(3), Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"NetworkBytesOut\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              true,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(4), Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"NetworkPacketsIn\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              true,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: true,\n\t\t\t\t\t\t\tExportAllDataPoints:    true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\tDimensions: 
[]model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic: \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{\n\t\t\t\t\t\t\t\t{Value: aws.Float64(4), Timestamp: ts},\n\t\t\t\t\t\t\t\t{Value: aws.Float64(5), Timestamp: ts.Add(-1 * time.Minute)},\n\t\t\t\t\t\t\t\t{Value: aws.Float64(6), Timestamp: ts.Add(-2 * time.Minute)},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"NetworkPacketsOut\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              true,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: true,\n\t\t\t\t\t\t\tExportAllDataPoints:    true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic: \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{\n\t\t\t\t\t\t\t\t{Value: nil, Timestamp: ts},\n\t\t\t\t\t\t\t\t{Value: aws.Float64(5), Timestamp: ts.Add(-1 * time.Minute)},\n\t\t\t\t\t\t\t\t{Value: aws.Float64(6), Timestamp: ts.Add(-2 * time.Minute)},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"NetworkMaxBytesIn\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              true,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: true,\n\t\t\t\t\t\t\tExportAllDataPoints:    false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: 
\"AWS/ElastiCache\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic: \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{\n\t\t\t\t\t\t\t\t{Value: nil, Timestamp: ts},\n\t\t\t\t\t\t\t\t{Value: aws.Float64(5), Timestamp: ts.Add(-1 * time.Minute)},\n\t\t\t\t\t\t\t\t{Value: aws.Float64(6), Timestamp: ts.Add(-2 * time.Minute)},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t\tlabelsSnakeCase: false,\n\t\t\texpectedMetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:      \"aws_elasticache_cpuutilization_average\",\n\t\t\t\t\tValue:     1,\n\t\t\t\t\tTimestamp: nullTs,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":               \"123456789012\",\n\t\t\t\t\t\t\"name\":                     \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                   \"us-east-1\",\n\t\t\t\t\t\t\"dimension_CacheClusterId\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"aws_elasticache_freeable_memory_average\",\n\t\t\t\t\tValue:     2,\n\t\t\t\t\tTimestamp: nullTs,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":               \"123456789012\",\n\t\t\t\t\t\t\"name\":                     \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                   \"us-east-1\",\n\t\t\t\t\t\t\"dimension_CacheClusterId\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"aws_elasticache_network_bytes_in_average\",\n\t\t\t\t\tValue:     3,\n\t\t\t\t\tTimestamp: nullTs,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":               
\"123456789012\",\n\t\t\t\t\t\t\"name\":                     \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                   \"us-east-1\",\n\t\t\t\t\t\t\"dimension_CacheClusterId\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:             \"aws_elasticache_network_bytes_out_average\",\n\t\t\t\t\tValue:            4,\n\t\t\t\t\tTimestamp:        ts,\n\t\t\t\t\tIncludeTimestamp: true,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":               \"123456789012\",\n\t\t\t\t\t\t\"name\":                     \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                   \"us-east-1\",\n\t\t\t\t\t\t\"dimension_CacheClusterId\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:             \"aws_elasticache_network_packets_in_average\",\n\t\t\t\t\tValue:            4,\n\t\t\t\t\tTimestamp:        ts,\n\t\t\t\t\tIncludeTimestamp: true,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":               \"123456789012\",\n\t\t\t\t\t\t\"name\":                     \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                   \"us-east-1\",\n\t\t\t\t\t\t\"dimension_CacheClusterId\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:             \"aws_elasticache_network_packets_in_average\",\n\t\t\t\t\tValue:            5,\n\t\t\t\t\tTimestamp:        ts.Add(-1 * time.Minute),\n\t\t\t\t\tIncludeTimestamp: true,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":               \"123456789012\",\n\t\t\t\t\t\t\"name\":                     \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                   \"us-east-1\",\n\t\t\t\t\t\t\"dimension_CacheClusterId\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:             
\"aws_elasticache_network_packets_in_average\",\n\t\t\t\t\tValue:            6,\n\t\t\t\t\tTimestamp:        ts.Add(-2 * time.Minute),\n\t\t\t\t\tIncludeTimestamp: true,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":               \"123456789012\",\n\t\t\t\t\t\t\"name\":                     \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                   \"us-east-1\",\n\t\t\t\t\t\t\"dimension_CacheClusterId\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:             \"aws_elasticache_network_packets_out_average\",\n\t\t\t\t\tValue:            5,\n\t\t\t\t\tTimestamp:        ts.Add(-1 * time.Minute),\n\t\t\t\t\tIncludeTimestamp: true,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":               \"123456789012\",\n\t\t\t\t\t\t\"name\":                     \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                   \"us-east-1\",\n\t\t\t\t\t\t\"dimension_CacheClusterId\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:             \"aws_elasticache_network_packets_out_average\",\n\t\t\t\t\tValue:            6,\n\t\t\t\t\tTimestamp:        ts.Add(-2 * time.Minute),\n\t\t\t\t\tIncludeTimestamp: true,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":               \"123456789012\",\n\t\t\t\t\t\t\"name\":                     \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                   \"us-east-1\",\n\t\t\t\t\t\t\"dimension_CacheClusterId\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_elasticache_cpuutilization_average\": {\n\t\t\t\t\t\"account_id\":               {},\n\t\t\t\t\t\"name\":                     {},\n\t\t\t\t\t\"region\":                   {},\n\t\t\t\t\t\"dimension_CacheClusterId\": 
{},\n\t\t\t\t},\n\t\t\t\t\"aws_elasticache_freeable_memory_average\": {\n\t\t\t\t\t\"account_id\":               {},\n\t\t\t\t\t\"name\":                     {},\n\t\t\t\t\t\"region\":                   {},\n\t\t\t\t\t\"dimension_CacheClusterId\": {},\n\t\t\t\t},\n\t\t\t\t\"aws_elasticache_network_bytes_in_average\": {\n\t\t\t\t\t\"account_id\":               {},\n\t\t\t\t\t\"name\":                     {},\n\t\t\t\t\t\"region\":                   {},\n\t\t\t\t\t\"dimension_CacheClusterId\": {},\n\t\t\t\t},\n\t\t\t\t\"aws_elasticache_network_bytes_out_average\": {\n\t\t\t\t\t\"account_id\":               {},\n\t\t\t\t\t\"name\":                     {},\n\t\t\t\t\t\"region\":                   {},\n\t\t\t\t\t\"dimension_CacheClusterId\": {},\n\t\t\t\t},\n\t\t\t\t\"aws_elasticache_network_packets_in_average\": {\n\t\t\t\t\t\"account_id\":               {},\n\t\t\t\t\t\"name\":                     {},\n\t\t\t\t\t\"region\":                   {},\n\t\t\t\t\t\"dimension_CacheClusterId\": {},\n\t\t\t\t},\n\t\t\t\t\"aws_elasticache_network_packets_out_average\": {\n\t\t\t\t\t\"account_id\":               {},\n\t\t\t\t\t\"name\":                     {},\n\t\t\t\t\t\"region\":                   {},\n\t\t\t\t\t\"dimension_CacheClusterId\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"metric with GetMetricDataResult and nil datapoint\",\n\t\t\tdata: []model.CloudwatchMetricResult{{\n\t\t\t\tContext: &model.ScrapeContext{\n\t\t\t\t\tRegion:     \"us-east-1\",\n\t\t\t\t\tAccountID:  \"123456789012\",\n\t\t\t\t\tCustomTags: nil,\n\t\t\t\t},\n\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              true,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  
\"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: nil, Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"FreeableMemory\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: nil, Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"NetworkBytesIn\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              true,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: nil, Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: 
\"NetworkBytesOut\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              true,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: nil, Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t\tlabelsSnakeCase: false,\n\t\t\texpectedMetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:      \"aws_elasticache_cpuutilization_average\",\n\t\t\t\t\tValue:     0,\n\t\t\t\t\tTimestamp: nullTs,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":               \"123456789012\",\n\t\t\t\t\t\t\"name\":                     \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                   \"us-east-1\",\n\t\t\t\t\t\t\"dimension_CacheClusterId\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t\tIncludeTimestamp: false,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"aws_elasticache_freeable_memory_average\",\n\t\t\t\t\tValue:     math.NaN(),\n\t\t\t\t\tTimestamp: nullTs,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":               \"123456789012\",\n\t\t\t\t\t\t\"name\":                     \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                   \"us-east-1\",\n\t\t\t\t\t\t\"dimension_CacheClusterId\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t\tIncludeTimestamp: false,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:      \"aws_elasticache_network_bytes_in_average\",\n\t\t\t\t\tValue:     
0,\n\t\t\t\t\tTimestamp: nullTs,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":               \"123456789012\",\n\t\t\t\t\t\t\"name\":                     \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                   \"us-east-1\",\n\t\t\t\t\t\t\"dimension_CacheClusterId\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t\tIncludeTimestamp: false,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_elasticache_cpuutilization_average\": {\n\t\t\t\t\t\"account_id\":               {},\n\t\t\t\t\t\"name\":                     {},\n\t\t\t\t\t\"region\":                   {},\n\t\t\t\t\t\"dimension_CacheClusterId\": {},\n\t\t\t\t},\n\t\t\t\t\"aws_elasticache_freeable_memory_average\": {\n\t\t\t\t\t\"account_id\":               {},\n\t\t\t\t\t\"name\":                     {},\n\t\t\t\t\t\"region\":                   {},\n\t\t\t\t\t\"dimension_CacheClusterId\": {},\n\t\t\t\t},\n\t\t\t\t\"aws_elasticache_network_bytes_in_average\": {\n\t\t\t\t\t\"account_id\":               {},\n\t\t\t\t\t\"name\":                     {},\n\t\t\t\t\t\"region\":                   {},\n\t\t\t\t\t\"dimension_CacheClusterId\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"label snake case\",\n\t\t\tdata: []model.CloudwatchMetricResult{{\n\t\t\t\tContext: &model.ScrapeContext{\n\t\t\t\t\tRegion:     \"us-east-1\",\n\t\t\t\t\tAccountID:  \"123456789012\",\n\t\t\t\t\tCustomTags: nil,\n\t\t\t\t},\n\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: 
aws.Float64(1), Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t\tlabelsSnakeCase: true,\n\t\t\texpectedMetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:      \"aws_elasticache_cpuutilization_average\",\n\t\t\t\t\tValue:     1,\n\t\t\t\t\tTimestamp: nullTs,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":                 \"123456789012\",\n\t\t\t\t\t\t\"name\":                       \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                     \"us-east-1\",\n\t\t\t\t\t\t\"dimension_cache_cluster_id\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_elasticache_cpuutilization_average\": {\n\t\t\t\t\t\"account_id\":                 {},\n\t\t\t\t\t\"name\":                       {},\n\t\t\t\t\t\"region\":                     {},\n\t\t\t\t\t\"dimension_cache_cluster_id\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"metric with nonstandard namespace\",\n\t\t\tdata: []model.CloudwatchMetricResult{{\n\t\t\t\tContext: &model.ScrapeContext{\n\t\t\t\t\tRegion:     \"us-east-1\",\n\t\t\t\t\tAccountID:  \"123456789012\",\n\t\t\t\t\tCustomTags: nil,\n\t\t\t\t},\n\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"/aws/sagemaker/TrainingJobs\",\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  
\"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(1), Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"Host\",\n\t\t\t\t\t\t\t\tValue: \"sagemaker-xgboost\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:sagemaker:us-east-1:123456789012:training-job/sagemaker-xgboost\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t\tlabelsSnakeCase: true,\n\t\t\texpectedMetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:      \"aws_sagemaker_trainingjobs_cpuutilization_average\",\n\t\t\t\t\tValue:     1,\n\t\t\t\t\tTimestamp: nullTs,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":     \"123456789012\",\n\t\t\t\t\t\t\"name\":           \"arn:aws:sagemaker:us-east-1:123456789012:training-job/sagemaker-xgboost\",\n\t\t\t\t\t\t\"region\":         \"us-east-1\",\n\t\t\t\t\t\t\"dimension_host\": \"sagemaker-xgboost\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_sagemaker_trainingjobs_cpuutilization_average\": {\n\t\t\t\t\t\"account_id\":     {},\n\t\t\t\t\t\"name\":           {},\n\t\t\t\t\t\"region\":         {},\n\t\t\t\t\t\"dimension_host\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"metric with metric name that does duplicates part of the namespace as a prefix\",\n\t\t\tdata: []model.CloudwatchMetricResult{{\n\t\t\t\tContext: &model.ScrapeContext{\n\t\t\t\t\tRegion:     \"us-east-1\",\n\t\t\t\t\tAccountID:  \"123456789012\",\n\t\t\t\t\tCustomTags: nil,\n\t\t\t\t},\n\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"glue.driver.aggregate.bytesRead\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"Glue\",\n\t\t\t\t\t\tGetMetricDataResult: 
&model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(1), Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"JobName\",\n\t\t\t\t\t\t\t\tValue: \"test-job\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:glue:us-east-1:123456789012:job/test-job\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t\tlabelsSnakeCase: true,\n\t\t\texpectedMetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:      \"aws_glue_driver_aggregate_bytes_read_average\",\n\t\t\t\t\tValue:     1,\n\t\t\t\t\tTimestamp: nullTs,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":         \"123456789012\",\n\t\t\t\t\t\t\"name\":               \"arn:aws:glue:us-east-1:123456789012:job/test-job\",\n\t\t\t\t\t\t\"region\":             \"us-east-1\",\n\t\t\t\t\t\t\"dimension_job_name\": \"test-job\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_glue_driver_aggregate_bytes_read_average\": {\n\t\t\t\t\t\"account_id\":         {},\n\t\t\t\t\t\"name\":               {},\n\t\t\t\t\t\"region\":             {},\n\t\t\t\t\t\"dimension_job_name\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"metric with metric name that does not duplicate part of the namespace as a prefix\",\n\t\t\tdata: []model.CloudwatchMetricResult{{\n\t\t\t\tContext: &model.ScrapeContext{\n\t\t\t\t\tRegion:     \"us-east-1\",\n\t\t\t\t\tAccountID:  \"123456789012\",\n\t\t\t\t\tCustomTags: nil,\n\t\t\t\t},\n\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"aggregate.glue.jobs.bytesRead\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"Glue\",\n\t\t\t\t\t\tGetMetricDataResult: 
&model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(1), Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"JobName\",\n\t\t\t\t\t\t\t\tValue: \"test-job\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:glue:us-east-1:123456789012:job/test-job\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t\tlabelsSnakeCase: true,\n\t\t\texpectedMetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:      \"aws_glue_aggregate_glue_jobs_bytes_read_average\",\n\t\t\t\t\tValue:     1,\n\t\t\t\t\tTimestamp: nullTs,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":         \"123456789012\",\n\t\t\t\t\t\t\"name\":               \"arn:aws:glue:us-east-1:123456789012:job/test-job\",\n\t\t\t\t\t\t\"region\":             \"us-east-1\",\n\t\t\t\t\t\t\"dimension_job_name\": \"test-job\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_glue_aggregate_glue_jobs_bytes_read_average\": {\n\t\t\t\t\t\"account_id\":         {},\n\t\t\t\t\t\"name\":               {},\n\t\t\t\t\t\"region\":             {},\n\t\t\t\t\t\"dimension_job_name\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"custom tag\",\n\t\t\tdata: []model.CloudwatchMetricResult{{\n\t\t\t\tContext: &model.ScrapeContext{\n\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\tAccountID: \"123456789012\",\n\t\t\t\t\tCustomTags: []model.Tag{{\n\t\t\t\t\t\tKey:   \"billable-to\",\n\t\t\t\t\t\tValue: \"api\",\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: 
\"AWS/ElastiCache\",\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(1), Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t\tlabelsSnakeCase: true,\n\t\t\texpectedMetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:      \"aws_elasticache_cpuutilization_average\",\n\t\t\t\t\tValue:     1,\n\t\t\t\t\tTimestamp: nullTs,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":                 \"123456789012\",\n\t\t\t\t\t\t\"name\":                       \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                     \"us-east-1\",\n\t\t\t\t\t\t\"dimension_cache_cluster_id\": \"redis-cluster\",\n\t\t\t\t\t\t\"custom_tag_billable_to\":     \"api\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_elasticache_cpuutilization_average\": {\n\t\t\t\t\t\"account_id\":                 {},\n\t\t\t\t\t\"name\":                       {},\n\t\t\t\t\t\"region\":                     {},\n\t\t\t\t\t\"dimension_cache_cluster_id\": {},\n\t\t\t\t\t\"custom_tag_billable_to\":     {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"scraping with aws account alias\",\n\t\t\tdata: []model.CloudwatchMetricResult{{\n\t\t\t\tContext: &model.ScrapeContext{\n\t\t\t\t\tRegion:       \"us-east-1\",\n\t\t\t\t\tAccountID:    \"123456789012\",\n\t\t\t\t\tAccountAlias: \"billingacct\",\n\t\t\t\t},\n\t\t\t\tData: []*model.CloudwatchData{\n\t\t\t\t\t{\n\t\t\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\t\t\tMetricMigrationParams: 
model.MetricMigrationParams{\n\t\t\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(1), Timestamp: ts}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t\tlabelsSnakeCase: true,\n\t\t\texpectedMetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:      \"aws_elasticache_cpuutilization_average\",\n\t\t\t\t\tValue:     1,\n\t\t\t\t\tTimestamp: nullTs,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"account_id\":                 \"123456789012\",\n\t\t\t\t\t\t\"account_alias\":              \"billingacct\",\n\t\t\t\t\t\t\"name\":                       \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\t\t\t\"region\":                     \"us-east-1\",\n\t\t\t\t\t\t\"dimension_cache_cluster_id\": \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedLabels: map[string]model.LabelSet{\n\t\t\t\t\"aws_elasticache_cpuutilization_average\": {\n\t\t\t\t\t\"account_id\":                 {},\n\t\t\t\t\t\"account_alias\":              {},\n\t\t\t\t\t\"name\":                       {},\n\t\t\t\t\t\"region\":                     {},\n\t\t\t\t\t\"dimension_cache_cluster_id\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tres, labels, err := BuildMetrics(tc.data, tc.labelsSnakeCase, promslog.NewNopLogger())\n\t\t\tif tc.expectedErr != nil {\n\t\t\t\trequire.Equal(t, tc.expectedErr, 
err)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Equal(t, replaceNaNValues(tc.expectedMetrics), replaceNaNValues(res))\n\t\t\t\trequire.Equal(t, tc.expectedLabels, labels)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Benchmark_BuildMetrics(b *testing.B) {\n\tts := time.Date(2024, time.January, 1, 0, 0, 0, 0, time.UTC)\n\n\tdata := []model.CloudwatchMetricResult{{\n\t\tContext: &model.ScrapeContext{\n\t\t\tRegion:     \"us-east-1\",\n\t\t\tAccountID:  \"123456789012\",\n\t\t\tCustomTags: nil,\n\t\t},\n\t\tData: []*model.CloudwatchData{\n\t\t\t{\n\t\t\t\tMetricName: \"CPUUtilization\",\n\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\tNilToZero:              true,\n\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t},\n\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(1), Timestamp: ts}},\n\t\t\t\t},\n\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\tTags: []model.Tag{{\n\t\t\t\t\tKey:   \"managed_by\",\n\t\t\t\t\tValue: \"terraform\",\n\t\t\t\t}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tMetricName: \"FreeableMemory\",\n\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\tNilToZero:              false,\n\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t},\n\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(2), Timestamp: ts}},\n\t\t\t\t},\n\t\t\t\tResourceName: 
\"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\tTags: []model.Tag{{\n\t\t\t\t\tKey:   \"managed_by\",\n\t\t\t\t\tValue: \"terraform\",\n\t\t\t\t}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tMetricName: \"NetworkBytesIn\",\n\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\tNilToZero:              true,\n\t\t\t\t\tAddCloudwatchTimestamp: false,\n\t\t\t\t},\n\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(3), Timestamp: ts}},\n\t\t\t\t},\n\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\tTags: []model.Tag{{\n\t\t\t\t\tKey:   \"managed_by\",\n\t\t\t\t\tValue: \"terraform\",\n\t\t\t\t}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tMetricName: \"NetworkBytesOut\",\n\t\t\t\tMetricMigrationParams: model.MetricMigrationParams{\n\t\t\t\t\tNilToZero:              true,\n\t\t\t\t\tAddCloudwatchTimestamp: true,\n\t\t\t\t},\n\t\t\t\tNamespace: \"AWS/ElastiCache\",\n\t\t\t\tDimensions: []model.Dimension{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  \"CacheClusterId\",\n\t\t\t\t\t\tValue: \"redis-cluster\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tGetMetricDataResult: &model.GetMetricDataResult{\n\t\t\t\t\tStatistic:  \"Average\",\n\t\t\t\t\tDataPoints: []model.DataPoint{{Value: aws.Float64(4), Timestamp: ts}},\n\t\t\t\t},\n\t\t\t\tResourceName: \"arn:aws:elasticache:us-east-1:123456789012:cluster:redis-cluster\",\n\t\t\t\tTags: []model.Tag{{\n\t\t\t\t\tKey:   \"managed_by\",\n\t\t\t\t\tValue: \"terraform\",\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t}}\n\n\tvar labels map[string]model.LabelSet\n\tvar err error\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, labels, err = BuildMetrics(data, false, 
promslog.NewNopLogger())\n\t}\n\n\texpectedLabels := map[string]model.LabelSet{\n\t\t\"aws_elasticache_cpuutilization_average\": {\n\t\t\t\"account_id\":               {},\n\t\t\t\"name\":                     {},\n\t\t\t\"region\":                   {},\n\t\t\t\"dimension_CacheClusterId\": {},\n\t\t\t\"tag_managed_by\":           {},\n\t\t},\n\t\t\"aws_elasticache_freeable_memory_average\": {\n\t\t\t\"account_id\":               {},\n\t\t\t\"name\":                     {},\n\t\t\t\"region\":                   {},\n\t\t\t\"dimension_CacheClusterId\": {},\n\t\t\t\"tag_managed_by\":           {},\n\t\t},\n\t\t\"aws_elasticache_network_bytes_in_average\": {\n\t\t\t\"account_id\":               {},\n\t\t\t\"name\":                     {},\n\t\t\t\"region\":                   {},\n\t\t\t\"dimension_CacheClusterId\": {},\n\t\t\t\"tag_managed_by\":           {},\n\t\t},\n\t\t\"aws_elasticache_network_bytes_out_average\": {\n\t\t\t\"account_id\":               {},\n\t\t\t\"name\":                     {},\n\t\t\t\"region\":                   {},\n\t\t\t\"dimension_CacheClusterId\": {},\n\t\t\t\"tag_managed_by\":           {},\n\t\t},\n\t}\n\n\trequire.NoError(b, err)\n\trequire.Equal(b, expectedLabels, labels)\n}\n\nfunc TestBuildMetricName(t *testing.T) {\n\ttype testCase struct {\n\t\tname      string\n\t\tnamespace string\n\t\tmetric    string\n\t\tstatistic string\n\t\texpected  string\n\t}\n\n\ttestCases := []testCase{\n\t\t{\n\t\t\tname:      \"standard AWS namespace\",\n\t\t\tnamespace: \"AWS/ElastiCache\",\n\t\t\tmetric:    \"CPUUtilization\",\n\t\t\tstatistic: \"Average\",\n\t\t\texpected:  \"aws_elasticache_cpuutilization_average\",\n\t\t},\n\t\t{\n\t\t\tname:      \"nonstandard namespace with slashes\",\n\t\t\tnamespace: \"/aws/sagemaker/TrainingJobs\",\n\t\t\tmetric:    \"CPUUtilization\",\n\t\t\tstatistic: \"Average\",\n\t\t\texpected:  \"aws_sagemaker_trainingjobs_cpuutilization_average\",\n\t\t},\n\t\t{\n\t\t\tname:      \"metric name duplicating 
namespace\",\n\t\t\tnamespace: \"Glue\",\n\t\t\tmetric:    \"glue.driver.aggregate.bytesRead\",\n\t\t\tstatistic: \"Average\",\n\t\t\texpected:  \"aws_glue_driver_aggregate_bytes_read_average\",\n\t\t},\n\t\t{\n\t\t\tname:      \"metric name not duplicating namespace\",\n\t\t\tnamespace: \"Glue\",\n\t\t\tmetric:    \"aggregate.glue.jobs.bytesRead\",\n\t\t\tstatistic: \"Average\",\n\t\t\texpected:  \"aws_glue_aggregate_glue_jobs_bytes_read_average\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tresult := BuildMetricName(tc.namespace, tc.metric, tc.statistic)\n\t\t\trequire.Equal(t, tc.expected, result)\n\t\t})\n\t}\n}\n\nfunc Benchmark_BuildMetricName(b *testing.B) {\n\ttestCases := []struct {\n\t\tnamespace string\n\t\tmetric    string\n\t\tstatistic string\n\t}{\n\t\t{\n\t\t\tnamespace: \"AWS/ElastiCache\",\n\t\t\tmetric:    \"CPUUtilization\",\n\t\t\tstatistic: \"Average\",\n\t\t},\n\t\t{\n\t\t\tnamespace: \"/aws/sagemaker/TrainingJobs\",\n\t\t\tmetric:    \"CPUUtilization\",\n\t\t\tstatistic: \"Average\",\n\t\t},\n\t\t{\n\t\t\tnamespace: \"Glue\",\n\t\t\tmetric:    \"glue.driver.aggregate.bytesRead\",\n\t\t\tstatistic: \"Average\",\n\t\t},\n\t\t{\n\t\t\tnamespace: \"Glue\",\n\t\t\tmetric:    \"aggregate.glue.jobs.bytesRead\",\n\t\t\tstatistic: \"Average\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttestName := BuildMetricName(tc.namespace, tc.metric, tc.statistic)\n\t\tb.ResetTimer()\n\t\tb.ReportAllocs()\n\t\tb.Run(testName, func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tBuildMetricName(tc.namespace, tc.metric, tc.statistic)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// replaceNaNValues replaces any NaN floating-point values with a marker value (54321.0)\n// so that require.Equal() can compare them. 
By default, require.Equal() will fail if any\n// struct values are NaN because NaN != NaN\nfunc replaceNaNValues(metrics []*PrometheusMetric) []*PrometheusMetric {\n\tfor _, metric := range metrics {\n\t\tif math.IsNaN(metric.Value) {\n\t\t\tmetric.Value = 54321.0\n\t\t}\n\t}\n\treturn metrics\n}\n\n// TestSortByTimeStamp validates that sortByTimestamp() sorts in descending order.\nfunc TestSortByTimeStamp(t *testing.T) {\n\tts := time.Date(2024, time.January, 1, 0, 0, 0, 0, time.UTC)\n\tdataPointMiddle := &model.MetricStatisticsResult{\n\t\tTimestamp: aws.Time(ts.Add(time.Minute * 2 * -1)),\n\t\tMaximum:   aws.Float64(2),\n\t}\n\n\tdataPointNewest := &model.MetricStatisticsResult{\n\t\tTimestamp: aws.Time(ts.Add(time.Minute * -1)),\n\t\tMaximum:   aws.Float64(1),\n\t}\n\n\tdataPointOldest := &model.MetricStatisticsResult{\n\t\tTimestamp: aws.Time(ts.Add(time.Minute * 3 * -1)),\n\t\tMaximum:   aws.Float64(3),\n\t}\n\n\tcloudWatchDataPoints := []*model.MetricStatisticsResult{\n\t\tdataPointMiddle,\n\t\tdataPointNewest,\n\t\tdataPointOldest,\n\t}\n\n\tsortedDataPoints := sortByTimestamp(cloudWatchDataPoints)\n\n\texpectedDataPoints := []*model.MetricStatisticsResult{\n\t\tdataPointNewest,\n\t\tdataPointMiddle,\n\t\tdataPointOldest,\n\t}\n\n\trequire.Equal(t, expectedDataPoints, sortedDataPoints)\n}\n\nfunc Test_EnsureLabelConsistencyAndRemoveDuplicates(t *testing.T) {\n\ttestCases := []struct {\n\t\tname           string\n\t\tmetrics        []*PrometheusMetric\n\t\tobservedLabels map[string]model.LabelSet\n\t\toutput         []*PrometheusMetric\n\t}{\n\t\t{\n\t\t\tname: \"adds missing labels\",\n\t\t\tmetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t\tValue:  1.0,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label2\": \"value2\"},\n\t\t\t\t\tValue:  2.0,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   
\"metric1\",\n\t\t\t\t\tLabels: map[string]string{},\n\t\t\t\t\tValue:  3.0,\n\t\t\t\t},\n\t\t\t},\n\t\t\tobservedLabels: map[string]model.LabelSet{\"metric1\": {\"label1\": {}, \"label2\": {}, \"label3\": {}}},\n\t\t\toutput: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\", \"label2\": \"\", \"label3\": \"\"},\n\t\t\t\t\tValue:  1.0,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"\", \"label3\": \"\", \"label2\": \"value2\"},\n\t\t\t\t\tValue:  2.0,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"\", \"label2\": \"\", \"label3\": \"\"},\n\t\t\t\t\tValue:  3.0,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"duplicate metric\",\n\t\t\tmetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tobservedLabels: map[string]model.LabelSet{},\n\t\t\toutput: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"duplicate metric, multiple labels\",\n\t\t\tmetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\", \"label2\": \"value2\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label2\": \"value2\", \"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tobservedLabels: map[string]model.LabelSet{},\n\t\t\toutput: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\", \"label2\": \"value2\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: 
\"metric with different labels\",\n\t\t\tmetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label2\": \"value2\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tobservedLabels: map[string]model.LabelSet{},\n\t\t\toutput: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label2\": \"value2\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"two metrics\",\n\t\t\tmetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric2\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tobservedLabels: map[string]model.LabelSet{},\n\t\t\toutput: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric2\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"two metrics with different labels\",\n\t\t\tmetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric2\",\n\t\t\t\t\tLabels: map[string]string{\"label2\": \"value2\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tobservedLabels: map[string]model.LabelSet{},\n\t\t\toutput: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric2\",\n\t\t\t\t\tLabels: map[string]string{\"label2\": 
\"value2\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple duplicates and non-duplicates\",\n\t\t\tmetrics: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric2\",\n\t\t\t\t\tLabels: map[string]string{\"label2\": \"value2\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric2\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tobservedLabels: map[string]model.LabelSet{},\n\t\t\toutput: []*PrometheusMetric{\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric2\",\n\t\t\t\t\tLabels: map[string]string{\"label2\": \"value2\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric2\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   \"metric1\",\n\t\t\t\t\tLabels: map[string]string{\"label1\": \"value1\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tactual := EnsureLabelConsistencyAndRemoveDuplicates(tc.metrics, tc.observedLabels)\n\t\t\trequire.ElementsMatch(t, tc.output, actual)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/promutil/prometheus.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage promutil\n\nimport (\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/common/model\"\n\t\"golang.org/x/exp/maps\"\n)\n\nvar (\n\tCloudwatchAPIErrorCounter = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_request_errors\",\n\t\tHelp: \"Help is not implemented yet.\",\n\t}, []string{\"api_name\"})\n\tCloudwatchAPICounter = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_requests_total\",\n\t\tHelp: \"Number of calls made to the CloudWatch APIs\",\n\t}, []string{\"api_name\"})\n\tCloudwatchGetMetricDataAPICounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_getmetricdata_requests_total\",\n\t\tHelp: \"DEPRECATED: replaced by yace_cloudwatch_requests_total with api_name label\",\n\t})\n\tCloudwatchGetMetricDataAPIMetricsCounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_getmetricdata_metrics_requested_total\",\n\t\tHelp: \"Number of metrics requested from the CloudWatch GetMetricData API which is how AWS bills\",\n\t})\n\tCloudwatchGetMetricStatisticsAPICounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_getmetricstatistics_requests_total\",\n\t\tHelp: \"DEPRECATED: replaced by yace_cloudwatch_requests_total with 
api_name label\",\n\t})\n\tResourceGroupTaggingAPICounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_resourcegrouptaggingapi_requests_total\",\n\t\tHelp: \"Help is not implemented yet.\",\n\t})\n\tAutoScalingAPICounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_autoscalingapi_requests_total\",\n\t\tHelp: \"Help is not implemented yet.\",\n\t})\n\tTargetGroupsAPICounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_targetgroupapi_requests_total\",\n\t\tHelp: \"Help is not implemented yet.\",\n\t})\n\tAPIGatewayAPICounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_apigatewayapi_requests_total\",\n\t})\n\tAPIGatewayAPIV2Counter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_apigatewayapiv2_requests_total\",\n\t})\n\tEc2APICounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_ec2api_requests_total\",\n\t\tHelp: \"Help is not implemented yet.\",\n\t})\n\tShieldAPICounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_shieldapi_requests_total\",\n\t\tHelp: \"Help is not implemented yet.\",\n\t})\n\tManagedPrometheusAPICounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_managedprometheusapi_requests_total\",\n\t\tHelp: \"Help is not implemented yet.\",\n\t})\n\tStoragegatewayAPICounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_storagegatewayapi_requests_total\",\n\t\tHelp: \"Help is not implemented yet.\",\n\t})\n\tDmsAPICounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_dmsapi_requests_total\",\n\t\tHelp: \"Help is not implemented yet.\",\n\t})\n\tDuplicateMetricsFilteredCounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"yace_cloudwatch_duplicate_metrics_filtered\",\n\t\tHelp: \"Help is not implemented yet.\",\n\t})\n)\n\nvar 
replacer = strings.NewReplacer(\n\t\" \", \"_\",\n\t\",\", \"_\",\n\t\"\\t\", \"_\",\n\t\"/\", \"_\",\n\t\"\\\\\", \"_\",\n\t\".\", \"_\",\n\t\"-\", \"_\",\n\t\":\", \"_\",\n\t\"=\", \"_\",\n\t\"“\", \"_\",\n\t\"@\", \"_\",\n\t\"<\", \"_\",\n\t\">\", \"_\",\n\t\"(\", \"_\",\n\t\")\", \"_\",\n\t\"%\", \"_percent\",\n)\n\ntype PrometheusMetric struct {\n\tName             string\n\tLabels           map[string]string\n\tValue            float64\n\tIncludeTimestamp bool\n\tTimestamp        time.Time\n}\n\ntype PrometheusCollector struct {\n\tmetrics []prometheus.Metric\n}\n\nfunc NewPrometheusCollector(metrics []*PrometheusMetric) *PrometheusCollector {\n\treturn &PrometheusCollector{\n\t\tmetrics: toConstMetrics(metrics),\n\t}\n}\n\nfunc (p *PrometheusCollector) Describe(_ chan<- *prometheus.Desc) {\n\t// The exporter produces a dynamic set of metrics and the docs for prometheus.Collector Describe say\n\t// \tSending no descriptor at all marks the Collector as “unchecked”,\n\t// \ti.e. no checks will be performed at registration time, and the\n\t// \tCollector may yield any Metric it sees fit in its Collect method.\n\t// Based on our use an \"unchecked\" collector is perfectly fine\n}\n\nfunc (p *PrometheusCollector) Collect(metrics chan<- prometheus.Metric) {\n\tfor _, metric := range p.metrics {\n\t\tmetrics <- metric\n\t}\n}\n\nfunc toConstMetrics(metrics []*PrometheusMetric) []prometheus.Metric {\n\t// We keep two fast lookup maps here one for the prometheus.Desc of a metric which can be reused for each metric with\n\t// the same name and the expected label key order of a particular metric name.\n\t// The prometheus.Desc object is expensive to create and being able to reuse it for all metrics with the same name\n\t// results in large performance gain. 
We use the other map because metrics created using the Desc only provide label\n\t// values and they must be provided in the exact same order as registered in the Desc.\n\tmetricToDesc := map[string]*prometheus.Desc{}\n\tmetricToExpectedLabelOrder := map[string][]string{}\n\n\tresult := make([]prometheus.Metric, 0, len(metrics))\n\tfor _, metric := range metrics {\n\t\tmetricName := metric.Name\n\t\tif _, ok := metricToDesc[metricName]; !ok {\n\t\t\tlabelKeys := maps.Keys(metric.Labels)\n\t\t\tmetricToDesc[metricName] = prometheus.NewDesc(metricName, \"Help is not implemented yet.\", labelKeys, nil)\n\t\t\tmetricToExpectedLabelOrder[metricName] = labelKeys\n\t\t}\n\t\tmetricsDesc := metricToDesc[metricName]\n\n\t\t// Create the label values using the label order of the Desc\n\t\tlabelValues := make([]string, 0, len(metric.Labels))\n\t\tfor _, labelKey := range metricToExpectedLabelOrder[metricName] {\n\t\t\tlabelValues = append(labelValues, metric.Labels[labelKey])\n\t\t}\n\n\t\tpromMetric, err := prometheus.NewConstMetric(metricsDesc, prometheus.GaugeValue, metric.Value, labelValues...)\n\t\tif err != nil {\n\t\t\t// If for whatever reason the metric or metricsDesc is considered invalid this will ensure the error is\n\t\t\t// reported through the collector\n\t\t\tpromMetric = prometheus.NewInvalidMetric(metricsDesc, err)\n\t\t} else if metric.IncludeTimestamp {\n\t\t\tpromMetric = prometheus.NewMetricWithTimestamp(metric.Timestamp, promMetric)\n\t\t}\n\n\t\tresult = append(result, promMetric)\n\t}\n\n\treturn result\n}\n\nfunc PromString(text string) string {\n\tvar buf strings.Builder\n\tPromStringToBuilder(text, &buf)\n\treturn buf.String()\n}\n\nfunc PromStringToBuilder(text string, buf *strings.Builder) {\n\tbuf.Grow(len(text))\n\n\tvar prev rune\n\tfor _, c := range text {\n\t\tswitch c {\n\t\tcase ' ', ',', '\\t', '/', '\\\\', '.', '-', ':', '=', '@', '<', '>', '(', ')', '“':\n\t\t\tbuf.WriteRune('_')\n\t\tcase 
'%':\n\t\t\tbuf.WriteString(\"_percent\")\n\t\tdefault:\n\t\t\tif unicode.IsUpper(c) && (unicode.IsLower(prev) || unicode.IsDigit(prev)) {\n\t\t\t\tbuf.WriteRune('_')\n\t\t\t}\n\t\t\tbuf.WriteRune(unicode.ToLower(c))\n\t\t}\n\t\tprev = c\n\t}\n}\n\nfunc PromStringTag(text string, labelsSnakeCase bool) (bool, string) {\n\tvar s string\n\tif labelsSnakeCase {\n\t\ts = PromString(text)\n\t} else {\n\t\ts = sanitize(text)\n\t}\n\treturn model.LabelName(s).IsValid(), s //nolint:staticcheck\n}\n\n// sanitize replaces some invalid chars with an underscore\nfunc sanitize(text string) string {\n\tif strings.ContainsAny(text, \"“%\") {\n\t\t// fallback to the replacer for complex cases:\n\t\t// - '“' is non-ascii rune\n\t\t// - '%' is replaced with a whole string\n\t\treturn replacer.Replace(text)\n\t}\n\n\tb := []byte(text)\n\tfor i := 0; i < len(b); i++ {\n\t\tswitch b[i] {\n\t\tcase ' ', ',', '\\t', '/', '\\\\', '.', '-', ':', '=', '@', '<', '>', '(', ')':\n\t\t\tb[i] = '_'\n\t\t}\n\t}\n\treturn string(b)\n}\n"
  },
  {
    "path": "pkg/promutil/prometheus_test.go",
    "content": "// Copyright 2024 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\npackage promutil\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tdto \"github.com/prometheus/client_model/go\"\n\t\"github.com/prometheus/common/model\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestSanitize(t *testing.T) {\n\ttestCases := []struct {\n\t\tinput  string\n\t\toutput string\n\t}{\n\t\t{\n\t\t\tinput:  \"Global.Topic.Count\",\n\t\t\toutput: \"Global_Topic_Count\",\n\t\t},\n\t\t{\n\t\t\tinput:  \"Status.Check.Failed_Instance\",\n\t\t\toutput: \"Status_Check_Failed_Instance\",\n\t\t},\n\t\t{\n\t\t\tinput:  \"IHaveA%Sign\",\n\t\t\toutput: \"IHaveA_percentSign\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tassert.Equal(t, tc.output, sanitize(tc.input))\n\t}\n}\n\nfunc TestPromStringTag(t *testing.T) {\n\toriginalValidationScheme := model.NameValidationScheme //nolint:staticcheck\n\tmodel.NameValidationScheme = model.LegacyValidation    //nolint:staticcheck\n\tdefer func() {\n\t\tmodel.NameValidationScheme = originalValidationScheme //nolint:staticcheck\n\t}()\n\n\ttestCases := []struct {\n\t\tname        string\n\t\tlabel       string\n\t\ttoSnakeCase bool\n\t\tok          bool\n\t\tout         string\n\t}{\n\t\t{\n\t\t\tname:        \"valid\",\n\t\t\tlabel:       \"labelName\",\n\t\t\ttoSnakeCase: false,\n\t\t\tok:          
true,\n\t\t\tout:         \"labelName\",\n\t\t},\n\t\t{\n\t\t\tname:        \"valid, convert to snake case\",\n\t\t\tlabel:       \"labelName\",\n\t\t\ttoSnakeCase: true,\n\t\t\tok:          true,\n\t\t\tout:         \"label_name\",\n\t\t},\n\t\t{\n\t\t\tname:        \"valid (snake case)\",\n\t\t\tlabel:       \"label_name\",\n\t\t\ttoSnakeCase: false,\n\t\t\tok:          true,\n\t\t\tout:         \"label_name\",\n\t\t},\n\t\t{\n\t\t\tname:        \"valid (snake case) unchanged\",\n\t\t\tlabel:       \"label_name\",\n\t\t\ttoSnakeCase: true,\n\t\t\tok:          true,\n\t\t\tout:         \"label_name\",\n\t\t},\n\t\t{\n\t\t\tname:        \"invalid chars\",\n\t\t\tlabel:       \"invalidChars@$\",\n\t\t\ttoSnakeCase: false,\n\t\t\tok:          false,\n\t\t\tout:         \"\",\n\t\t},\n\t\t{\n\t\t\tname:        \"invalid chars, convert to snake case\",\n\t\t\tlabel:       \"invalidChars@$\",\n\t\t\ttoSnakeCase: true,\n\t\t\tok:          false,\n\t\t\tout:         \"\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tok, out := PromStringTag(tc.label, tc.toSnakeCase)\n\t\t\tassert.Equal(t, tc.ok, ok)\n\t\t\tif ok {\n\t\t\t\tassert.Equal(t, tc.out, out)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNewPrometheusCollector_CanReportMetricsAndErrors(t *testing.T) {\n\toriginalValidationScheme := model.NameValidationScheme //nolint:staticcheck\n\tmodel.NameValidationScheme = model.LegacyValidation    //nolint:staticcheck\n\tdefer func() {\n\t\tmodel.NameValidationScheme = originalValidationScheme //nolint:staticcheck\n\t}()\n\n\tmetrics := []*PrometheusMetric{\n\t\t{\n\t\t\tName:             \"this*is*not*valid\",\n\t\t\tLabels:           map[string]string{},\n\t\t\tValue:            0,\n\t\t\tIncludeTimestamp: false,\n\t\t},\n\t\t{\n\t\t\tName:             \"this_is_valid\",\n\t\t\tLabels:           map[string]string{\"key\": \"value1\"},\n\t\t\tValue:            0,\n\t\t\tIncludeTimestamp: false,\n\t\t},\n\t}\n\tcollector := 
NewPrometheusCollector(metrics)\n\tregistry := prometheus.NewRegistry()\n\trequire.NoError(t, registry.Register(collector))\n\tfamilies, err := registry.Gather()\n\tassert.Error(t, err)\n\tassert.Len(t, families, 1)\n\tfamily := families[0]\n\tassert.Equal(t, \"this_is_valid\", family.GetName())\n}\n\nfunc TestNewPrometheusCollector_CanReportMetrics(t *testing.T) {\n\tts := time.Now()\n\n\tlabelSet1 := map[string]string{\"key1\": \"value\", \"key2\": \"value\", \"key3\": \"value\"}\n\tlabelSet2 := map[string]string{\"key2\": \"out\", \"key3\": \"of\", \"key1\": \"order\"}\n\tlabelSet3 := map[string]string{\"key2\": \"out\", \"key1\": \"of\", \"key3\": \"order\"}\n\tmetrics := []*PrometheusMetric{\n\t\t{\n\t\t\tName:             \"metric_with_labels\",\n\t\t\tLabels:           labelSet1,\n\t\t\tValue:            1,\n\t\t\tIncludeTimestamp: false,\n\t\t},\n\t\t{\n\t\t\tName:             \"metric_with_labels\",\n\t\t\tLabels:           labelSet2,\n\t\t\tValue:            2,\n\t\t\tIncludeTimestamp: false,\n\t\t},\n\t\t{\n\t\t\tName:             \"metric_with_labels\",\n\t\t\tLabels:           labelSet3,\n\t\t\tValue:            3,\n\t\t\tIncludeTimestamp: false,\n\t\t},\n\t\t{\n\t\t\tName:             \"metric_with_timestamp\",\n\t\t\tLabels:           map[string]string{},\n\t\t\tValue:            1,\n\t\t\tIncludeTimestamp: true,\n\t\t\tTimestamp:        ts,\n\t\t},\n\t}\n\n\tcollector := NewPrometheusCollector(metrics)\n\tregistry := prometheus.NewRegistry()\n\trequire.NoError(t, registry.Register(collector))\n\tfamilies, err := registry.Gather()\n\tassert.NoError(t, err)\n\tassert.Len(t, families, 2)\n\n\tvar metricWithLabels *dto.MetricFamily\n\tvar metricWithTs *dto.MetricFamily\n\n\tfor _, metricFamily := range families {\n\t\tassert.Equal(t, dto.MetricType_GAUGE, metricFamily.GetType())\n\n\t\tswitch {\n\t\tcase metricFamily.GetName() == \"metric_with_labels\":\n\t\t\tmetricWithLabels = metricFamily\n\t\tcase metricFamily.GetName() == 
\"metric_with_timestamp\":\n\t\t\tmetricWithTs = metricFamily\n\t\tdefault:\n\t\t\trequire.Failf(t, \"Encountered an unexpected metric family %s\", metricFamily.GetName())\n\t\t}\n\t}\n\trequire.NotNil(t, metricWithLabels)\n\trequire.NotNil(t, metricWithTs)\n\n\tassert.Len(t, metricWithLabels.Metric, 3)\n\tfor _, metric := range metricWithLabels.Metric {\n\t\tassert.Len(t, metric.Label, 3)\n\t\tvar labelSetToMatch map[string]string\n\t\tswitch *metric.Gauge.Value {\n\t\tcase 1.0:\n\t\t\tlabelSetToMatch = labelSet1\n\t\tcase 2.0:\n\t\t\tlabelSetToMatch = labelSet2\n\t\tcase 3.0:\n\t\t\tlabelSetToMatch = labelSet3\n\t\tdefault:\n\t\t\trequire.Failf(t, \"Encountered an unexpected metric value %v\", *metric.Gauge.Value)\n\t\t}\n\n\t\tfor _, labelPairs := range metric.Label {\n\t\t\trequire.Contains(t, labelSetToMatch, *labelPairs.Name)\n\t\t\trequire.Equal(t, labelSetToMatch[*labelPairs.Name], *labelPairs.Value)\n\t\t}\n\t}\n\n\trequire.Len(t, metricWithTs.Metric, 1)\n\ttsMetric := metricWithTs.Metric[0]\n\tassert.Equal(t, ts.UnixMilli(), *tsMetric.TimestampMs)\n\tassert.Equal(t, 1.0, *tsMetric.Gauge.Value)\n}\n"
  }
]