Repository: open-policy-agent/kube-mgmt Branch: master Commit: 39dabfd6337c Files: 101 Total size: 250.2 KB Directory structure: gitextract_9hv5t40p/ ├── .dockerignore ├── .editorconfig ├── .github/ │ └── workflows/ │ ├── build.yaml │ ├── cache.yaml │ └── release.yaml ├── .gitignore ├── .ko.yaml ├── LICENSE ├── README.md ├── charts/ │ └── opa-kube-mgmt/ │ ├── Chart.yaml │ ├── README.md │ ├── templates/ │ │ ├── _helpers.tpl │ │ ├── deployment.yaml │ │ ├── ingressroute.yaml │ │ ├── mgmt-token-secret.yaml │ │ ├── poddisruptionbudget.yaml │ │ ├── rbac-mgmt-replicate.yaml │ │ ├── rbac-mgmt.yaml │ │ ├── rbac-sar.yaml │ │ ├── secret-opa-config.yaml │ │ ├── service.yaml │ │ ├── serviceaccount.yaml │ │ ├── servicemonitor.yaml │ │ └── webhookconfiguration.yaml │ ├── values.schema.json │ └── values.yaml ├── cmd/ │ └── kube-mgmt/ │ ├── flag.go │ ├── flag_test.go │ └── main.go ├── devbox.json ├── devspace.yaml ├── docs/ │ ├── admission-control-1.7.md │ ├── admission-control-crd.md │ ├── admission-control-secure.md │ └── tls-1.7.md ├── examples/ │ └── service_validation/ │ ├── README.md │ ├── admission_controller.yaml │ └── install.sh ├── go.mod ├── go.sum ├── internal/ │ └── expect/ │ ├── client.go │ ├── request.go │ └── script.go ├── justfile ├── pkg/ │ ├── configmap/ │ │ └── configmap.go │ ├── data/ │ │ ├── generic.go │ │ ├── generic_test.go │ │ └── types.go │ ├── dynamicdata/ │ │ ├── dynamicdata.go │ │ └── dynamicdata_test.go │ ├── opa/ │ │ ├── opa.go │ │ └── opa_test.go │ ├── types/ │ │ └── types.go │ └── version/ │ └── version.go └── test/ ├── e2e/ │ ├── custom_config/ │ │ ├── 1_bundle_loaded.hurl │ │ ├── chainsaw-test.yaml │ │ └── values.yaml │ ├── custom_mgmt_token/ │ │ ├── 1_policy_loaded.hurl │ │ ├── 2_data_loaded.hurl │ │ ├── chainsaw-test.yaml │ │ └── values.yaml │ ├── default/ │ │ ├── 1_initial_state.hurl │ │ ├── 2_policy_loaded.hurl │ │ ├── 3_data_loaded.hurl │ │ ├── chainsaw-test.yaml │ │ └── values.yaml │ ├── fixture-labels.yaml │ ├── fixture-multi.yaml │ ├── 
fixture-replication.yaml │ ├── fixture.yaml │ ├── labels/ │ │ ├── 1_initial_state.hurl │ │ ├── 2_policy_loaded.hurl │ │ ├── 3_data_loaded.hurl │ │ ├── chainsaw-test.yaml │ │ └── values.yaml │ ├── multi/ │ │ ├── 1_initial_state.hurl │ │ ├── 2_policies_loaded.hurl │ │ ├── 3_policy_unloaded.hurl │ │ ├── 4_policies_reloaded.hurl │ │ ├── chainsaw-test.yaml │ │ └── values.yaml │ ├── replicate/ │ │ ├── 1_replication.hurl │ │ ├── chainsaw-test.yaml │ │ └── values.yaml │ └── replicate_auto/ │ ├── .gitignore │ ├── 1_replication.hurl │ ├── bundle/ │ │ ├── .manifest │ │ └── main.rego │ ├── chainsaw-test.yaml │ └── values.yaml ├── lint/ │ ├── images.yaml │ ├── sa.yaml │ ├── service.yaml │ └── tsc.yaml └── unit/ ├── health.yaml ├── kube-mgmt_args.yaml ├── rbac_cm.yaml ├── rbac_replicate.yaml ├── sa.yaml ├── service.yaml └── tsc.yaml ================================================ FILE CONTENTS ================================================ ================================================ FILE: .dockerignore ================================================ # exclude everything, then re-include only what the Go build needs * !go.mod !go.sum !cmd/ !pkg/ !internal/ ================================================ FILE: .editorconfig ================================================ root = true [*.{sh,yaml,md}] end_of_line = lf charset = utf-8 insert_final_newline = true trim_trailing_whitespace = true indent_style = space indent_size = 2 max_line_length = 120 [justfile] end_of_line = lf charset = utf-8 insert_final_newline = true trim_trailing_whitespace = true indent_style = space indent_size = 2 max_line_length = 120 ================================================ FILE: .github/workflows/build.yaml ================================================ name: Build on: workflow_dispatch: push: paths-ignore: - "docs/**" - "logo/**" - "examples/**" - "README.md" - "charts/opa-kube-mgmt/README.md" branches: - "master" pull_request: branches: - "master" - "feat/*" - "fix/*" jobs: 
build_job: name: Build runs-on: ubuntu-latest permissions: checks: write steps: - uses: actions/checkout@v6 - uses: actions/cache@v5 with: path: | ~/go/pkg/mod ~/go/bin ~/.local/share/helm/plugins key: go-tools-${{ hashFiles('devbox.json', 'go.mod', 'go.sum') }} restore-keys: | go-tools- - uses: jetify-com/devbox-install-action@v0.15.0 with: enable-cache: true - name: lint and unit test run: devbox run -- just test - name: publish helm lint report uses: mikepenz/action-junit-report@v6 if: always() with: report_paths: "build/test-results/helm-unittest/lint.xml" check_name: "Helm Lint Tests" fail_on_failure: true detailed_summary: true include_passed: true - name: publish helm unit test report uses: mikepenz/action-junit-report@v6 if: always() with: report_paths: "build/test-results/helm-unittest/unit.xml" check_name: "Helm Unit Tests" fail_on_failure: true detailed_summary: true include_passed: true - name: e2e test run: devbox run -- just all && devbox run -- just test-e2e-all - name: publish e2e test report uses: mikepenz/action-junit-report@v6 if: always() with: report_paths: "build/test-results/chainsaw/*.xml" check_name: "E2E Tests" fail_on_failure: true detailed_summary: true include_passed: true - name: failure logs if: ${{ failure() }} run: | echo "---------------------------------------" kubectl get all echo "---------------------------------------" kubectl describe po kube-mgmt-opa-kube-mgmt || true echo "---------------------------------------" kubectl logs -l app=kube-mgmt-opa-kube-mgmt -c opa --tail=-1 echo "---------------------------------------" kubectl logs -l app=kube-mgmt-opa-kube-mgmt -c mgmt --tail=-1 echo "---------------------------------------" ================================================ FILE: .github/workflows/cache.yaml ================================================ name: Update cache on: push: branches: - master workflow_dispatch: schedule: - cron: '0 6 */6 * *' jobs: update_cache: runs-on: ubuntu-latest steps: - uses: 
actions/checkout@v6 - uses: actions/cache@v5 with: path: | ~/go/pkg/mod ~/go/bin ~/.local/share/helm/plugins key: go-tools-${{ hashFiles('devbox.json', 'go.mod', 'go.sum') }} restore-keys: | go-tools- - uses: jetify-com/devbox-install-action@v0.15.0 with: enable-cache: true - run: | eval "$(devbox shellenv -c . --init-hook)" devbox run -- go mod download ================================================ FILE: .github/workflows/release.yaml ================================================ name: Release permissions: packages: write contents: write on: workflow_dispatch: {} push: tags: - '[0-9]+.[0-9]+.[0-9]+' jobs: docker_job: runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - uses: jetify-com/devbox-install-action@v0.15.0 with: enable-cache: true - uses: docker/login-action@v4 with: username: ${{ secrets.DOCKER_USER }} password: ${{ secrets.DOCKER_PASSWORD }} - uses: docker/login-action@v4 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ github.token }} - name: build and publish image, create chart archive run: devbox run -- devspace build --profile release - name: upload helm artifact uses: actions/upload-artifact@v7 with: name: "helm" path: "opa-kube-mgmt-*.tgz" helm_job: runs-on: ubuntu-latest needs: docker_job steps: - uses: actions/checkout@v6 with: ref: gh-pages - name: download helm artifact uses: actions/download-artifact@v8 id: download with: name: helm path: /tmp/helm - name: update helm index run: | helm repo index /tmp/helm --merge ./charts/index.yaml mv -f /tmp/helm/* ./charts - name: publish index and chart uses: actions-js/push@v1.5 with: github_token: ${{ secrets.GITHUB_TOKEN }} branch: gh-pages ================================================ FILE: .gitignore ================================================ ./kube-mgmt bin .go *.tgz .idea .vscode/settings.json .devspace/ build/ ================================================ FILE: .ko.yaml ================================================ defaultBaseImage: alpine:3.23.4 
builds: - id: kube-mgmt main: ./cmd/kube-mgmt ldflags: - -X github.com/open-policy-agent/kube-mgmt/pkg/version.Version={{.Env.KO_VERSION}} - -X github.com/open-policy-agent/kube-mgmt/pkg/version.Git={{.Env.KO_COMMIT}} ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================ # ![logo](./logo/logo.png) kube-mgmt `kube-mgmt` manages policies / data of [Open Policy Agent](https://github.com/open-policy-agent/opa) instances in Kubernetes. Use `kube-mgmt` to: * Load policies and/or static data into OPA instance from `ConfigMap`. * Replicate Kubernetes resources including [CustomResourceDefinitions (CRDs)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions) into OPA instance. ## Deployment Guide Both `OPA` and `kube-mgmt` can be installed using [opa-kube-mgmt]( https://artifacthub.io/packages/helm/opa-kube-mgmt/opa-kube-mgmt) Helm chart. Follow [README](charts/opa-kube-mgmt/README.md) to install it into K8s cluster. 
## Policies and data loading `kube-mgmt` automatically discovers policies and JSON data stored in `ConfigMaps` in Kubernetes and loads them into OPA. `kube-mgmt` assumes a `ConfigMap` contains policy or JSON data if the `ConfigMap` is: - Created in a namespace listed in the `--namespaces` option. If you specify `--namespaces=*` then `kube-mgmt` will look for policies in ALL namespaces. - Labelled with `openpolicyagent.org/policy=rego` for policies - Labelled with `openpolicyagent.org/data=opa` for JSON data Policies or data discovery and loading can be disabled using `--enable-policy=false` or `--enable-data=false` flags respectively. Label names and their values can be configured using `--policy-label`, `--policy-value`, `--data-label`, `--data-value` CLI options. When a `ConfigMap` has been successfully loaded into OPA, the `openpolicyagent.org/kube-mgmt-status` annotation is set to `{"status": "ok"}`. If loading fails for some reason (e.g., because of a parse error), the `openpolicyagent.org/kube-mgmt-status` annotation is set to `{"status": "error", "error": ...}` where the `error` field contains details about the failure. Data loaded out of ConfigMaps is laid out as follows: ``` // ``` For example, if the following ConfigMap was created: ```yaml kind: ConfigMap apiVersion: v1 metadata: name: hello-data namespace: opa labels: openpolicyagent.org/data: opa data: x.json: | {"a": [1,2,3,4]} ``` Note: "x.json" may be any key. You could refer to the data inside your policies as follows: ```rego data.opa["hello-data"]["x.json"].a[0] # evaluates to 1 ``` ## K8s resource replication > [!WARNING] > K8s resource replication requires global cluster permission with `ClusterRole` and `ClusterRoleBinding`. `kube-mgmt` can be configured to replicate Kubernetes resources into OPA so that you can express policies over an eventually consistent cache of Kubernetes state. Replication is enabled with the following options: ```bash # Replicate namespace-level resources. 
May be specified multiple times. --replicate=<[group/]version/resource> # Replicate cluster-level resources. May be specified multiple times. --replicate-cluster=<[group/]version/resource> ``` By default resources are replicated from all namespaces. Use `--replicate-ignore-namespaces` option to exclude particular namespaces from replication. Kubernetes resources replicated into OPA are laid out as follows: ``` /// # namespace scoped // # cluster scoped ``` - `` is configurable (via `--replicate-path`) and defaults to `kubernetes`. - `` is the Kubernetes resource plural, e.g., `nodes`, `pods`, `services`, etc. - `` is the namespace of the Kubernetes resource. - `` is the name of the Kubernetes resource. For example, to search for services with the label `"foo"` you could write: ``` some namespace, name service := data.kubernetes.services[namespace][name] service.metadata.labels["foo"] ``` An alternative way to visualize the layout is as single JSON document: ```json { "kubernetes": { "services": { "default": { "example-service": {...}, "another-service": {...}, } } } } } ``` The example below would replicate Deployments, Services, and Nodes into OPA: ```bash --replicate=apps/v1beta/deployments --replicate=v1/services --replicate-cluster=v1/nodes ``` Custom Resource Definitions can also be replicated using the same `--replicate` and `--replicate-cluster` options. ## Admission Control To get started with admission control policy enforcement in Kubernetes 1.9 or later see the [Kubernetes Admission Control](http://www.openpolicyagent.org/docs/kubernetes-admission-control.html) tutorial. For older versions of Kubernetes, see [Admission Control (1.7)](./docs/admission-control-1.7.md). In the [Kubernetes Admission Control](http://www.openpolicyagent.org/docs/kubernetes-admission-control.html) tutorial, OPA is **NOT** running with an authorization policy configured and hence clients can read and write policies in OPA. 
When deploying OPA in an insecure environment, it is recommended to configure `authentication` and `authorization` on the OPA daemon. For an example of how OPA can be securely deployed as an admission controller see [Admission Control Secure](./docs/admission-control-secure.md). ## OPA API Endpoints and Least-privilege Configuration `kube-mgmt` is a privileged component that can load policy and data into OPA. Other clients connecting to the OPA API only need to query for policy decisions. To load policy and data into OPA, `kube-mgmt` uses the following OPA API endpoints: * `PUT v1/policy/` - upserting policies * `DELETE v1/policy/` - deleting policies * `PUT v1/data/` - upserting data * `PATCH v1/data/` - updating and removing data Many users configure OPA with a simple API authorization policy that restricts access to the OPA APIs: ```rego package system.authz # Deny access by default. default allow = false # Allow anonymous access to decision `data.example.response` # # NOTE: the specific decision differs depending on your policies. # NOTE: depending on how callers are configured, they may only require this or the default decision below. allow { input.path == ["v0", "data", "example", "response"] input.method == "POST" } # Allow anonymous access to default decision. allow { input.path == [""] input.method == "POST" } # This is only used for health check in liveness and readiness probe allow { input.path == ["health"] input.method == "GET" } # This is only used for prometheus metrics allow { input.path == ["metrics"] input.method == "GET" } # This is used by kube-mgmt to PUT/PATCH against /v1/data and PUT/DELETE against /v1/policies. # # NOTE: The $TOKEN value is replaced at deploy-time with the actual value that kube-mgmt will use. This is typically done by an initContainer. 
allow { input.identity == "$TOKEN" } ``` ## Development ### Environment setup This project uses [devbox](https://www.jetify.com/docs/devbox/installing-devbox/) to provide a fully isolated, reproducible development environment. All required tools (Go, just, OPA CLI, staticcheck, and others) are managed by devbox at pinned versions — no manual installation needed. To enter the development shell: ```bash devbox shell ``` This project uses `just` as a command runner, configured in [justfile](./justfile). Run `just` without arguments to list all available recipes. ### Running the application locally `kube-mgmt` runs in a local [k3d](https://k3d.io) Kubernetes cluster. Create the cluster once before first use: ```bash just all ``` Start and stop `kube-mgmt` application with: ```bash just up just down ``` Delete local k8s cluster ```sh just 3d-down ``` ### Tests The project has three categories of tests. #### Go unit tests Standard Go tests using the `testing` package: ```bash just test-go ``` #### Helm chart unit tests Chart rendering tests implemented with the [helm-unittest](https://github.com/helm-unittest/helm-unittest) plugin: ```bash just test-helm ``` #### End-to-end tests E2E tests deploy `kube-mgmt` to the local k3d cluster via [devspace](https://devspace.sh) and validate behavior using [chainsaw](https://kyverno.github.io/chainsaw/) (Kubernetes-native test framework) and [hurl](https://hurl.dev) (HTTP assertions). Each scenario is a directory under `test/e2e/`. Run a single scenario (shows an interactive picker when no argument is given): ```bash just test-e2e [test/e2e/] ``` Run all scenarios sequentially: ```bash just test-e2e-all ``` #### Linting ```bash just lint ``` Runs `go vet` and [staticcheck](https://staticcheck.io) for Go code, and helm-unittest lint rules for the Helm chart. #### Run all checks ```bash just test ``` Runs lint, Go unit tests, and Helm chart unit tests. 
### Release To release a new version, create a [GitHub release](https://github.com/open-policy-agent/kube-mgmt/releases) with a tag that follows the [semantic versioning convention](https://semver.org/). Once the tag is pushed, the CI pipeline automatically builds and publishes all release artifacts: Docker images for all supported architectures and the Helm chart. ================================================ FILE: charts/opa-kube-mgmt/Chart.yaml ================================================ apiVersion: v1 appVersion: 0.0.0 # managed by git tag version: 0.0.0 # managed by git tag description: Manage OPA in Kubernetes with kube-mgmt sidecar. name: opa-kube-mgmt keywords: - opa - admission control - policy - kubernetes - security home: https://www.openpolicyagent.org icon: https://raw.githubusercontent.com/open-policy-agent/opa/master/logo/logo.png annotations: artifacthub.io/links: | - name: OPA source code url: https://github.com/open-policy-agent/opa - name: kube-mgmt source code url: https://github.com/open-policy-agent/kube-mgmt ================================================ FILE: charts/opa-kube-mgmt/README.md ================================================ # Manage OPA in Kubernetes with kube-mgmt sidecar. [OPA](https://www.openpolicyagent.org) is an open-source general-purpose policy engine designed for cloud-native environments. ## Overview This helm chart installs `OPA` together with `kube-mgmt` sidecar, that allows to manage OPA policies and data via Kubernetes ``ConfigMaps`. Optionally, the chart allows to install a [Kubernetes admission controller](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/). ## Installation ### Prerequisites - Kubernetes 1.9 (or newer) for validating and mutating webhook admission controller support. - Optional, cert-manager (https://docs.cert-manager.io/en/latest/) ### Default Installation If you just want to see something run, install the chart with default configuration. 
```sh helm repo add opa https://open-policy-agent.github.io/kube-mgmt/charts helm repo update helm upgrade -i -n opa --create-namespace opa opa/opa-kube-mgmt ``` Once installed, the OPA will download a sample bundle from https://www.openpolicyagent.org. It contains a simple policy that restricts the hostnames that can be specified on Ingress objects created in the `opa-example` namespace. You can download the bundle and inspect it yourself: ```sh mkdir example && cd example curl -s -L https://www.openpolicyagent.org/bundles/kubernetes/admission | tar xzv ``` ### Installation from GitHub Packages (GHCR) The Helm chart and Docker image are also published to GitHub Container Registry (GHCR). Install the chart using OCI: ```sh helm upgrade -i -n opa --create-namespace opa \ oci://ghcr.io/open-policy-agent/helm/opa-kube-mgmt --version ``` The `kube-mgmt` Docker image is also published to GHCR. To pull it directly: ```sh # latest docker pull ghcr.io/open-policy-agent/docker/opa-kube-mgmt:latest # specific version docker pull ghcr.io/open-policy-agent/docker/opa-kube-mgmt: ``` To use the GHCR image when installing the chart: ```sh helm upgrade -i -n opa --create-namespace opa \ oci://ghcr.io/open-policy-agent/helm/opa-kube-mgmt \ --set mgmt.image.repository=ghcr.io/open-policy-agent/docker/opa-kube-mgmt \ --set mgmt.image.tag=latest ``` ## Configuration All configuration settings are contained and described in [values.yaml](values.yaml). You should set the URL and credentials for the OPA to use to download policies. The URL should identify an HTTP endpoint that implements the [OPA Bundle API](https://www.openpolicyagent.org/docs/bundles.html). - `opa.services.controller.url` specifies the base URL of the OPA control plane. - `opa.services.controller.credentials.bearer.token` specifies a bearer token for the OPA to use to authenticate with the control plane. 
For more information on OPA-specific configuration see the [OPA Configuration Reference](https://www.openpolicyagent.org/docs/configuration.html). ================================================ FILE: charts/opa-kube-mgmt/templates/_helpers.tpl ================================================ {{/* vim: set filetype=mustache: */}} {{/* Expand the name of the chart. */}} {{- define "opa.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} {{- define "opa.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} {{- else -}} {{- $name := default .Chart.Name .Values.nameOverride -}} {{- if contains $name .Release.Name -}} {{- .Release.Name | trunc 63 | trimSuffix "-" -}} {{- else -}} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} {{- end -}} {{- end -}} {{- end -}} {{- define "opa.sarfullname" -}} {{- $name := (include "opa.fullname" . | trunc 59 | trimSuffix "-") -}} {{- printf "%s-sar" $name -}} {{- end -}} {{- define "opa.mgmtfullname" -}} {{- $name := (include "opa.fullname" . | trunc 58 | trimSuffix "-") -}} {{- printf "%s-mgmt" $name -}} {{- end -}} {{/* Create chart name and version as used by the chart label. */}} {{- define "opa.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Define standard labels for frequently used metadata. */}} {{- define "opa.labels.standard" -}} app: {{ template "opa.fullname" . 
}} chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" {{- end -}} {{/* Create the name of the service account to use */}} {{- define "opa.serviceAccountName" -}} {{- if .Values.serviceAccount.create -}} {{ default (include "opa.fullname" .) .Values.serviceAccount.name }} {{- else -}} {{ default "default" .Values.serviceAccount.name }} {{- end -}} {{- end -}} {{- define "opa.selfSignedIssuer" -}} {{ printf "%s-selfsign" (include "opa.fullname" .) }} {{- end -}} {{- define "opa.rootCAIssuer" -}} {{ printf "%s-ca" (include "opa.fullname" .) }} {{- end -}} {{- define "opa.rootCACertificate" -}} {{ printf "%s-ca" (include "opa.fullname" .) }} {{- end -}} {{- define "opa.servingCertificate" -}} {{ printf "%s-webhook-tls" (include "opa.fullname" .) }} {{- end -}} {{/* Detect the version of cert manager crd that is installed Error if CRD is not available */}} {{- define "opa.certManagerApiVersion" -}} {{- if (.Capabilities.APIVersions.Has "cert-manager.io/v1") -}} cert-manager.io/v1 {{- else if (.Capabilities.APIVersions.Has "cert-manager.io/v1beta1") -}} cert-manager.io/v1beta1 {{- else -}} {{- fail "cert-manager CRD does not appear to be installed" }} {{- end -}} {{- end -}} {{/* Detect the available version of admissionregistration */}} {{- define "opa.admissionregistrationApiVersion" -}} {{- if (.Capabilities.APIVersions.Has "admissionregistration.k8s.io/v1") -}} admissionregistration.k8s.io/v1 {{- else -}} admissionregistration.k8s.io/v1beta1 {{- end -}} {{- end -}} {{- define "opa.mgmt.image" -}} {{- $tag := .Values.mgmt.image.tag | default .Chart.AppVersion -}} {{ printf "%s:%s" .Values.mgmt.image.repository $tag }} {{- end -}} {{- define "opa.dnsPolicy" -}} {{- if .Values.dnsPolicyOverride -}} dnsPolicy: "{{ .Values.dnsPolicyOverride }}" {{ else if .Values.hostNetwork.enabled -}} dnsPolicy: "ClusterFirstWithHostNet" {{ end -}} {{ end -}} ================================================ FILE: 
charts/opa-kube-mgmt/templates/deployment.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: {{ template "opa.fullname" . }} labels: {{ include "opa.labels.standard" . | indent 4 }} spec: replicas: {{ .Values.replicas }} selector: matchLabels: app: {{ template "opa.fullname" . }} {{- with .Values.deploymentStrategy }} strategy: {{- toYaml . | nindent 4 }} {{- end }} template: metadata: annotations: {{- if .Values.opa }} checksum/config: {{ tpl (toYaml .Values.opa) . | sha256sum }} {{- end }} {{- if .Values.admissionController.enabled }} checksum/webhookconfiguration: {{ include (print $.Template.BasePath "/webhookconfiguration.yaml" ) . | sha256sum }} {{- end }} {{- if .Values.annotations }} {{ toYaml .Values.annotations | indent 8 }} {{- end }} labels: app: {{ template "opa.fullname" . }} name: {{ template "opa.fullname" . }} spec: {{- if .Values.imagePullSecrets }} imagePullSecrets: {{- range .Values.imagePullSecrets }} - name: {{ . }} {{- end }} {{- end }} {{- if .Values.priorityClassName }} priorityClassName: {{ .Values.priorityClassName }} {{- end }} {{- if or .Values.authz.enabled .Values.bootstrapPolicies}} initContainers: - name: initpolicy image: {{ include "opa.mgmt.image" . }} imagePullPolicy: {{ .Values.mgmt.image.pullPolicy }} resources: {{ toYaml .Values.mgmt.resources | indent 12 }} command: - /bin/sh - -c - | {{- if .Values.authz.enabled }} {{- if .Values.authz.mgmtToken}} cat /mgmt-token-secret/mgmt-token > /bootstrap/mgmt-token {{- else }} tr -dc 'A-F0-9' < /dev/urandom | dd bs=1 count=32 2>/dev/null > /bootstrap/mgmt-token {{- end }} TOKEN=`cat /bootstrap/mgmt-token` cat > /bootstrap/authz.rego < /bootstrap/{{ $policyName }}.rego < : |- # # For example, to mask the entire input body in the decision logs: # bootstrapPolicies: # log: |- # package system.log # mask["/input"] bootstrapPolicies: {} # Admission controller configuration. 
admissionController: enabled: false # To enforce mutating policies, change to MutatingWebhookConfiguration. kind: ValidatingWebhookConfiguration # To set annotations on all admissionController resources (Secret/Certificate/Issuer/AdmissionController) # annotations: # example: value # To _fail closed_ on failures, change to Fail. During initial testing, we # recommend leaving the failure policy as Ignore. failurePolicy: Ignore # Adds a namespace selector to the admission controller webhook namespaceSelector: matchExpressions: - {key: openpolicyagent.org/webhook, operator: NotIn, values: [ignore]} # SideEffectClass for the webhook, setting to NoneOnDryRun enables dry-run. # Only None and NoneOnDryRun are permitted for admissionregistration.k8s.io/v1. sideEffect: None # To restrict the kinds of operations and resources that are subject to OPA # policy checks, see the settings below. By default, all resources and # operations are subject to OPA policy checks. rules: - operations: ["*"] apiGroups: ["*"] apiVersions: ["*"] resources: ["*"] # The helm Chart will automatically generate a CA and server certificate for # the OPA. If you want to supply your own certificates, set the field below to # false and add the PEM encoded CA certificate and server key pair below. # # WARNING: The common name in the server certificate MUST match the # hostname of the service that exposes the OPA to the apiserver. For example, # if the service name is created in the "default" namespace with name "opa" # the common name MUST be set to "opa.default.svc". # # If the common name is not set correctly, the apiserver will refuse to # communicate with the OPA. generateCerts: true CA: "" cert: "" key: "" # Controls a PodDisruptionBudget for the OPA pod. Suggested use if having opa # always running for admission control is important podDisruptionBudget: enabled: false minAvailable: 1 # maxUnavailable: 1 authz: # Disable if you don't want authorization. # Mostly useful for debugging. 
enabled: true # Used for setting the mgmt token used for authz instead of auto generated default # mgmtToken: # secretName: name of the secret # secretKey: (optional) key from the secret - default value is: "mgmtToken" # Use hostNetwork setting on OPA pod hostNetwork: enabled: false # OPA docker image configuration. image: repository: openpolicyagent/opa tag: 1.3.0 pullPolicy: IfNotPresent # One or more secrets to be used when pulling images imagePullSecrets: [] # - registrySecretName # Should OPA use TLS or not. useHttps: true # Port to which the opa pod will bind itself, port: 8181 extraArgs: [] # Extra environment variables to be loaded into the OPA container extraEnv: [] mgmt: enabled: true image: repository: openpolicyagent/kube-mgmt tag: "" # appVersion is used by default, set to desired value to override pullPolicy: IfNotPresent extraArgs: [] extraEnv: [] resources: {} # if empty - the current namespaces is watched # if `*` - all namespaces are watched namespaces: [] # kube-mgmt container will wait until OPA container comes to running state. # Configure values for the startup probe, where kube-mgmt queries for the health # of OPA container before it starts. startupProbe: failureThreshold: 5 httpGet: path: /health port: 8181 # Port on which OPA is configured scheme: HTTPS initialDelaySeconds: 20 successThreshold: 1 timeoutSeconds: 10 data: enabled: true policies: enabled: true # NOTE IF you use these, remember to update the RBAC rules below to allow # permissions to replicate these things replicate: cluster: [] # - [group/]version/resource namespace: [] # - [group/]version/resource path: kubernetes ignoreNs: [] # Turn on auto-replication. kube-mgmt will apply OPA configuration file # and analyze any configured bundles to determine which Kubernetes # resources to replicate into OPA's in-memory store. 
auto: false # Log level for OPA ('debug', 'info', 'error') (app default=info) logLevel: info # Log format for OPA ('text', 'json') (app default=text) logFormat: json # Number of OPA replicas to deploy. OPA maintains an eventually consistent # cache of policies and data. If you want high availability you can deploy two # or more replicas. replicas: 1 # To control how the OPA is scheduled on the cluster, set the affinity, # tolerations and nodeSelector values below. For example, to deploy OPA onto # the master nodes, 1 replica per node: # # affinity: # podAntiAffinity: # requiredDuringSchedulingIgnoredDuringExecution: # - labelSelector: # matchExpressions: # - key: "app" # operator: In # values: # - opa # topologyKey: "kubernetes.io/hostname" # tolerations: # - key: "node-role.kubernetes.io/master" # effect: NoSchedule # operator: Exists # nodeSelector: # kubernetes.io/role: "master" affinity: {} tolerations: [] nodeSelector: {} # To control pod distribution across topology domains, set topologySpreadConstraints # below. # # topologySpreadConstraints: # - maxSkew: 1 # topologyKey: topology.kubernetes.io/zone # whenUnsatisfiable: DoNotSchedule # labelSelector: # matchLabels: # app: opa topologySpreadConstraints: [] # To control the CPU and memory resource limits and requests for OPA, set the # field below. resources: {} rbac: # should ClusterRole for kube-mgmt be created create: true # extra rules to be added to a ClusterRole extraRules: [] # - apiGroups: [""] # resources: ["configmaps"] # verbs: ["*"] serviceAccount: # Specifies whether a ServiceAccount should be created create: true # Annotations for the ServiceAccount annotations: {} # The name of the ServiceAccount to use. # If not set and create is true, a name is generated using the fullname template name: # This proxy allows opa to make Kubernetes SubjectAccessReview checks against the # Kubernetes API. 
You can get a rego function at github.com/open-policy-agent/library sar: enabled: false image: repository: lachlanevenson/k8s-kubectl tag: latest pullPolicy: IfNotPresent resources: {} # Set a priorityClass using priorityClassName # priorityClassName: # Timeout for a webhook call in seconds. # Starting in kubernetes 1.14 you can set the timeout and it is # encouraged to use a small timeout for webhooks. If the webhook call times out, the request # is handled according to the webhook's failure policy. # timeoutSeconds: 20 securityContext: enabled: false runAsNonRoot: true runAsUser: 1 deploymentStrategy: {} # rollingUpdate: # maxSurge: 1 # maxUnavailable: 0 # type: RollingUpdate extraContainers: [] ## Additional containers to be added to the opa pod. # - name: example-app # image: example/example-app:latest # args: # - "run" # - "--port=11811" # - "--config=/etc/example-app-conf/config.yaml" # - "--opa-endpoint=https://localhost:443" # ports: # - name: http # containerPort: 11811 # protocol: TCP # volumeMounts: # - name: example-app-auth-config # mountPath: /etc/example-app-conf extraVolumes: [] ## Additional volumes to the opa pod. # - name: example-app-auth-config # secret: # secretName: example-app-auth-config extraVolumeMounts: [] ## Mounting config for using the additional volumes # - name: example-app-auth-config # mountPath: /mount/path extraPorts: [] ## Additional ports to the opa services. Useful to expose extra container ports. # - port: 11811 # protocol: TCP # name: http # targetPort: http ================================================ FILE: cmd/kube-mgmt/flag.go ================================================ // Copyright 2017 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. 
package main import ( "errors" "fmt" "strings" ) type groupVersionKind struct { Group string Version string Kind string } var errBadFormat = errors.New("format: group/version/kind") func (gvk groupVersionKind) String() string { if gvk.Group != "" { return fmt.Sprintf("%v/%v/%v", gvk.Group, gvk.Version, gvk.Kind) } return fmt.Sprintf("%v/%v", gvk.Version, gvk.Kind) } func (gvk *groupVersionKind) Parse(value string) error { parts := strings.SplitN(value, "/", 3) for i := range parts { if len(parts[i]) == 0 { return errBadFormat } parts[i] = strings.ToLower(parts[i]) } if len(parts) < 2 { return errBadFormat } if len(parts) == 2 { gvk.Version = parts[0] gvk.Kind = parts[1] } else { gvk.Group = parts[0] gvk.Version = parts[1] gvk.Kind = parts[2] } return nil } type gvkFlag []groupVersionKind func (f *gvkFlag) String() string { return fmt.Sprint(*f) } func (f *gvkFlag) Set(value string) error { var gvk groupVersionKind if err := gvk.Parse(value); err != nil { return err } *f = append(*f, gvk) return nil } func (f *gvkFlag) Type() string { return "[group/]version/resource" } ================================================ FILE: cmd/kube-mgmt/flag_test.go ================================================ package main import ( "errors" "reflect" "testing" "github.com/open-policy-agent/kube-mgmt/pkg/configmap" "github.com/spf13/cobra" ) func TestFlagParsing(t *testing.T) { var f gvkFlag badPaths := []string{ "foo/bar/", "foo", } for _, tc := range badPaths { if err := f.Set(tc); err == nil { t.Fatalf("Expected error from %v", tc) } } expected := gvkFlag{ {"example.org", "foo", "bar"}, } if err := f.Set("example.org/Foo/bar"); err != nil || !reflect.DeepEqual(expected, f) { t.Fatalf("Expected %v but got: %v (err: %v)", expected, f, err) } expected = append(expected, groupVersionKind{"example.org", "bar", "baz"}) if err := f.Set("example.org/Bar/baz"); err != nil || !reflect.DeepEqual(expected, f) { t.Fatalf("Expected %v but got: %v (err: %v)", expected, f, err) } expected = 
append(expected, groupVersionKind{"", "v2", "corge"}) if err := f.Set("v2/corge"); err != nil || !reflect.DeepEqual(expected, f) { t.Fatalf("Expected %v but got: %v (err: %v)", expected, f, err) } } func TestFlagString(t *testing.T) { var f gvkFlag expected := "[example.org/foo/bar]" if err := f.Set("example.org/foo/bar"); err != nil || f.String() != expected { t.Fatalf("Exepcted %v but got: %v (err: %v)", expected, f.String(), err) } } func TestPolicyFlags(t *testing.T) { tt := []struct { name string flag string value string expectFullFlag string err error }{ { name: "valid", flag: "openpolicyagent.org/policy", value: "rego", expectFullFlag: "openpolicyagent.org/policy=rego", err: nil, }, { name: "invalidFlag", flag: "-foo", value: "rego", expectFullFlag: "", err: errors.New(`key: Invalid value: "-foo": name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')`), }, { name: "invalidValue", flag: "foo", value: "-rego", expectFullFlag: "", err: errors.New(`values[0][foo]: Invalid value: "-rego": a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')`), }, } for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { rootCmd := &cobra.Command{ Use: "test", Short: "test", RunE: func(cmd *cobra.Command, args []string) error { return nil }, } var params params rootCmd.Flags().StringVarP(¶ms.policyLabel, "policy-label", "", "", "replace label openpolicyagent.org/policy") rootCmd.Flags().StringVarP(¶ms.policyValue, "policy-value", "", "", "replace value rego") rootCmd.SetArgs([]string{"--policy-label=" + tc.flag, "--policy-value=" + tc.value}) rootCmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error { if rootCmd.Flag("policy-label").Value.String() != "" || rootCmd.Flag("policy-value").Value.String() != "" { err := configmap.CustomLabel(params.policyLabel, params.policyValue) if err != nil { if tc.err.Error() != err.Error() { t.Errorf("exp: %v\ngot: %v\n", tc.err.Error(), err.Error()) t.FailNow() } } } return nil } rootCmd.Execute() }) } } ================================================ FILE: cmd/kube-mgmt/main.go ================================================ // Copyright 2017 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. 
package main import ( "context" "crypto/tls" "crypto/x509" "fmt" "net/http" "os" "path" "strings" "github.com/open-policy-agent/kube-mgmt/pkg/configmap" "github.com/open-policy-agent/kube-mgmt/pkg/data" "github.com/open-policy-agent/kube-mgmt/pkg/dynamicdata" "github.com/open-policy-agent/kube-mgmt/pkg/opa" "github.com/open-policy-agent/kube-mgmt/pkg/types" "github.com/open-policy-agent/kube-mgmt/pkg/version" //lint:ignore SA1019 using OPA v0.x to ensure backwards compatible with pre-1.0 bundles "github.com/open-policy-agent/opa/logging" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) type params struct { version bool kubeconfigFile string opaURL string opaAuth string opaAuthFile string opaCAFile string opaAllowInsecure bool policyLabel string policyValue string dataLabel string dataValue string enablePolicies bool enableData bool namespaces []string opaConfigFile string replicateCluster gvkFlag replicateNamespace gvkFlag replicatePath string logLevel string replicateIgnoreNs []string analysisEntrypoint string healthEndpoint string } func main() { var params params commandName := path.Base(os.Args[0]) rootCmd := &cobra.Command{ Use: commandName, Short: fmt.Sprintf("%v manages OPA on top of Kubernetes", commandName), FParseErrWhitelist: cobra.FParseErrWhitelist{ UnknownFlags: true, }, Run: func(cmd *cobra.Command, args []string) { if params.version { fmt.Println("Version:", version.Version) fmt.Println("Git:", version.Git) } else { run(¶ms) } }, } // Miscellaenous options. 
rootCmd.Flags().BoolVarP(¶ms.version, "version", "v", false, "print version and exit") rootCmd.Flags().StringVarP(¶ms.kubeconfigFile, "kubeconfig", "", "", "set path to kubeconfig manually") rootCmd.Flags().StringVarP(¶ms.opaURL, "opa-url", "", "http://localhost:8181/v1", "set URL of OPA API endpoint") rootCmd.Flags().StringVarP(¶ms.opaAuth, "opa-auth-token", "", "", "set authentication token for OPA API endpoint") rootCmd.Flags().StringVarP(¶ms.opaAuthFile, "opa-auth-token-file", "", "", "set file containing authentication token for OPA API endpoint") rootCmd.Flags().StringVarP(¶ms.opaCAFile, "opa-ca-file", "", "", "set file containing certificate authority for OPA certificate") rootCmd.Flags().BoolVarP(¶ms.opaAllowInsecure, "opa-allow-insecure", "", false, "allow insecure https connections to OPA") rootCmd.Flags().StringVar(¶ms.logLevel, "log-level", "info", "set log level {debug, info, warn}") // policy / data rootCmd.Flags().BoolVarP(¶ms.enablePolicies, "enable-policies", "", true, "whether to automatically discover policies from labelled ConfigMaps") rootCmd.Flags().StringVar(¶ms.policyLabel, "policy-label", "openpolicyagent.org/policy", "label name for filtering ConfigMaps with policies") rootCmd.Flags().StringVar(¶ms.policyValue, "policy-value", "rego", "label value for filtering ConfigMaps with policies") rootCmd.Flags().BoolVarP(¶ms.enableData, "enable-data", "", true, "whether to automatically discover data from labelled ConfigMaps") rootCmd.Flags().StringVar(¶ms.dataLabel, "data-label", "openpolicyagent.org/data", "label name for filtering ConfigMaps with data") rootCmd.Flags().StringVar(¶ms.dataValue, "data-value", "opa", "label value for filtering ConfigMaps with data") rootCmd.Flags().StringSliceVarP(¶ms.namespaces, "namespaces", "", []string{""}, "namespaces to load policies and data from") // replication rootCmd.Flags().VarP(¶ms.replicateNamespace, "replicate", "", "replicate namespace-level resources") rootCmd.Flags().VarP(¶ms.replicateCluster, 
"replicate-cluster", "", "replicate cluster-level resources") rootCmd.Flags().StringVarP(¶ms.replicatePath, "replicate-path", "", "kubernetes", "set path to replicate data into") rootCmd.Flags().StringSliceVarP(¶ms.replicateIgnoreNs, "replicate-ignore-namespaces", "", []string{""}, "namespaces that are ignored by replication") rootCmd.Flags().StringVarP(¶ms.opaConfigFile, "opa-config", "", "", "set file containing OPA configuration to enable data replication based on configured bundles") rootCmd.Flags().StringVarP(¶ms.analysisEntrypoint, "analysis-entrypoint", "", "main/main", "set decision to analyze for dynamic data replication configuration (requires --opa-config)") rootCmd.Flags().StringVarP(¶ms.healthEndpoint, "health-endpoint", "", "", "set health check listening endpoint (e.g., localhost:8000)") rootCmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error { if rootCmd.Flag("policy-label").Value.String() != "" || rootCmd.Flag("policy-value").Value.String() != "" { err := configmap.CustomLabel(params.policyLabel, params.policyValue) if err != nil { logrus.Fatalf("Invalid --policy-label:%v || --policy-value:%v, %v", params.policyLabel, params.policyValue, err) } } if rootCmd.Flag("data-label").Value.String() != "" || rootCmd.Flag("data-value").Value.String() != "" { err := configmap.CustomLabel(params.dataLabel, params.dataValue) if err != nil { logrus.Fatalf("Invalid --data-label:%v || --data-value:%v, %v", params.dataLabel, params.dataValue, err) } } return nil } if err := rootCmd.Execute(); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } } func run(params *params) { switch params.logLevel { case "debug": logrus.SetLevel(logrus.DebugLevel) case "info": logrus.SetLevel(logrus.InfoLevel) case "warn": logrus.SetLevel(logrus.WarnLevel) default: logrus.Fatalf("Invalid log level %v", params.logLevel) } kubeconfig, err := loadRESTConfig(params.kubeconfigFile) if err != nil { logrus.Fatalf("Failed to load kubeconfig: %v", err) } if 
params.opaAuthFile != "" && params.opaAuth != "" { logrus.Fatalf("You can not use both --opa-auth-token and --opa-auth-token-file") } if params.opaAuthFile != "" { file, err := os.ReadFile(params.opaAuthFile) if err != nil { logrus.Fatalf("Failed to read opa auth token file %s", params.opaAuthFile) } params.opaAuth = strings.Split(string(file), "\n")[0] } if params.opaAllowInsecure && params.opaCAFile != "" { logrus.Fatalf("You can not use both --opa-allow-insecure and --opa-ca-file") } if params.opaAllowInsecure { config := &tls.Config{InsecureSkipVerify: params.opaAllowInsecure} http.DefaultTransport.(*http.Transport).TLSClientConfig = config } if params.opaCAFile != "" { rootCAs, _ := x509.SystemCertPool() if rootCAs == nil { rootCAs = x509.NewCertPool() } certs, err := os.ReadFile(params.opaCAFile) if err != nil { logrus.Fatalf("Failed to read opa certificate authority file %s", params.opaCAFile) } if ok := rootCAs.AppendCertsFromPEM(certs); !ok { logrus.Println("No certs appended, using system certs only") } config := &tls.Config{RootCAs: rootCAs} http.DefaultTransport.(*http.Transport).TLSClientConfig = config } if params.enablePolicies || params.enableData { sync := configmap.New( kubeconfig, opa.New(params.opaURL, params.opaAuth), configmap.DefaultConfigMapMatcher( params.namespaces, params.enablePolicies, params.enableData, params.policyLabel, params.policyValue, params.dataLabel, params.dataValue, ), ) _, err = sync.Run(params.namespaces) if err != nil { logrus.Fatalf("Failed to start configmap sync: %v", err) } } if len(params.replicateCluster)+len(params.replicateNamespace) > 0 { client, err := dynamic.NewForConfig(kubeconfig) if err != nil { logrus.Fatalf("Failed to get dynamic client: %v", err) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() opts := data.WithIgnoreNamespaces(params.replicateIgnoreNs) for _, gvk := range params.replicateCluster { sync := data.NewFromInterface(client, opa.New(params.opaURL, 
params.opaAuth).Prefix(params.replicatePath), getResourceType(gvk, false), opts) go sync.RunContext(ctx) } for _, gvk := range params.replicateNamespace { sync := data.NewFromInterface(client, opa.New(params.opaURL, params.opaAuth).Prefix(params.replicatePath), getResourceType(gvk, true), opts) go sync.RunContext(ctx) } } var sync *dynamicdata.Sync if params.opaConfigFile != "" { logger := logging.New() switch params.logLevel { case "debug": logger.SetLevel(logging.Debug) case "info": logger.SetLevel(logging.Info) case "error": logger.SetLevel(logging.Error) } sync, err = dynamicdata.New(params.opaConfigFile, params.analysisEntrypoint, params.opaURL, params.opaAuth, params.replicateIgnoreNs, params.replicatePath, kubeconfig, logger) if err != nil { logrus.Fatalf("Failed to create dynamic synchronizer: %v", err) } go sync.Run(context.Background()) } if params.healthEndpoint != "" { go func() { mux := http.NewServeMux() mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) { if sync == nil || sync.Ready() { logrus.Debugf("health check: READY") w.WriteHeader(http.StatusOK) } else { logrus.Debugf("health check: NOT READY") w.WriteHeader(http.StatusInternalServerError) } }) server := &http.Server{ Addr: params.healthEndpoint, Handler: mux, } logrus.Infof("Starting health server on %v", params.healthEndpoint) if err := server.ListenAndServe(); err != nil { logrus.Fatalf("Error starting health server: %v", err) } }() } quit := make(chan struct{}) <-quit } func loadRESTConfig(path string) (*rest.Config, error) { if path != "" { return clientcmd.BuildConfigFromFlags("", path) } return rest.InClusterConfig() } func getResourceType(gvk groupVersionKind, namespaced bool) types.ResourceType { return types.ResourceType{ Namespaced: namespaced, Group: gvk.Group, Version: gvk.Version, Resource: gvk.Kind, } } ================================================ FILE: devbox.json ================================================ { "$schema": 
"https://raw.githubusercontent.com/jetify-com/devbox/0.13.7/.schema/devbox.schema.json", "packages": [ "go@1.24", "just@1", "kubectl@1.33", "k3d@5.8", "kubernetes-helm@3", "go-tools@2026", "open-policy-agent@1", "hurl@7", "fzf@0", "devspace@6", "yq-go@4", "ko@0", "oras@1" ], "env": { "GOPATH": "$HOME/go/", "PATH": "$PATH:$HOME/go/bin" }, "shell": { "init_hook": [ "command -v chainsaw &>/dev/null || go install github.com/kyverno/chainsaw@v0.2.13", "helm plugin list | grep -q unittest || helm plugin install https://github.com/helm-unittest/helm-unittest --version v1.0.3" ] } } ================================================ FILE: devspace.yaml ================================================ version: v2beta1 name: opa-kube-mgmt vars: DEVSPACE_FLAGS: "-n default --no-warn" KO_PLATFORMS: "linux/amd64" KO_EXTRA_TAGS: "" images: default: image: localhost:5001/openpolicyagent/kube-mgmt tags: - $(git describe --tags --always --dirty) custom: command: | export KO_DOCKER_REPO=${runtime.images.default.image} export KO_VERSION=${runtime.images.default.tag} export KO_COMMIT=${DEVSPACE_GIT_COMMIT} ko build --bare --tags ${KO_VERSION}${KO_EXTRA_TAGS} --platform=${KO_PLATFORMS} ./cmd/kube-mgmt deployments: default: namespace: default helm: releaseName: ${DEVSPACE_NAME} chart: path: charts/${DEVSPACE_NAME} values: e2e: true mgmt: image: repository: ${runtime.images.default.image} tag: ${runtime.images.default.tag} valuesFiles: - "${E2E_TEST}/values.yaml" upgradeArgs: - "--wait" - "--install" hooks: - name: "helm package and copy to ghcr" events: ["after:build:default"] disabled: true command: | helm package charts/${DEVSPACE_NAME} \ --version ${runtime.images.default.tag} --app-version ${runtime.images.default.tag} helm push ${DEVSPACE_NAME}-${runtime.images.default.tag}.tgz oci://ghcr.io/open-policy-agent/helm - name: "copy docker image to ghcr" events: ["after:build:default"] disabled: true command: | oras cp 
docker.io/${runtime.images.default.image}:${runtime.images.default.tag} \ ghcr.io/open-policy-agent/docker/${DEVSPACE_NAME}:${runtime.images.default.tag},latest - name: "e2e cleanup" events: ["before:deploy:default"] command: | kubectl delete cm -l kube-mgmt/e2e=true -n ${DEVSPACE_NAMESPACE} --ignore-not-found kubectl delete svc -l kube-mgmt/e2e=true -n ${DEVSPACE_NAMESPACE} --ignore-not-found profiles: - name: release patches: - op: replace path: images.default.image value: openpolicyagent/kube-mgmt - op: replace path: vars.KO_PLATFORMS value: "linux/amd64,linux/arm64" - op: replace path: vars.KO_EXTRA_TAGS value: ",latest" - op: replace path: hooks[0].disabled value: false - op: replace path: hooks[1].disabled value: false ================================================ FILE: docs/admission-control-1.7.md ================================================ # Admission Control (1.7 and 1.8) **Note: Admission Control has undergone changes in Kubernetes 1.7 through 1.9. If you are running Kubernetes 1.9, see [Kubernetes Admission Control](http://www.openpolicyagent.org/docs/kubernetes-admission-control.html) instead.** To use OPA as an [Admission Controller](https://kubernetes.io/docs/admin/admission-controllers/#what-are-they) in Kubernetes 1.7 or 1.8, follow the steps in [External Admission Webhooks](https://kubernetes.io/docs/admin/extensible-admission-controllers/#external-admission-webhooks) to enable webhooks in the Kubernetes API server. 
Once you have configured the Kubernetes API server and generated the necessary certificates you can start `kube-mgmt` with the following options: ```bash --register-admission-controller --admission-controller-ca-cert-file=/path/to/ca/cert.pem --admission-controller-service-name= --admission-controller-service-namespace= ``` In addition to the command line arguments above, you must provide `--pod-name` and `--pod-namespace` using [Kubernetes' Downward API](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/). The example manifest below shows how to set these. You will need to create Secrets containing the server certificate and private key as well as the CA certificate: ```bash kubectl create secret generic opa-ca --from-file=ca.crt kubectl create secret tls opa-server --cert=server.crt --key=server.key ``` > See [Generating TLS Certificates](./tls-1.7.md) below for examples of how to generate the certificate files. The example below shows how to deploy OPA and enable admission control: ```yaml apiVersion: extensions/v1beta1 kind: Deployment metadata: labels: app: opa name: opa spec: replicas: 1 template: metadata: labels: app: opa name: opa spec: containers: - name: opa image: openpolicyagent/opa args: - "run" - "--server" - "--tls-cert-file=/certs/tls.crt" - "--tls-private-key-file=/certs/tls.key" - "--addr=0.0.0.0:443" - "--insecure-addr=127.0.0.1:8181" volumeMounts: - readOnly: true mountPath: /certs name: opa-server - name: kube-mgmt image: openpolicyagent/kube-mgmt:0.6 args: - "--pod-name=$(MY_POD_NAME)" - "--pod-namespace=$(MY_POD_NAMESPACE)" - "--register-admission-controller" - "--admission-controller-ca-cert-file=/certs/ca.crt" - "--admission-controller-service-name=opa" - "--admission-controller-service-namespace=$(MY_POD_NAMESPACE)" volumeMounts: - readOnly: true mountPath: /certs name: opa-ca env: - name: MY_POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: MY_POD_NAMESPACE valueFrom: 
fieldRef: fieldPath: metadata.namespace volumes: - name: opa-server secret: secretName: opa-server - name: opa-ca secret: secretName: opa-ca --- kind: Service apiVersion: v1 metadata: name: opa spec: clusterIP: 10.0.0.222 selector: app: opa ports: - name: https protocol: TCP port: 443 targetPort: 443 ``` Admission control policies must produce a document at `/system/main` that represents the admission control decision (i.e., allow or deny). #### Example Policy To test that admission control is working, define a policy that rejects the request if the `test-reject` label is found: ```ruby package system main = { "apiVersion": "admission.k8s.io/v1alpha1", "kind": "AdmissionReview", "status": status, } default status = {"allowed": true} status = reject { input.spec.operation = "CREATE" input.spec.object.labels["test-reject"] } reject = { "allowed": false, "status": { "reason": "testing rejection" } } ``` ================================================ FILE: docs/admission-control-crd.md ================================================ # Admission Control For Custom Resources In the [Kubernetes Admission Control](http://www.openpolicyagent.org/docs/kubernetes-admission-control.html) tutorial we have seen how OPA can be deployed as an admission controller to enforce custom policies on Kubernetes objects. In that tutorial, policies were enforced on native Kubernetes objects such as ingresses. ## Goal This tutorial will show how OPA can be used to enforce policies on custom resources. A custom resource is an extension of the Kubernetes API that is not necessarily available on every Kubernetes cluster. More information on Kubernetes custom resources is available [here](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/). The additional steps that need to be taken to achieve this are: 1. Define a role for reading Kubernetes custom resources. 2. Grant OPA/kube-mgmt permissions to read Kubernetes custom resources. 3. 
Configure `kube-mgmt` to load Kubernetes custom resources into OPA. ## Prerequisites Same as the [Kubernetes Admission Control](http://www.openpolicyagent.org/docs/kubernetes-admission-control.html) tutorial. ## Steps ### 1. Start minikube ```bash minikube start ``` Follow the steps in the [Kubernetes Admission Control](http://www.openpolicyagent.org/docs/kubernetes-admission-control.html) tutorial to create the `opa` namespace and configure TLS. ### 2. Create a CustomResourceDefinition Save the following CustomResourceDefinition to **resourcedefinition.yaml**: ```yaml apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: cats.opa.example.com spec: group: opa.example.com version: "v1" scope: Namespaced names: plural: cats singular: cat kind: Cat shortNames: - ct ``` And create it: ```bash kubectl create -f resourcedefinition.yaml ``` ### 3. Deploy OPA on top of Kubernetes Use the **admission-controlller.yaml** file from the [Kubernetes Admission Control](http://www.openpolicyagent.org/docs/kubernetes-admission-control.html) tutorial to deploy OPA as an admission controller with the following changes: 1. Define a role for reading the Kubernetes custom resource created in the previous step. ```yaml kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: crd-reader rules: - apiGroups: ["opa.example.com"] resources: ["cats"] verbs: ["get", "list", "watch"] ``` 2. Grant OPA/kube-mgmt permissions to the above role. ```yaml kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: opa-crd-reader roleRef: kind: ClusterRole name: crd-reader apiGroup: rbac.authorization.k8s.io subjects: - kind: Group name: system:serviceaccounts:opa apiGroup: rbac.authorization.k8s.io ``` 3. Update the `kube-mgmt` container spec to load the Kubernetes custom resources into OPA. 
```yaml name: kube-mgmt args: - "--replicate=opa.example.com/v1/cats" # replicate custom resources ``` Now follow the [Kubernetes Admission Control](http://www.openpolicyagent.org/docs/kubernetes-admission-control.html) tutorial to deploy OPA on top of Kubernetes and register OPA as an admission controller. ### 4. Define a policy and load it into OPA via Kubernetes Create a policy that rejects objects of kind `Cat` from sharing the same cat name. **name-conflicts.rego**: ```ruby package kubernetes.admission import data.kubernetes.cats # Cat names must be unique. deny[msg] { input.request.kind.kind = "Cat" input.request.operation = "CREATE" name := input.request.object.spec.name cat := cats[other_ns][other_cat] cat.spec.name == name msg = sprintf("duplicate cat name %q (conflicts with %v/%v)", [name, other_ns, other_cat]) } ``` ```bash kubectl create configmap name-conflicts --from-file=name-conflicts.rego ``` ### 5. Exercise the policy Define two objects of kind `Cat`. The first one will be permitted and the second will be rejected. **cat.yaml**: ```yaml apiVersion: "opa.example.com/v1" kind: Cat metadata: name: my-new-cat-object spec: name: Whiskers ``` **cat-duplicate.yaml**: ```yaml apiVersion: "opa.example.com/v1" kind: Cat metadata: name: my-duplicate-cat-object spec: name: Whiskers ``` Finally, try to create both `Cat` objects: ```bash kubectl create -f cat.yaml kubectl create -f cat-duplicate.yaml ``` The second object will be rejected since an object with the cat name `Whiskers` was created earlier. ================================================ FILE: docs/admission-control-secure.md ================================================ # Admission Control Secure In the [Kubernetes Admission Control](http://www.openpolicyagent.org/docs/kubernetes-admission-control.html) tutorial we have seen how OPA can be deployed as an admission controller. In that tutorial, OPA is not configured to `authenticate` and `authorize` client requests. 
## Goal This tutorial will show how to securely deploy OPA as an admission controller. The additional steps that need to be taken to achieve this are: 1. Start `OPA` with authentication and authorization enabled using the `--authentication` and `--authorization` options respectively. 2. Volume mount OPA's startup authorization policy into the OPA container. 3. Start `kube-mgmt` with `Bearer` token flag using the `--opa-auth-token-file` option. 4. Configure `kube-mgmt` to load polices stored in ConfigMaps that are created in the `opa` namespace and are labelled `openpolicyagent.org/policy=rego`. 5. Configure the Kubernetes API server to use `Bearer` token. ## Prerequisites Same as the [Kubernetes Admission Control](http://www.openpolicyagent.org/docs/kubernetes-admission-control.html) tutorial. ## Steps ### 1. Configure Kubernetes API server OPA will `authenticate` clients by extracting the `Bearer` token from the incoming API requests. Hence the Kubernetes API server needs to be configured to send a `Bearer` token in all requests to OPA. To do this, the API server must be provided with an admission control configuration file via the `--admission-control-config-file` flag during startup. This means the configuration file should be present inside the minikube VM at a location which is accessible to the API server pod. Start minikube: ```bash minikube start ``` `ssh` into the minikube VM and place the configuration files (**admission-control-config.yaml** and **kube-config.yaml**) below inside `/var/lib/minikube/certs`. This directory is accessible inside the API server pod. **admission-control-config.yaml** ```yaml apiVersion: apiserver.k8s.io/v1alpha1 kind: AdmissionConfiguration plugins: - name: ValidatingAdmissionWebhook configuration: apiVersion: apiserver.config.k8s.io/v1alpha1 kind: WebhookAdmission kubeConfigFile: /var/lib/minikube/certs/kube-config.yaml ``` **kube-config.yaml** ```yaml apiVersion: v1 kind: Config users: # '*' is the default match. 
- name: '*' user: token: ``` With the above configuration, all requests the API server makes to OPA will include a `Bearer` token. You will need to generate the `Bearer` token (``) and later include it in OPA's startup authorization policy so that OPA can verify the identity of the API server. Now exit the minikube VM and stop it: ```bash minikube stop ``` Start minikube by passing information about the admission control configuration file to the API server: ```bash minikube start --extra-config=apiserver.admission-control-config-file=/var/lib/minikube/certs/admission-control-config.yaml ``` Make sure that the minikube ingress addon is enabled: ```bash minikube addons enable ingress ``` Follow the steps in the [Kubernetes Admission Control](http://www.openpolicyagent.org/docs/kubernetes-admission-control.html) tutorial to create the `opa` namespace and configure TLS. Now use the **admission-controller.yaml** file from the tutorial to deploy OPA as an admission controller with the following changes: 1. Use the below `opa` and `kube-mgmt` container spec which enables OPA's security features and configures `kube-mgmt` to include a `Bearer` token in calls to OPA. We also volume mount OPA's startup authorization policy `authz.rego` inside the OPA container in the `/policies` directory. 
```yaml spec: containers: - name: opa image: openpolicyagent/opa:0.42.1 args: - "run" - "--server" - "--tls-cert-file=/certs/tls.crt" - "--tls-private-key-file=/certs/tls.key" - "--addr=0.0.0.0:443" - "--addr=http://127.0.0.1:8181" - "--authentication=token" - "--authorization=basic" - "/policies/authz.rego" # authorization policy used on startup - "--ignore=.*" # exclude hidden dirs created by Kubernetes volumeMounts: - readOnly: true mountPath: /certs name: opa-server - readOnly: true mountPath: /policies name: inject-policy - name: kube-mgmt image: openpolicyagent/kube-mgmt:7.0.6 args: - "--replicate-cluster=v1/namespaces" - "--replicate=extensions/v1beta1/ingresses" - "--opa-auth-token-file=/policies/token" volumeMounts: - readOnly: true mountPath: /policies name: inject-policy volumes: - name: opa-server secret: secretName: opa-server - name: inject-policy secret: secretName: inject-policy ``` 2. Include the Secret that contains OPA's startup authorization policy. ```bash cat > authz.rego < = input.identity } EOF kubectl create secret generic inject-policy -n opa --from-file=authz.rego --from-literal=token=kube-mgmt ``` If you have liveness or readiness probes configured on the OPA server for `/health` you will need to add the following `allow` rule to ensure Kubernetes can still access these endpoints. ``` # Allow anonymouse access to /health otherwise K8s get 403 and kills pod. allow { input.path = ["health"] } ``` 3. Label the `opa-default-system-main` ConfigMap. 
```yaml --- kind: ConfigMap apiVersion: v1 metadata: name: opa-default-system-main namespace: opa labels: openpolicyagent.org/policy: rego data: main: | package system import data.kubernetes.admission main = { "apiVersion": "admission.k8s.io/v1beta1", "kind": "AdmissionReview", "response": response, } default response = {"allowed": true} response = { "allowed": false, "status": { "reason": reason, }, } { reason = concat(", ", admission.deny) reason != "" } ``` When OPA starts, the `kube-mgmt` container will load Kubernetes Namespace and Ingress objects into OPA. `kube-mgmt` will automatically discover policies stored in ConfigMaps in Kubernetes and load them into OPA. `kube-mgmt` assumes a ConfigMap contains policies if the ConfigMap is: - Created in a namespace listed in the `--namespaces` option. Default namespace is `opa`. - Labelled with `openpolicyagent.org/policy=rego`. `kube-mgmt` is started with the `--opa-auth-token-file` flag and hence all requests made to OPA will include a `Bearer` token(`kube-mgmt` in this case). You can now follow the [Kubernetes Admission Control](http://www.openpolicyagent.org/docs/kubernetes-admission-control.html) tutorial to deploy OPA on top of Kubernetes and test admission control. **Make sure to label the ConfigMap when you store a policy inside it.** ================================================ FILE: docs/tls-1.7.md ================================================ # Generating TLS Certificates (1.7) External Admission Controllers must be secured with TLS. At a minimum you must: - Provide the Kubernetes API server with a client key to use for webhook calls (`client.key` and `client.crt` below). - Provide OPA with a server key so that the Kubernetes API server can authenticate it (`server.key` and `server.crt` below). - Provide `kube-mgmt` with the CA certificate to register with the Kubernetes API server (`ca.crt` below). Follow the steps below to generate the necessary files for test purposes. 
First, create the required OpenSSL configuration files
examples/service_validation/README.md ================================================ # Kubernetes Admission Control for preventing open AWS LoadBalancers Kubernetes Service objects of type [LoadBalancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) on AWS create an Elastic LoadBalancer or Network LoadBalancer. However, if not properly configured, these LoadBalancers can be open to the world exposing EC2 instances behind them to security breaches. OPA can provide a good ValidationWebhook for ensuring that Service objects of type LoadBalancer do not accidentally create a LoadBalancer open to the world. ## Goals This tutorial shows how to create validation webhooks for Service objects and enforcing the LoadBalancer policies. - Kubernetes Service objects of type LoadBalancer that do not have `spec.loadBalancerSourceRanges` are rejected. - Users are required to explicitly set `spec.loadBalancerSourceRanges`. If users want to create LoadBalancers that are actually open to the world, they should explicitly set `spec.loadBalancerSourceRanges` to `0.0.0.0/0`. ## Prerequisites This tutorial has been tested with Kubernetes 1.10 running on AWS with RBAC enabled. But it should work with Kubernetes 1.9 or higher. ## Steps ### The simplest way to setup opa and policies would be to run the install.sh script. ```bash $ ./install.sh ``` Otherwise, here are the detailed steps: ### 1. Start Kubernetes with ValidatingAdmissionWebhook admission controller enabled. ### 2. Create the namespace called `opa` in it. ```bash kubectl create namespace opa ``` ### 3. Create the SSL certs required for the webhook. 
Same as [this](https://github.com/open-policy-agent/opa/blob/master/docs/book/kubernetes-admission-control.md#3-deploy-opa-on-top-of-kubernetes) ```bash openssl genrsa -out ca.key 2048 openssl req -x509 -new -nodes -key ca.key -days 100000 -out ca.crt -subj "/CN=admission_ca" ``` Generate the TLS key and certificate for OPA: ```bash cat >server.conf < Note: the Common Name value you give to openssl MUST match the name of the OPA service created below. Create a Secret to store the TLS credentials for OPA: ```bash kubectl create secret tls opa-server --cert=server.crt --key=server.key ``` In the admission_controller.yaml file in this example, replace the REPLACE_WITH_SECRET with the base64 encoded ```bash kubectl apply -f ./examples/service_validation/admission-controller.yaml ``` This creates the OPA deployment, the validation webhook as well as the config map which has the policy. ### 4. Exercise the policy Create a service object and ensure that it is enforcing the policy. **service_invalid.yaml**: ```yaml apiVersion: v1 kind: Service metadata: name: no-whitelist-ips spec: ports: - port: 80 protocol: TCP targetPort: 80 selector: run: nginx type: LoadBalancer ``` **service_valid.yaml**: ```yaml apiVersion: v1 kind: Service metadata: name: whitelist-ips spec: ports: - port: 80 protocol: TCP targetPort: 80 selector: run: nginx type: LoadBalancer loadBalancerSourceRanges: - 10.0.0.0/8 ``` ```bash kubectl create -f service_invalid.yaml kubectl create -f service_valid.yaml ``` This tutorial showed how you can leverage OPA to enforce admission control of Service objects to prevent accidentally exposing AWS resources to the world. 
================================================ FILE: examples/service_validation/admission_controller.yaml ================================================ apiVersion: v1 kind: ServiceAccount metadata: name: opa-sa --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: opa-rolebinding subjects: - kind: ServiceAccount name: opa-sa namespace: opa roleRef: kind: ClusterRole name: cluster-admin apiGroup: rbac.authorization.k8s.io --- kind: Service apiVersion: v1 metadata: name: opa spec: selector: app: opa ports: - name: https protocol: TCP port: 443 targetPort: 443 --- apiVersion: extensions/v1beta1 kind: Deployment metadata: labels: app: opa name: opa spec: replicas: 1 template: metadata: labels: app: opa name: opa spec: serviceAccountName: opa-sa containers: - name: opa image: openpolicyagent/opa:0.8.0 args: - "run" - "--server" - "--tls-cert-file=/certs/tls.crt" - "--tls-private-key-file=/certs/tls.key" - "--addr=0.0.0.0:443" - "--insecure-addr=127.0.0.1:8181" volumeMounts: - readOnly: true mountPath: /certs name: opa-server - name: kube-mgmt image: openpolicyagent/kube-mgmt:0.12.1 volumes: - name: opa-server secret: secretName: opa-server --- kind: ValidatingWebhookConfiguration apiVersion: admissionregistration.k8s.io/v1 metadata: name: opa-validating-webhook webhooks: - name: validating-webhook.openpolicyagent.org admissionReviewVersions: ["v1beta1"] rules: - operations: ["CREATE"] apiGroups: ["*"] apiVersions: ["v1"] resources: ["services"] clientConfig: caBundle: REPLACE_WITH_SECRET service: namespace: opa name: opa sideEffects: None --- kind: ConfigMap apiVersion: v1 metadata: name: service-check data: main: | package kubernetes.admission import data.kubernetes.namespaces deny[msg] { input.request.kind.kind = "Service" input.request.operation = "CREATE" servicetype = input.request.object.spec.type contains(servicetype, "LoadBalancer") not input.request.object.spec.loadBalancerSourceRanges msg = sprintf("Rejecting service of 
type %q without specifying spec.loadBalancerSourceRanges", [servicetype]) } --- kind: ConfigMap apiVersion: v1 metadata: name: opa-default-system-main data: main: | package system import data.kubernetes.admission main = { "apiVersion": "admission.k8s.io/v1beta1", "kind": "AdmissionReview", "response": response, } default response = {"allowed": true} response = { "allowed": false, "status": { "reason": reason, }, } { reason = concat(", ", admission.deny) reason != "" } ================================================ FILE: examples/service_validation/install.sh ================================================ #!/bin/bash set -ex OUT_DIR=/tmp/opa rm -rf ${OUT_DIR}; mkdir -p ${OUT_DIR} openssl genrsa -out ${OUT_DIR}/ca.key 2048 openssl req -x509 -new -nodes -key ${OUT_DIR}/ca.key -days 100000 -out ${OUT_DIR}/ca.crt -subj "/CN=admission_ca" cat >${OUT_DIR}/server.conf < 0 { return expected[0] } return nil } // PutData describes a PutData request with an optional expected value // (expected value can be omitted) func PutData(path string, expected ...[]byte) Request { return Request{ req: putRequest, path: path, value: optional(expected...), } } // PatchData describes a PatchData request with an optional expected value // (expected value can be omitted) func PatchData(path string, op string, expected ...[]byte) Request { return Request{ req: patchRequest, path: path, op: op, value: optional(expected...), } } // InsertPolicy describes a InsertPolicy request with an optional expected value // (expected value can be omitted) func InsertPolicy(path string, expected ...[]byte) Request { return Request{ req: insertPolicyRequest, path: path, value: optional(expected...), } } // DeletePolicy describes a DeletePolicy request with an optional expected value // (expected value can be omitted) func DeletePolicy(path string) Request { return Request{ req: deletePolicyRequest, path: path, } } // Nothing describes an empty action. 
The client must not get any request for the given time func Nothing(duration time.Duration) Request { return Request{ req: noRequest, interval: duration, } } // Action performed when an expected Request arrives. // The request will return the result of invoking the Action. type Action func() error // Step combines a Request and an Action type Step struct { Request Action } // Do turns a Request into a Step func (req Request) Do(action Action) Step { return Step{ Request: req, Action: action, } } // DoError is a shortcut for Do(func() error { return err }) func (req Request) DoError(err error) Step { return Step{ Request: req, Action: func() error { return err }, } } // End is a shortcut for Do(nil) func (req Request) End() Step { return Step{ Request: req, } } ================================================ FILE: internal/expect/script.go ================================================ package expect import ( "context" "encoding/json" "fmt" "reflect" "strings" "testing" "time" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/cache" ) // Script is a sequence of expected Requests for a Client, // and the Actions to perform on each Request. type Script []Step // String implements fmt.Stringer func (s Script) String() string { return s.strings("\n") } // Play creates a client with the script provided, and runs the show. // When the script ends, the show is cancelled and the final state // of the client returned. 
func Play(t *testing.T, script Script, show func(ctx context.Context, client *Client)) *Client { steps := len(script) if steps <= 0 || show == nil { return nil } ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second)) // Arrange to cancel the context on the last step last := script[steps-1] script[steps-1].Action = func() error { defer cancel() if last.Action == nil { return nil } return last.Action() } var ( actor func(req Request, value interface{}) error improvise func(cursor int) cursor int = 0 ) actor = func(req Request, value interface{}) error { if cursor >= len(script) { t.Fatalf("Expected at most %d steps, got one more request %v", len(script), req) } // Save the actual value received to req. We do it // here because we have the *testing.T instance, and // can call t.Fatal if conversions fail. if req.req == insertPolicyRequest { req.value = value.([]byte) } else { req.value = MustRoundTrip(t, value) } // Check that the request matches the cue cue := script[cursor] if !cue.Equals(req) { seq := script[:cursor+1].strings("\n\t") t.Fatalf("Expected sequence:\n\t%v\nError at step %d, got:\n\t%v", seq, cursor, req) } cursor++ if cursor < len(script) && script[cursor].req == noRequest { // If the next update is timed, schedule it. go improvise(cursor) } if cue.Action == nil { return nil } return cue.Action() } // improvise triggers the step without any external input improvise = func(cursor int) { <-time.After(script[cursor].interval) actor(script[cursor].Request, nil) } client := &Client{actor: actor} if script[0].req == noRequest { // boot the script if the first step is a wait. 
go improvise(0) } show(ctx, client) if deadline, ok := ctx.Deadline(); ok && deadline.Before(time.Now()) { t.Fatalf("Test %s failed because of timeout", t.Name()) } return client } // MustMarshal marshals the objet to JSON, calls t.Fatal on error func MustMarshal(t *testing.T, obj interface{}) []byte { t.Helper() data, err := json.Marshal(obj) if err != nil { t.Fatalf("error marshalling JSON: %s", err) } return data } // MustUmnarshal unmarshals the objet from JSON, calls t.Fatal on error func MustUnmarshal(t *testing.T, data []byte) interface{} { t.Helper() var result interface{} if len(data) > 0 { err := json.Unmarshal(data, &result) if err != nil { t.Fatalf("error unmarshalling JSON: %s", err) } } return result } // mustRoundtrip marshals the object to JSON consistently. // // Kubernetes objects have custom marshallers that output the // json in a custom order. So comparing the marshalled representation of // a kubernetes object with that of a map or an *unstructured.Unstructured // built from that same object will fail. // // MustRoundTrip will make sure the generated string is comparable. 
func MustRoundTrip(t *testing.T, obj interface{}) []byte { return MustMarshal(t, MustUnmarshal(t, MustMarshal(t, obj))) } // MustEqual compares the values and calls t.Fatal on error func MustEqual(t *testing.T, result, expected interface{}) { t.Helper() if !reflect.DeepEqual(result, expected) { t.Fatalf("Expected:\n\n%q\n\nActual:\n\n%q\n", expected, result) } } // MustKey gets the mentaNamespaceKey of an object func MustKey(t *testing.T, obj runtime.Object) string { t.Helper() path, err := cache.MetaNamespaceKeyFunc(obj) if err != nil { t.Fatalf("Failed to get path from object %v: %v", obj, err) } return path } // strings formats the Script as a list of strings for printing func (s Script) strings(sep string) string { steps := make([]string, 0, len(s)) for cursor, step := range s { steps = append(steps, fmt.Sprintf("%d: %s", cursor, step.String())) } return strings.Join(steps, sep) } ================================================ FILE: justfile ================================================ K3D := "kube-mgmt" TEST_RESULTS := 'build/test-results' @_default: @just --list # golang linter [group('code quality')] lint-go: go vet ./... staticcheck ./... # helm linter [group('code quality')] lint-helm filter="*": #!/usr/bin/env -S bash -euo pipefail mkdir -p {{TEST_RESULTS}}/helm-unittest helm unittest -f '../../test/lint/{{filter}}.yaml' \ --output-file {{TEST_RESULTS}}/helm-unittest/lint.xml --output-type JUnit charts/opa-kube-mgmt # run all linters [group('code quality')] lint: lint-go lint-helm # run helm unit tests [group('code quality')] test-helm filter="*": #!/usr/bin/env -S bash -euo pipefail mkdir -p {{TEST_RESULTS}}/helm-unittest helm unittest -f '../../test/unit/{{filter}}.yaml' \ --output-file {{TEST_RESULTS}}/helm-unittest/unit.xml --output-type JUnit charts/opa-kube-mgmt # run golang unit tests [group('code quality')] test-go: go test ./... 
# run linters and unit tests [group('code quality')] test: lint test-go test-helm @_token: kubectl exec deploy/opa-kube-mgmt -n default -c mgmt -- cat /bootstrap/mgmt-token # run e2e test using chainsaw and hurl [group('code quality')] test-e2e E2E_TEST="": _ctx #!/usr/bin/env -S bash -euo pipefail SCENARIO="{{E2E_TEST}}" if [ -z "$SCENARIO" ]; then SCENARIO=$(find test/e2e/ -mindepth 1 -maxdepth 1 -type d | sort | fzf --header "Select e2e scenario") fi devspace purge devspace deploy --var E2E_TEST="$SCENARIO" mkdir -p {{TEST_RESULTS}}/chainsaw OPA_TOKEN=$(just _token 2>/dev/null || true) chainsaw test "$SCENARIO" --quiet --namespace default \ --report-format JUNIT-TEST \ --report-name "$(basename "$SCENARIO")" --report-path {{TEST_RESULTS}}/chainsaw # run all e2e tests [group('code quality')] test-e2e-all: #!/usr/bin/env -S bash -euo pipefail for E in $(find test/e2e/ -name 'chainsaw-test.yaml'|xargs -n1 dirname|sort); do just test-e2e "${E}" done # start kube-mgmt in local k8s cluster [group('deployment')] @up: _ctx devspace deploy --var E2E_TEST=test/e2e/default # stop kube-mgmt in local k8s cluster [group('deployment')] @down: _ctx devspace purge --force-purge && rm -rf .devspace/ @_ctx: kubectl config use-context k3d-{{K3D}} _bundle: #!/usr/bin/env -S bash -euo pipefail opa build -b ./test/e2e/replicate_auto/bundle -o ./test/e2e/replicate_auto/bundle.tar.gz kubectl delete configmap -n default bundle --ignore-not-found kubectl create configmap -n default bundle --from-file ./test/e2e/replicate_auto/bundle.tar.gz # delete local k8s cluster [group('deployment')] @k3d-down: k3d cluster delete {{K3D}} || true # (re) create local k8s cluster using k3d [group('deployment')] all: k3d-down && _ctx _bundle #!/usr/bin/env -S bash -euo pipefail echo ' apiVersion: k3d.io/v1alpha5 kind: Simple metadata: name: {{K3D}} servers: 1 agents: 0 image: rancher/k3s:v1.33.9-k3s1 registries: create: name: k3d-{{K3D}}-registry host: "0.0.0.0" hostPort: "5001" config: | mirrors: 
"localhost:5001": endpoint: - http://k3d-{{K3D}}-registry:5000 ports: - port: 8080:80 nodeFilters: ["loadbalancer"] - port: 8443:443 nodeFilters: ["loadbalancer"] options: k3s: extraArgs: - arg: "--disable=local-storage,metrics-server" nodeFilters: ["server:*"] ' | k3d cluster create --config /dev/stdin kubectl config set-context k3d-{{K3D}} --namespace default docker login -u {{K3D}} -p {{K3D}} localhost:5001 kubectl wait --for=create crd/ingressroutetcps.traefik.io --timeout=2m sleep 3 kubectl wait --for=condition=Established crd/ingressroutetcps.traefik.io --timeout=30s ================================================ FILE: pkg/configmap/configmap.go ================================================ // Copyright 2017 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. package configmap import ( "context" "encoding/json" "fmt" "hash/fnv" "sort" "strconv" "strings" "time" "github.com/open-policy-agent/kube-mgmt/pkg/opa" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" ) const ( defaultRetries = 2 statusAnnotationKey = "openpolicyagent.org/kube-mgmt-status" retriesAnnotationKey = "openpolicyagent.org/kube-mgmt-retries" // Special namespace in Kubernetes federation that holds scheduling policies. 
// commented because staticcheck: 'const kubeFederationSchedulingPolicy is unused (U1000)' // kubeFederationSchedulingPolicy = "kube-federation-scheduling-policy" resyncPeriod = time.Second * 60 syncResetBackoffMin = time.Second syncResetBackoffMax = time.Second * 30 ) // Label validator func CustomLabel(key, value string) error { _, err := labels.NewRequirement(key, selection.Equals, []string{value}) if err != nil { return err } return nil } // DefaultConfigMapMatcher returns a function that will match configmaps in // specified namespaces and/or with a policy or data label. The first bool return // value specifies a policy/data match and the second bool indicates if the configmap // contains a policy. func DefaultConfigMapMatcher(namespaces []string, enablePolicies, enableData bool, policyLabelKey, policyLabelValue, dataLabelKey, dataLabelValue string) func(*v1.ConfigMap) (bool, bool) { return func(cm *v1.ConfigMap) (bool, bool) { var match, isPolicy bool if enableData { match = matchesNamespace(cm, namespaces) && matchesLabel(cm, dataLabelKey, dataLabelValue) } if !match && enablePolicies { match = matchesNamespace(cm, namespaces) && matchesLabel(cm, policyLabelKey, policyLabelValue) if match { isPolicy = true } } return match, isPolicy } } func matchesLabel(cm *v1.ConfigMap, labelKey, labelValue string) bool { return cm.Labels[labelKey] == labelValue } func matchesNamespace(cm *v1.ConfigMap, namespaces []string) bool { for _, ns := range namespaces { if ns == cm.Namespace || ns == "*" { return true } } return false } // Sync replicates policies or data stored in the API server as ConfigMaps into OPA. type Sync struct { kubeconfig *rest.Config opa opa.Client clientset *kubernetes.Clientset matcher func(*v1.ConfigMap) (bool, bool) } // New returns a new Sync that can be started. 
// New returns a Sync wired to the given cluster and OPA client. It copies
// the rest.Config and points it at the core/v1 API with a JSON serializer
// that only knows the types this controller watches (ConfigMap, lists,
// Status). The caller's kubeconfig is never mutated.
func New(kubeconfig *rest.Config, opa opa.Client, matcher func(*v1.ConfigMap) (bool, bool)) *Sync {
	cpy := *kubeconfig
	cpy.GroupVersion = &schema.GroupVersion{
		Version: "v1",
	}
	cpy.APIPath = "/api"
	cpy.ContentType = runtime.ContentTypeJSON
	scheme := runtime.NewScheme()
	cpy.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: serializer.NewCodecFactory(scheme)}
	builder := runtime.NewSchemeBuilder(func(scheme *runtime.Scheme) error {
		scheme.AddKnownTypes(
			*cpy.GroupVersion,
			&metav1.ListOptions{},
			&metav1.Status{},
			&v1.ConfigMapList{},
			&v1.ConfigMap{})
		return nil
	})
	// NOTE(review): the error return is discarded; the single registration
	// func above always returns nil, so this is safe as written today.
	builder.AddToScheme(scheme)
	return &Sync{
		kubeconfig: &cpy,
		opa:        opa,
		matcher:    matcher,
	}
}

// Run starts the synchronizer. To stop the synchronizer send a message to the
// channel. One informer per requested namespace is started; "*" expands to
// all namespaces. Returns an error only if the K8s clients cannot be built.
func (s *Sync) Run(namespaces []string) (chan struct{}, error) {
	client, err := rest.RESTClientFor(s.kubeconfig)
	if err != nil {
		return nil, err
	}
	// Separate typed clientset, used by setAnnotations to patch status back
	// onto the ConfigMaps.
	s.clientset, err = kubernetes.NewForConfig(s.kubeconfig)
	if err != nil {
		return nil, err
	}
	quit := make(chan struct{})
	logrus.Infof("Policy/data ConfigMap processor connected to K8s: namespaces=%v", namespaces)
	for _, namespace := range namespaces {
		if namespace == "*" {
			namespace = v1.NamespaceAll
		}
		listerWatcher := cache.NewListWatchFromClient(
			client,
			"configmaps",
			namespace,
			fields.Everything())
		_, controller := cache.NewInformerWithOptions(cache.InformerOptions{
			ListerWatcher: listerWatcher,
			ObjectType:    &v1.ConfigMap{},
			Handler: cache.ResourceEventHandlerFuncs{
				AddFunc:    s.add,
				UpdateFunc: s.update,
				DeleteFunc: s.delete,
			},
			ResyncPeriod: 0, // Set to 0 as in the original code
		})
		// All informers share the same quit channel; closing it stops them all.
		go controller.Run(quit)
	}
	return quit, nil
}

// add handles informer Add events: any matching ConfigMap is pushed to OPA.
func (s *Sync) add(obj interface{}) {
	cm := obj.(*v1.ConfigMap)
	if match, isPolicy := s.matcher(cm); match {
		logrus.Debugf("OnAdd cm=%v/%v, isPolicy=%v", cm.Namespace, cm.Name, isPolicy)
		s.syncAdd(cm, isPolicy)
	}
}

// update handles informer Update events, re-syncing only when the content
// fingerprint changed or a retry is pending (see fingerprint/retries below).
func (s *Sync) update(oldObj, obj interface{}) {
	oldCm, cm := oldObj.(*v1.ConfigMap), obj.(*v1.ConfigMap)
	if match,
isPolicy := s.matcher(cm); match { logrus.Debugf("OnUpdate cm=%v/%v, isPolicy=%v, oldVer=%v, newVer=%v", cm.Namespace, cm.Name, isPolicy, oldCm.GetResourceVersion(), cm.GetResourceVersion()) if cm.GetResourceVersion() != oldCm.GetResourceVersion() { newFp, oldFp := fingerprint(cm), fingerprint(oldCm) rtrVal := cm.Annotations[retriesAnnotationKey] logrus.Debugf("OnUpdate cm=%v/%v, retries=%v, oldFp=%v, newFp=%v", cm.Namespace, cm.Name, rtrVal, oldFp, newFp) if newFp != oldFp || rtrVal != "0" { s.syncAdd(cm, isPolicy) } } } else { // check if the label was removed if match, isPolicy := s.matcher(oldCm); match { s.syncRemove(oldCm, isPolicy) } } } func (s *Sync) delete(obj interface{}) { if d, ok := obj.(cache.DeletedFinalStateUnknown); ok { obj = d.Obj } cm := obj.(*v1.ConfigMap) if match, isPolicy := s.matcher(cm); match { logrus.Debugf("OnDelete cm=%v/%v", cm.Namespace, cm.Name) s.syncRemove(cm, isPolicy) } } func (s *Sync) syncAdd(cm *v1.ConfigMap, isPolicy bool) { path := fmt.Sprintf("%v/%v", cm.Namespace, cm.Name) logrus.Debugf("Adding cm=%v, isPolicy=%v", path, isPolicy) // sort keys so that errors, if any, are always in the same order sortedKeys := make([]string, 0, len(cm.Data)) for key := range cm.Data { sortedKeys = append(sortedKeys, key) } sort.Strings(sortedKeys) var syncErr errList for _, key := range sortedKeys { value := cm.Data[key] id := fmt.Sprintf("%v/%v", path, key) var err error if isPolicy { err = s.opa.InsertPolicy(id, []byte(value)) logrus.Infof("Added policy %v, err=%v", id, err) } else { // We don't need to know the JSON structure, just pass it // directly to the OPA data store. 
// Data ConfigMap values must be valid JSON documents.
var data map[string]interface{}
if err = json.Unmarshal([]byte(value), &data); err != nil {
	logrus.Errorf("Failed to parse JSON data in configmap with id=%s", id)
} else {
	err = s.opa.PutData(id, data)
	logrus.Infof("Added data %v, err=%v", id, err)
}
}
if err != nil {
	syncErr = append(syncErr, err)
}
}
if syncErr != nil {
	// On failure, compute the retry budget written back to the ConfigMap's
	// retries annotation: count down an existing positive value, and
	// re-arm to defaultRetries when it is absent or has reached 0.
	// Only policy ConfigMaps get retries; data failures write retries=0.
	var retries int = 0
	if isPolicy {
		if rStr, ok := cm.Annotations[retriesAnnotationKey]; ok {
			r, err := strconv.Atoi(rStr)
			if err == nil && r > 0 {
				retries = r - 1
				logrus.Debugf("Adding policies error cm=%v, old retry=%v, new retry=%v", path, rStr, retries)
			} else if err == nil && r == 0 {
				retries = defaultRetries
				logrus.Debugf("Adding policies error cm=%v, old retry=%v, new retry=%v", path, rStr, retries)
			}
		} else {
			retries = defaultRetries
			logrus.Debugf("Adding policies error cm=%v, no retry annotation, new retry=%v", path, retries)
		}
	}
	s.setAnnotations(cm, status{
		Status: "error",
		Error:  syncErr,
	}, retries)
} else {
	s.setAnnotations(cm, status{
		Status: "ok",
	}, 0)
}
}

// syncRemove undoes syncAdd for a ConfigMap that was deleted or no longer
// matches: policies are deleted per key; data removal falls back to a full
// OPA reset on error.
func (s *Sync) syncRemove(cm *v1.ConfigMap, isPolicy bool) {
	logrus.Debugf("Attempting to remove cm=%v/%v, isPolicy=%v", cm.Namespace, cm.Name, isPolicy)
	path := fmt.Sprintf("%v/%v", cm.Namespace, cm.Name)
	for key := range cm.Data {
		id := fmt.Sprintf("%v/%v", path, key)
		if isPolicy {
			if err := s.opa.DeletePolicy(id); err != nil {
				logrus.Errorf("Failed to delete policy %v: %v", id, err)
			}
		} else {
			// NOTE(review): this removes the ConfigMap-level "path", not the
			// per-key "id" that syncAdd wrote (and that the error message
			// reports). With multiple data keys, the first iteration already
			// removes the parent path, so later iterations may fail and
			// trigger a full syncReset — confirm whether "path" should be "id".
			if err := s.opa.PatchData(path, "remove", nil); err != nil {
				logrus.Errorf("Failed to remove %v (will reset OPA data and resync in %v): %v", id, resyncPeriod, err)
				s.syncReset(id)
			}
		}
	}
}

// setAnnotations patches the kube-mgmt status and retries annotations onto
// the ConfigMap using a strategic-merge patch.
func (s *Sync) setAnnotations(cm *v1.ConfigMap, st status, retries int) {
	bs, err := json.Marshal(st)
	if err != nil {
		logrus.Errorf("Failed to serialize status for cm=%v/%v, err=%v", cm.Namespace, cm.Name, err)
		return
	}
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": map[string]interface{}{
				statusAnnotationKey:  string(bs),
				retriesAnnotationKey: strconv.Itoa(retries),
			},
}, } bs, err = json.Marshal(patch) if err != nil { logrus.Errorf("Failed to serialize patch for %v/%v: %v", cm.Namespace, cm.Name, err) return } _, err = s.clientset.CoreV1().ConfigMaps(cm.Namespace).Patch(context.TODO(), cm.Name, types.StrategicMergePatchType, bs, metav1.PatchOptions{}) if err != nil { logrus.Errorf("Failed to %v for %v/%v: %v", statusAnnotationKey, cm.Namespace, cm.Name, err) } } func (s *Sync) syncReset(id string) { logrus.Debugf("Attempting to reset %v", id) d := syncResetBackoffMin for { if err := s.opa.PutData("/", map[string]interface{}{}); err != nil { logrus.Errorf("Failed to reset OPA data for %v (will retry after %v): %v", id, d, err) } else { return } time.Sleep(d) d = d * 2 if d > syncResetBackoffMax { d = syncResetBackoffMax } } } // fingerprint for the labels and data of a configmap. func fingerprint(cm *v1.ConfigMap) uint64 { hash := fnv.New64a() data := json.NewEncoder(hash) data.Encode(cm.Labels) data.Encode(cm.Data) return hash.Sum64() } // errList is an error type that can marshal a list of errors to json type errList []error var ( // Make sure we implement the proper interfaces _ error = errList{} _ json.Marshaler = errList{} ) type status struct { Status string `json:"status"` Error errList `json:"error,omitempty"` } // MarshalJSON implements json.Marshaler func (m errList) MarshalJSON() ([]byte, error) { if len(m) <= 0 { return []byte(`""`), nil } list := make([]json.RawMessage, 0, len(m)) for _, err := range m { if b, marshalErr := json.Marshal(err); marshalErr == nil { list = append(list, b) } else { // fallback to quoted .Error() string if marshalling fails list = append(list, []byte(fmt.Sprintf("%q", err.Error()))) } } if len(list) == 1 { return list[0], nil // for backward compatibility } return json.Marshal(list) } // Error implements error func (m errList) Error() string { if len(m) <= 0 { return "" } text := make([]string, 0, len(m)) for _, err := range m { text = append(text, err.Error()) } return strings.Join(text, 
"\n") } ================================================ FILE: pkg/data/generic.go ================================================ // Copyright 2017 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. package data import ( "context" "fmt" "strings" "sync" "time" opa_client "github.com/open-policy-agent/kube-mgmt/pkg/opa" "github.com/open-policy-agent/kube-mgmt/pkg/types" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" ) // The min/max amount of time to wait when resetting the synchronizer. const ( backoffMax = time.Second * 30 backoffMin = time.Second jitterFactor = 1.2 FieldMeta = "metadata.namespace!=" ) // GenericSync replicates Kubernetes resources into OPA as raw JSON. type GenericSync struct { createError error // to support deprecated calls to New / Run client dynamicClient opa opa_client.Data ns types.ResourceType limiter workqueue.TypedRateLimiter[any] jitterFactor float64 ignoreNamespaces []string mu sync.Mutex ready bool } // New returns a new GenericSync that can be started. // Deprecated: Please Use NewFromInterface instead. func New(kubeconfig *rest.Config, opa opa_client.Data, ns types.ResourceType) *GenericSync { client, err := dynamic.NewForConfig(kubeconfig) if err != nil { return &GenericSync{createError: err} } return NewFromInterface(client, opa, ns) } type Option func(s *GenericSync) // NewFromInterface returns a new GenericSync that can be started. 
func NewFromInterface(client dynamic.Interface, opa opa_client.Data, ns types.ResourceType, opts ...Option) *GenericSync { s := &GenericSync{ client: dynamicClient{client}, ns: ns, opa: opa.Prefix(ns.Resource), jitterFactor: jitterFactor, } for _, opt := range opts { opt(s) } if s.limiter == nil { // Use default rateLimiter if not configured s.limiter = workqueue.NewTypedItemExponentialFailureRateLimiter[any](backoffMin, backoffMax) } return s } // WithIgnoreNamespaces provides a list of namespaces to ignore func WithIgnoreNamespaces(ignoreNamespaces []string) Option { return func(s *GenericSync) { s.ignoreNamespaces = ignoreNamespaces } } // WithBackoff tunes the values of exponential backoff and jitter factor func WithBackoff(min, max time.Duration, jitterFactor float64) Option { return func(s *GenericSync) { s.limiter = workqueue.NewTypedItemExponentialFailureRateLimiter[any](min, max) s.jitterFactor = jitterFactor } } // Run starts the synchronizer. To stop the synchronizer send a message to the // channel. // Deprecated: Please use RunContext instead. func (s *GenericSync) Run() (chan struct{}, error) { // To support legacy way of creating GenericSync from *rest.Config if s.createError != nil { return nil, s.createError } quit := make(chan struct{}) ctx, cancel := context.WithCancel(context.Background()) go func() { // propagate cancel signal from channel to context <-quit cancel() }() go s.RunContext(ctx) return quit, nil } // RunContext starts the synchronizer in the foreground. // To stop the synchronizer, cancel the context. 
func (s *GenericSync) RunContext(ctx context.Context) error { if s.createError != nil { return s.createError } store, queue := s.setup(ctx) go func() { <-ctx.Done() queue.ShutDown() }() s.loop(store, queue) return nil } func (s *GenericSync) Ready() bool { s.mu.Lock() defer s.mu.Unlock() return s.ready } // setup the store and queue for this GenericSync instance func (s *GenericSync) setup(ctx context.Context) (cache.Store, workqueue.TypedDelayingInterface[any]) { ignoreNs := s.ignoreNs() resource := s.client.ResourceFor(s.ns, metav1.NamespaceAll) queue := workqueue.NewNamedDelayingQueue(s.ns.String()) store, controller := cache.NewInformerWithOptions(cache.InformerOptions{ ListerWatcher: &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = ignoreNs return resource.List(ctx, options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = ignoreNs return resource.Watch(ctx, options) }, }, ObjectType: &unstructured.Unstructured{}, Handler: resourceEventQueue{queue}, ResyncPeriod: 0, }) start, quit := time.Now(), ctx.Done() go controller.Run(quit) for !cache.WaitForCacheSync(quit, controller.HasSynced) { logrus.Warnf("Failed to sync cache for %v, retrying...", s.ns) } if controller.HasSynced() { logrus.Infof("Initial informer sync for %v completed, took %v", s.ns, time.Since(start)) } return store, queue } func (s *GenericSync) ignoreNs() string { var ignoreNs string if !s.ns.Namespaced { return ignoreNs } if len(s.ignoreNamespaces) >= 1 { for _, ns := range s.ignoreNamespaces { ignoreNs = FieldMeta + ns + "," + ignoreNs } } ignoreNs = strings.TrimSuffix(ignoreNs, ",") return ignoreNs } // resourceEventQueue is a cache.ResourceEventHandler that queues all events type resourceEventQueue struct { workqueue.Interface } // OnAdd implements ResourceHandler func (q resourceEventQueue) OnAdd(obj interface{}, isInInitialList bool) { key, err := cache.MetaNamespaceKeyFunc(obj) 
if err != nil {
	logrus.Warnf("failed to retrieve key: %v", err)
	return
}
q.Add(key)
}

// resourceVersionMatch reports whether both objects carry the same
// resourceVersion. If either object does not expose ObjectMeta the
// mismatch is logged and false is returned, so the update is still queued.
func (q resourceEventQueue) resourceVersionMatch(oldObj, newObj interface{}) bool {
	var (
		oldMeta metav1.Object
		newMeta metav1.Object
		err     error
	)
	oldMeta, err = meta.Accessor(oldObj)
	if err == nil {
		newMeta, err = meta.Accessor(newObj)
	}
	if err != nil {
		logrus.Warnf("failed to retrieve meta: %v", err)
		return false
	}
	return newMeta.GetResourceVersion() == oldMeta.GetResourceVersion()
}

// OnUpdate implements ResourceHandler
func (q resourceEventQueue) OnUpdate(oldObj, newObj interface{}) {
	if !q.resourceVersionMatch(oldObj, newObj) {
		// Avoid sync flood on relist. We don't use resync.
		q.OnAdd(newObj, false)
	}
}

// OnDelete implements ResourceHandler
func (q resourceEventQueue) OnDelete(obj interface{}) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		logrus.Warnf("failed to retrieve key: %v", err)
		return
	}
	q.Add(key)
}

// initPath is a sentinel queue key ("") that triggers a full reload of the
// store into OPA; real object keys are never empty.
const initPath = ""

// loop starts replicating Kubernetes resources into OPA. If an error occurs
// during the replication process, this function will backoff and reload
// all resources into OPA from scratch.
func (s *GenericSync) loop(store cache.Store, queue workqueue.TypedDelayingInterface[any]) {
	logrus.Infof("Syncing %v.", s.ns)
	defer func() {
		logrus.Infof("Sync for %v finished. Exiting.", s.ns)
	}()
	var delay time.Duration
	for !queue.ShuttingDown() {
		queue.AddAfter(initPath, delay) // this special path will trigger a full load
		syncDone := false               // discard everything until initPath
		var err error
		for err == nil {
			key, shuttingDown := queue.Get()
			if shuttingDown {
				return
			}
			err = s.processNext(store, key.(string), &syncDone)
			if key == initPath && syncDone {
				// Full load succeeded; reset the failure backoff counter.
				s.limiter.Forget(initPath)
			}
			queue.Done(key)
		}
		// Plain assignment (not ":="): a short declaration here would shadow
		// the outer delay, leaving it 0 so AddAfter above would retry
		// immediately instead of backing off.
		delay = wait.Jitter(s.limiter.When(initPath), s.jitterFactor)
		logrus.Errorf("Sync for %v failed, trying again in %v. Reason: %v", s.ns, delay, err)
	}
}

// processNext applies a single queued key to OPA. The sentinel initPath
// performs the full dump (and marks the instance Ready); ordinary keys are
// looked up in the store and written or removed accordingly. Any returned
// error aborts the inner loop in loop() and forces a full reload.
func (s *GenericSync) processNext(store cache.Store, path string, syncDone *bool) error {
	// On receiving the initPath, load a full dump of the data store
	if path == initPath {
		if *syncDone {
			return nil
		}
		start, list := time.Now(), store.List()
		if err := s.syncAll(list); err != nil {
			return err
		}
		s.mu.Lock()
		s.ready = true
		s.mu.Unlock()
		logrus.Infof("Loaded %d resources of kind %v into OPA. Took %v", len(list), s.ns, time.Since(start))
		*syncDone = true // sync is now Done
		return nil
	}
	// Ignore updates queued before the initial load
	if !*syncDone {
		return nil
	}
	obj, exists, err := store.GetByKey(path)
	if err != nil {
		return fmt.Errorf("store error: %w", err)
	}
	if exists {
		if err := s.opa.PutData(path, obj); err != nil {
			return fmt.Errorf("add event: %w", err)
		}
	} else {
		if err := s.opa.PatchData(path, "remove", nil); err != nil {
			return fmt.Errorf("delete event: %w", err)
		}
	}
	return nil
}

// syncAll replaces the entire OPA data tree under this sync's prefix with
// the given objects in one PutData("/") call.
func (s *GenericSync) syncAll(objs []interface{}) error {
	// Build a list of patches to apply.
	payload, err := generateSyncPayload(objs, s.ns.Namespaced)
	if err != nil {
		return err
	}
	return s.opa.PutData("/", payload)
}

// generateSyncPayload arranges objs into a nested map keyed by their
// namespace/name (or name for cluster-scoped resources).
func generateSyncPayload(objs []interface{}, namespaced bool) (map[string]interface{}, error) {
	combined := make(map[string]interface{}, len(objs))
	for _, obj := range objs {
		path, err := cache.MetaNamespaceKeyFunc(obj)
		if err != nil {
			return nil, err
		}
		// Ensure the path in the map up to our value exists
		// We make some assumptions about the paths that do exist
		// being the correct types due to the expected uniform
		// paths for each of the similar object types being
		// sync'd with the GenericSync instance.
segments := strings.Split(path, "/") dir := combined for i := 0; i < len(segments)-1; i++ { next, ok := combined[segments[i]] if !ok { next = map[string]interface{}{} dir[segments[i]] = next } dir = next.(map[string]interface{}) } dir[segments[len(segments)-1]] = obj } return combined, nil } ================================================ FILE: pkg/data/generic_test.go ================================================ package data import ( "context" "errors" "fmt" "testing" "time" "github.com/open-policy-agent/kube-mgmt/internal/expect" "github.com/open-policy-agent/kube-mgmt/pkg/types" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes/scheme" ) type testCase struct { Label string ResourceType types.ResourceType Prefix string Objs []runtime.Object Expected string } // NewFakeDynamicClient builds a new FakeDynamicClient func newFakeDynamicClient(t *testing.T, objs ...runtime.Object) dynamicClient { sc := runtime.NewScheme() if err := scheme.AddToScheme(sc); err != nil { t.Fatalf("Failed to build initial scheme: %v", err) } return dynamicClient{resourceInterface: fake.NewSimpleDynamicClient(sc, objs...)} } func TestGenericSync(t *testing.T) { t.Parallel() testCases := []testCase{ { Label: "Single Cluster Resource", ResourceType: types.ResourceType{ Namespaced: false, Resource: "nodes", Version: "v1", }, Prefix: "", Objs: []runtime.Object{ &apiv1.Node{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Node", }, ObjectMeta: metav1.ObjectMeta{ Name: "node1", ResourceVersion: "0", }, Spec: apiv1.NodeSpec{}, Status: apiv1.NodeStatus{}, }, }, Expected: `{ "node1":{ "apiVersion": "v1", "kind": "Node", "metadata":{ "creationTimestamp":null, "name":"node1", "resourceVersion":"0" }, "spec":{ }, "status":{ "daemonEndpoints":{ "kubeletEndpoint":{ "Port":0 } }, "nodeInfo":{ 
"architecture":"", "bootID":"", "containerRuntimeVersion":"", "kernelVersion":"", "kubeProxyVersion":"", "kubeletVersion":"", "machineID":"", "operatingSystem":"", "osImage":"", "systemUUID":"" } } } }`, }, { Label: "Single Cluster Resource With Prefix", ResourceType: types.ResourceType{ Namespaced: false, Resource: "nodes", Version: "v1", }, Prefix: "kube", Objs: []runtime.Object{ &apiv1.Node{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Node", }, ObjectMeta: metav1.ObjectMeta{ Name: "node1", ResourceVersion: "0", }, Spec: apiv1.NodeSpec{}, Status: apiv1.NodeStatus{}, }, }, Expected: `{ "node1":{ "apiVersion": "v1", "kind": "Node", "metadata":{ "creationTimestamp":null, "name":"node1", "resourceVersion":"0" }, "spec":{ }, "status":{ "daemonEndpoints":{ "kubeletEndpoint":{ "Port":0 } }, "nodeInfo":{ "architecture":"", "bootID":"", "containerRuntimeVersion":"", "kernelVersion":"", "kubeProxyVersion":"", "kubeletVersion":"", "machineID":"", "operatingSystem":"", "osImage":"", "systemUUID":"" } } } }`, }, { Label: "Multiple Cluster Resources With Prefix", ResourceType: types.ResourceType{ Namespaced: false, Resource: "nodes", Version: "v1", }, Prefix: "kube", Objs: []runtime.Object{ &apiv1.Node{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Node", }, ObjectMeta: metav1.ObjectMeta{ Name: "node1", ResourceVersion: "0", }, Spec: apiv1.NodeSpec{}, Status: apiv1.NodeStatus{}, }, &apiv1.Node{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Node", }, ObjectMeta: metav1.ObjectMeta{ Name: "node2", ResourceVersion: "0", }, Spec: apiv1.NodeSpec{}, Status: apiv1.NodeStatus{}, }, &apiv1.Node{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Node", }, ObjectMeta: metav1.ObjectMeta{ Name: "node3", ResourceVersion: "0", }, Spec: apiv1.NodeSpec{}, Status: apiv1.NodeStatus{}, }, }, Expected: `{ "node1":{ "apiVersion": "v1", "kind": "Node", "metadata":{ "creationTimestamp":null, "name":"node1", "resourceVersion":"0" }, "spec":{ }, "status":{ "daemonEndpoints":{ 
"kubeletEndpoint":{ "Port":0 } }, "nodeInfo":{ "architecture":"", "bootID":"", "containerRuntimeVersion":"", "kernelVersion":"", "kubeProxyVersion":"", "kubeletVersion":"", "machineID":"", "operatingSystem":"", "osImage":"", "systemUUID":"" } } }, "node2":{ "apiVersion": "v1", "kind": "Node", "metadata":{ "creationTimestamp":null, "name":"node2", "resourceVersion":"0" }, "spec":{ }, "status":{ "daemonEndpoints":{ "kubeletEndpoint":{ "Port":0 } }, "nodeInfo":{ "architecture":"", "bootID":"", "containerRuntimeVersion":"", "kernelVersion":"", "kubeProxyVersion":"", "kubeletVersion":"", "machineID":"", "operatingSystem":"", "osImage":"", "systemUUID":"" } } }, "node3":{ "apiVersion": "v1", "kind": "Node", "metadata":{ "creationTimestamp":null, "name":"node3", "resourceVersion":"0" }, "spec":{ }, "status":{ "daemonEndpoints":{ "kubeletEndpoint":{ "Port":0 } }, "nodeInfo":{ "architecture":"", "bootID":"", "containerRuntimeVersion":"", "kernelVersion":"", "kubeProxyVersion":"", "kubeletVersion":"", "machineID":"", "operatingSystem":"", "osImage":"", "systemUUID":"" } } } }`, }, { Label: "Single Namespaced Resource", ResourceType: types.ResourceType{ Namespaced: true, Resource: "pods", Version: "v1", }, Prefix: "", Objs: []runtime.Object{ &apiv1.Pod{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Pod", }, ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns1", ResourceVersion: "0", }, Spec: apiv1.PodSpec{}, Status: apiv1.PodStatus{}, }, }, Expected: `{ "ns1":{ "pod1":{ "apiVersion": "v1", "kind": "Pod", "metadata":{ "creationTimestamp":null, "name":"pod1", "namespace":"ns1", "resourceVersion":"0" }, "spec":{ "containers":null }, "status":{ } } } }`, }, { Label: "Single Namespaced Resource With Prefix", ResourceType: types.ResourceType{ Namespaced: true, Resource: "pods", Version: "v1", }, Prefix: "kube", Objs: []runtime.Object{ &apiv1.Pod{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Pod", }, ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns1", 
ResourceVersion: "0", }, Spec: apiv1.PodSpec{}, Status: apiv1.PodStatus{}, }, }, Expected: `{ "ns1":{ "pod1":{ "apiVersion": "v1", "kind": "Pod", "metadata":{ "creationTimestamp":null, "name":"pod1", "namespace":"ns1", "resourceVersion":"0" }, "spec":{ "containers":null }, "status":{ } } } }`, }, { Label: "Multiple Namespaced Resources With Prefix", ResourceType: types.ResourceType{ Namespaced: true, Resource: "pods", Version: "v1", }, Prefix: "kube", Objs: []runtime.Object{ &apiv1.Pod{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Pod", }, ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns1", ResourceVersion: "0", }, Spec: apiv1.PodSpec{}, Status: apiv1.PodStatus{}, }, &apiv1.Pod{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Pod", }, ObjectMeta: metav1.ObjectMeta{ Name: "pod2", Namespace: "ns1", ResourceVersion: "0", }, Spec: apiv1.PodSpec{}, Status: apiv1.PodStatus{}, }, &apiv1.Pod{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Pod", }, ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns2", ResourceVersion: "0", }, Spec: apiv1.PodSpec{}, Status: apiv1.PodStatus{}, }, }, Expected: `{ "ns1":{ "pod1":{ "apiVersion": "v1", "kind": "Pod", "metadata":{ "creationTimestamp":null, "name":"pod1", "namespace":"ns1", "resourceVersion":"0" }, "spec":{ "containers":null }, "status":{ } }, "pod2":{ "apiVersion": "v1", "kind": "Pod", "metadata":{ "creationTimestamp":null, "name":"pod2", "namespace":"ns1", "resourceVersion":"0" }, "spec":{ "containers":null }, "status":{ } } }, "ns2":{ "pod1": { "apiVersion": "v1", "kind": "Pod", "metadata":{ "creationTimestamp":null, "name":"pod1", "namespace":"ns2", "resourceVersion":"0" }, "spec":{ "containers":null }, "status":{ } } } }`, }, } for _, tc := range testCases { tc := tc // We will be running the tests in parallel, so avoid issues with loop var expected := expect.MustMarshal(t, expect.MustUnmarshal(t, []byte(tc.Expected))) t.Run(fmt.Sprintf("%s - Must Generate Sync Payload", tc.Label), func(t 
*testing.T) { t.Parallel() tc.testGenerateSyncPayload(t, expected) }) t.Run(fmt.Sprintf("%s - Must Load Existing Resources", tc.Label), func(t *testing.T) { t.Parallel() tc.testLoad(t, expected) }) t.Run(fmt.Sprintf("%s - Must Add New Resources", tc.Label), func(t *testing.T) { t.Parallel() tc.testAdd(t) }) t.Run(fmt.Sprintf("%s - Must Remove Resources", tc.Label), func(t *testing.T) { t.Parallel() tc.testDelete(t) }) t.Run(fmt.Sprintf("%s - Must Update Resources", tc.Label), func(t *testing.T) { t.Parallel() tc.testUpdate(t) }) t.Run(fmt.Sprintf("%s - Must Retry Load On Error", tc.Label), func(t *testing.T) { t.Parallel() tc.testRetryLoad(t, expected) }) t.Run(fmt.Sprintf("%s - Must Retry Add On Error", tc.Label), func(t *testing.T) { t.Parallel() tc.testRetryAdd(t) }) t.Run(fmt.Sprintf("%s - Must Retry Update On Error", tc.Label), func(t *testing.T) { t.Parallel() tc.testRetryUpdate(t) }) t.Run(fmt.Sprintf("%s - Must Retry Delete On Error", tc.Label), func(t *testing.T) { t.Parallel() tc.testRetryDelete(t) }) } } func TestEventQueue(t *testing.T) { t.Parallel() tc := testCase{ Label: "Single Cluster Resource", ResourceType: types.ResourceType{ Namespaced: false, Resource: "nodes", Version: "v1", }, Prefix: "", Objs: []runtime.Object{ &apiv1.Node{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Node", }, ObjectMeta: metav1.ObjectMeta{ Name: "node1", ResourceVersion: "0", }, Spec: apiv1.NodeSpec{}, Status: apiv1.NodeStatus{}, }, }, Expected: `{ "node1":{ "apiVersion": "v1", "kind": "Node", "metadata":{ "creationTimestamp":null, "name":"node1", "resourceVersion":"0" }, "spec":{ }, "status":{ "daemonEndpoints":{ "kubeletEndpoint":{ "Port":0 } }, "nodeInfo":{ "architecture":"", "bootID":"", "containerRuntimeVersion":"", "kernelVersion":"", "kubeProxyVersion":"", "kubeletVersion":"", "machineID":"", "operatingSystem":"", "osImage":"", "systemUUID":"" } } } }`, } t.Run(fmt.Sprintf("%s - Must Update On Different ResourceVersion", tc.Label), func(t *testing.T) { 
t.Parallel() tc.testUpdateDifferentVersion(t) }) t.Run(fmt.Sprintf("%s - Must Skip On Same ResourceVersion", tc.Label), func(t *testing.T) { t.Parallel() tc.testUpdateSameVersion(t) }) } func (tc *testCase) testGenerateSyncPayload(t *testing.T, expected []byte) { data := make([]interface{}, 0, len(tc.Objs)) for _, obj := range tc.Objs { data = append(data, obj) } patches, err := generateSyncPayload(data, tc.ResourceType.Namespaced) if err != nil { t.Fatalf("Unexpected error: %v", err) } result := expect.MustRoundTrip(t, patches) expect.MustEqual(t, result, expected) } func (tc *testCase) Play(t *testing.T, client dynamicClient, play expect.Script) *expect.Client { t.Helper() return expect.Play(t, play, func(ctx context.Context, mockClient *expect.Client) { data := mockClient.Prefix(tc.Prefix) sync := NewFromInterface( client, data, tc.ResourceType, WithBackoff(0, 5*time.Second, 0), ) sync.RunContext(ctx) }) } func (tc *testCase) testLoad(t *testing.T, expected []byte) { client := newFakeDynamicClient(t, tc.Objs...) 
play := expect.Script{ expect.PutData("/", expected).End(), } data := tc.Play(t, client, play) expect.MustEqual(t, data.PrefixList, []string{tc.Prefix, tc.ResourceType.Resource}) } func (tc *testCase) testAdd(t *testing.T) { client, obj := newFakeDynamicClient(t), tc.Objs[0] play := expect.Script{ expect.PutData("/", []byte("{}")).Do(client.MustCreate(t, tc.ResourceType, obj)), expect.PutData(expect.MustKey(t, obj), expect.MustRoundTrip(t, obj)).End(), } tc.Play(t, client, play) } func (tc *testCase) testDelete(t *testing.T) { client, obj := newFakeDynamicClient(t, tc.Objs...), tc.Objs[0] play := expect.Script{ expect.PutData("/").Do(client.MustRemove(t, tc.ResourceType, obj)), expect.PatchData(expect.MustKey(t, obj), "remove").End(), } tc.Play(t, client, play) } func (tc *testCase) testUpdate(t *testing.T) { change := mustUnstructure(t, tc.Objs[0]) change.SetLabels(map[string]string{"test": "update"}) change.SetResourceVersion("1") client := newFakeDynamicClient(t, tc.Objs...) play := expect.Script{ expect.PutData("/").Do(client.MustUpdate(t, tc.ResourceType, change)), expect.PutData(expect.MustKey(t, change), expect.MustRoundTrip(t, change.Object)).End(), } tc.Play(t, client, play) } func (tc *testCase) testRetryLoad(t *testing.T, expected []byte) { client := newFakeDynamicClient(t, tc.Objs...) 
play := expect.Script{ expect.PutData("/").DoError(errors.New("test fail update")), expect.PutData("/", expected).End(), } tc.Play(t, client, play) } func (tc *testCase) testRetryAdd(t *testing.T) { client, obj := newFakeDynamicClient(t), tc.Objs[0] play := expect.Script{ expect.PutData("/").Do(client.MustCreate(t, tc.ResourceType, obj)), expect.PutData(expect.MustKey(t, obj)).DoError(errors.New("test fail update")), expect.PutData("/").End(), } tc.Play(t, client, play) } func (tc *testCase) testRetryUpdate(t *testing.T) { change := mustUnstructure(t, tc.Objs[0]) change.SetLabels(map[string]string{"test": "update"}) change.SetResourceVersion("1") client := newFakeDynamicClient(t, tc.Objs...) play := expect.Script{ expect.PutData("/").Do(client.MustUpdate(t, tc.ResourceType, change)), expect.PutData(expect.MustKey(t, change)).DoError(errors.New("Failed to update")), expect.PutData("/").End(), // don't check the payload on this last put, because we // have removed an item so it no longer matches the tc.expected } tc.Play(t, client, play) } func (tc *testCase) testRetryDelete(t *testing.T) { client, obj := newFakeDynamicClient(t, tc.Objs...), tc.Objs[0] play := expect.Script{ expect.PutData("/").Do(client.MustRemove(t, tc.ResourceType, obj)), expect.PatchData(expect.MustKey(t, obj), "remove").DoError(errors.New("test Patch failed")), expect.PutData("/").End(), // don't check the payload on this last put, because we // have removed an item so it no longer matches the tc.expected } tc.Play(t, client, play) } func (tc *testCase) testUpdateSameVersion(t *testing.T) { change := mustUnstructure(t, tc.Objs[0]) change.SetAnnotations(map[string]string{"test": "update"}) client := newFakeDynamicClient(t, tc.Objs...) 
play := expect.Script{ expect.PutData("/").Do(client.MustUpdate(t, tc.ResourceType, change)), expect.Nothing(100 * time.Millisecond).End(), } tc.Play(t, client, play) } func (tc *testCase) testUpdateDifferentVersion(t *testing.T) { change := mustUnstructure(t, tc.Objs[0]) change.SetLabels(map[string]string{"test": "update"}) change.SetResourceVersion("1") client := newFakeDynamicClient(t, tc.Objs...) play := expect.Script{ expect.PutData("/").Do(client.MustUpdate(t, tc.ResourceType, change)), expect.PutData(expect.MustKey(t, change), expect.MustRoundTrip(t, change)).End(), } tc.Play(t, client, play) } // MustCreate returns an action that creates an instance of the resource func (f dynamicClient) MustCreate(t *testing.T, resourceType types.ResourceType, obj runtime.Object) expect.Action { namespace := mustAccess(t, obj).GetNamespace() return func() error { r := f.ResourceFor(resourceType, namespace) if _, err := r.Create(context.Background(), mustUnstructure(t, obj), metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create object %v: %v", obj, err) } return nil } } // MustRemove returns an Action that removes an instance of the resource func (f dynamicClient) MustRemove(t *testing.T, resourceType types.ResourceType, obj runtime.Object) expect.Action { m := mustAccess(t, obj) return func() error { r := f.ResourceFor(resourceType, m.GetNamespace()) if err := r.Delete(context.Background(), m.GetName(), metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to remove object %v: %v", obj, err) } return nil } } // MustUpdate returns an Action that updates an instance of the resource func (f dynamicClient) MustUpdate(t *testing.T, resourceType types.ResourceType, obj runtime.Object) expect.Action { namespace := mustAccess(t, obj).GetNamespace() return func() error { r := f.ResourceFor(resourceType, namespace) if _, err := r.Update(context.Background(), obj.(*unstructured.Unstructured), metav1.UpdateOptions{}); err != nil { t.Fatalf("Failed to create object %v: %v", 
obj, err) } return nil } } // mustUnstructure clones the object provided into an Unstructured object func mustUnstructure(t *testing.T, obj runtime.Object) *unstructured.Unstructured { copiedObj := expect.MustUnmarshal(t, expect.MustMarshal(t, obj)) if asMap, ok := copiedObj.(map[string]interface{}); ok { return &unstructured.Unstructured{Object: asMap} } t.Fatalf("Failed to copy %#v as a map[string]interface{}", obj) return nil // to make staticcheck happy } // mustAccess returns an accessor for the given object func mustAccess(t *testing.T, obj runtime.Object) metav1.Object { m, err := meta.Accessor(obj) if err != nil { t.Fatalf("Failed to build accessor for %v: %v", obj, err) } return m } func TestGenericSync_ignoreNs(t *testing.T) { type fields struct { ignoreNamespaces []string } tests := []struct { name string fields fields want string ns types.ResourceType }{ { name: "empty fields", fields: fields{ []string{}, }, want: "", ns: types.ResourceType{Namespaced: true}, }, { name: "one field", fields: fields{ []string{"cluster-autosscaler"}, }, want: "", ns: types.ResourceType{Namespaced: false}, }, { name: "one field", fields: fields{ []string{"cluster-autosscaler"}, }, want: "metadata.namespace!=cluster-autosscaler", ns: types.ResourceType{Namespaced: true}, }, { name: "two fields", fields: fields{ []string{"cluster-autoscaler", "cluster-manager"}, }, want: "metadata.namespace!=cluster-manager,metadata.namespace!=cluster-autoscaler", ns: types.ResourceType{Namespaced: true}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &GenericSync{ ignoreNamespaces: tt.fields.ignoreNamespaces, ns: tt.ns, } if got := s.ignoreNs(); got != tt.want { t.Errorf("GenericSync.ignoreNs() = %v, want %v", got, tt.want) } }) } } ================================================ FILE: pkg/data/types.go ================================================ package data import ( "github.com/open-policy-agent/kube-mgmt/pkg/types" "k8s.io/apimachinery/pkg/runtime/schema" 
"k8s.io/client-go/dynamic" ) // resourceInterface knows how to use the method `Resource` of dynamic.Interface type resourceInterface interface { Resource(schema.GroupVersionResource) dynamic.NamespaceableResourceInterface } // dynamicClient wraps a resourceInterface with some utilities type dynamicClient struct { resourceInterface } // ResourceFor builds a dynamic.ResourceInterface for a ResourceType func (f dynamicClient) ResourceFor(resourceType types.ResourceType, namespace string) dynamic.ResourceInterface { resource := f.Resource(schema.GroupVersionResource{ Group: resourceType.Group, Version: resourceType.Version, Resource: resourceType.Resource, }) if resourceType.Namespaced { return resource.Namespace(namespace) } return resource } ================================================ FILE: pkg/dynamicdata/dynamicdata.go ================================================ package dynamicdata import ( "bytes" "context" "fmt" "os" "sort" "sync" "github.com/open-policy-agent/kube-mgmt/pkg/data" "github.com/open-policy-agent/kube-mgmt/pkg/opa" "github.com/open-policy-agent/kube-mgmt/pkg/types" //lint:ignore SA1019 using OPA v0.x to ensure backwards compatible with pre-1.0 bundles "github.com/open-policy-agent/opa/ast" //lint:ignore SA1019 using OPA v0.x to ensure backwards compatible with pre-1.0 bundles "github.com/open-policy-agent/opa/dependencies" //lint:ignore SA1019 using OPA v0.x to ensure backwards compatible with pre-1.0 bundles "github.com/open-policy-agent/opa/logging" //lint:ignore SA1019 using OPA v0.x to ensure backwards compatible with pre-1.0 bundles "github.com/open-policy-agent/opa/plugins" //lint:ignore SA1019 using OPA v0.x to ensure backwards compatible with pre-1.0 bundles "github.com/open-policy-agent/opa/sdk" //lint:ignore SA1019 using OPA v0.x to ensure backwards compatible with pre-1.0 bundles "github.com/open-policy-agent/opa/storage" //lint:ignore SA1019 using OPA v0.x to ensure backwards compatible with pre-1.0 bundles 
"github.com/open-policy-agent/opa/storage/inmem" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" ) type Sync struct { opaConfig []byte kubeconfig *rest.Config opaURL, opaAuth string ignoreNs []string analysisEntrypoint string replicatePath string logger logging.Logger running map[types.ResourceType]*cancellableSync mu sync.Mutex ready bool } func New(configFile string, analysisEntrypoint string, opaURL, opaAuth string, ignoreNs []string, replicatePath string, kubeconfig *rest.Config, logger logging.Logger) (*Sync, error) { bs, err := os.ReadFile(configFile) if err != nil { return nil, err } sync := &Sync{ opaConfig: bs, kubeconfig: kubeconfig, opaAuth: opaAuth, opaURL: opaURL, ignoreNs: ignoreNs, analysisEntrypoint: analysisEntrypoint, replicatePath: replicatePath, logger: logger, running: make(map[types.ResourceType]*cancellableSync), } return sync, nil } func (s *Sync) Run(ctx context.Context) error { s.logger.Debug("Loading kubeconfig for API server") client, err := dynamic.NewForConfig(s.kubeconfig) if err != nil { return err } s.logger.Debug("Resolving resource names to resource types") rts, err := resolveResourceTypes(s.kubeconfig) if err != nil { return err } s.logger.Debug("Starting analyzer") analyzer, err := newAnalyzer(ctx, s.opaConfig, s.replicatePath, s.analysisEntrypoint, s.logger) if err != nil { return err } go s.loop(ctx, analyzer, rts, client) return nil } func (s *Sync) Ready() bool { s.mu.Lock() defer s.mu.Unlock() if !s.ready { s.logger.Debug("Sync is not ready") return false } for rt, r := range s.running { if !r.sync.Ready() { s.logger.Debug("Replicator for %v is not ready", rt) return false } } return true } func (s *Sync) loop(ctx context.Context, a *analyzer, rts map[string]types.ResourceType, client *dynamic.DynamicClient) { for { s.logger.Debug("Sync waiting for analysis result") select { case result := <-a.C: s.logger.Debug("Sync 
processing analysis result: %v", result) s.processAnalysisResult(ctx, result, rts, client) case <-ctx.Done(): s.logger.Debug("Sync shutting down") } } } func (s *Sync) processAnalysisResult(ctx context.Context, result analysisResult, rts map[string]types.ResourceType, client *dynamic.DynamicClient) { s.mu.Lock() defer s.mu.Unlock() // If any of the refs cannot be mapped to gvk then give up. for _, ref := range result.Refs { if _, ok := rts[ref.Resource]; !ok { logrus.Errorf("Cannot resolve Kubernetes resource %q to group/version/resource for dynamic data replication", ref.Resource) s.ready = false return } } // Otherwise, create and delete data syncs accordingly. s.ready = true create := map[types.ResourceType]struct{}{} for _, ref := range result.Refs { rt := rts[ref.Resource] create[rt] = struct{}{} if _, ok := s.running[rt]; !ok { s.logger.Debug("Starting data replication for %v", rt) sync := data.NewFromInterface(client, opa.New(s.opaURL, s.opaAuth).Prefix(s.replicatePath), rt, data.WithIgnoreNamespaces(s.ignoreNs)) ctx, cancel := context.WithCancel(ctx) s.running[rt] = &cancellableSync{cancel: cancel, sync: sync} go sync.RunContext(ctx) } else { s.logger.Debug("Data replication for %v already started", rt) } } for rt, sync := range s.running { if _, ok := create[rt]; !ok { s.logger.Debug("Stopping replication for %v", rt) sync.cancel() delete(s.running, rt) } } } func resolveResourceTypes(config *rest.Config) (map[string]types.ResourceType, error) { discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) if err != nil { return nil, fmt.Errorf("failed to create discovery client: %v", err) } resources, err := discoveryClient.ServerPreferredResources() if err != nil { return nil, fmt.Errorf("failed to get server preferred resources: %v", err) } result := map[string]types.ResourceType{} for _, r := range resources { gv, err := schema.ParseGroupVersion(r.GroupVersion) if err != nil { return nil, err } for _, ar := range r.APIResources { rt := 
types.ResourceType{ Namespaced: ar.Namespaced, Resource: ar.Name, Group: ar.Group, Version: ar.Version, } if rt.Group == "" { rt.Group = gv.Group } if rt.Version == "" { rt.Version = gv.Version } logrus.Infof("Discovered resource %v mapping to type %v (namespaced: %v)", ar.Name, rt, rt.Namespaced) result[ar.Name] = rt } } return result, nil } type cancellableSync struct { cancel context.CancelFunc sync *data.GenericSync } type analyzer struct { C chan analysisResult updates chan *ast.Compiler opa *sdk.OPA prefix ast.Ref entry ast.Ref logger logging.Logger } type analysisResult struct { Refs []ref } func newAnalyzer(ctx context.Context, bs []byte, replicatePath, analysisEntrypoint string, logger logging.Logger) (*analyzer, error) { a := &analyzer{ C: make(chan analysisResult), updates: make(chan *ast.Compiler, 1), logger: logger, } var err error a.prefix, err = ast.PtrRef(ast.DefaultRootDocument, replicatePath) if err != nil { return nil, err } a.entry, err = ast.PtrRef(ast.DefaultRootDocument, analysisEntrypoint) if err != nil { return nil, err } go a.loop(ctx) store := inmem.New() err = storage.Txn(ctx, store, storage.TransactionParams{Write: true}, func(txn storage.Transaction) error { _, err := store.Register(ctx, txn, storage.TriggerConfig{OnCommit: a.trigger}) return err }) if err != nil { return nil, err } a.opa, err = sdk.New(ctx, sdk.Options{Config: bytes.NewBuffer(bs), Store: store, Logger: logger}) if err != nil { return nil, err } return a, nil } type ref struct { Resource string } func (a *analyzer) Stop(ctx context.Context) error { close(a.C) a.opa.Stop(ctx) return nil } func (a *analyzer) trigger(_ context.Context, txn storage.Transaction, event storage.TriggerEvent) { compiler := plugins.GetCompilerOnContext(event.Context) a.logger.Debug("Analyzer received storage trigger callback (txn=%d, compiler=%p)", txn.ID(), compiler) if compiler == nil { return } a.updates <- compiler } func (a *analyzer) loop(ctx context.Context) { for { select { case 
compiler := <-a.updates: refs, missing, err := analyzeRefs(compiler, []ast.Ref{a.entry}, a.prefix, a.logger) if err != nil { a.logger.Error("Failed to analyze refs: %v", err) continue } if len(missing) > 0 { a.logger.Debug("Analysis could not find entrypoints %v, skipping update", missing) continue } a.C <- analysisResult{Refs: refs} case <-ctx.Done(): logrus.Info("Analyzer shutting down") return } } } func analyzeRefs(c *ast.Compiler, entrypoints []ast.Ref, prefix ast.Ref, logger logging.Logger) ([]ref, []ast.Ref, error) { logger.Debug("Analyzing dependencies for references to %v starting from %v", prefix, entrypoints) resultMap := map[string]struct{}{} visited := map[*ast.Rule]struct{}{} var queue []*ast.Rule missing := []ast.Ref{} for _, ref := range entrypoints { rules := c.GetRulesForVirtualDocument(ref) if len(rules) == 0 { missing = append(missing, ref) } queue = append(queue, rules...) } if len(missing) > 0 { return nil, missing, nil } for len(queue) > 0 { var next *ast.Rule next, queue = queue[0], queue[1:] if _, ok := visited[next]; ok { continue } visited[next] = struct{}{} for a := range c.Graph.Dependencies(next) { queue = append(queue, a.(*ast.Rule)) } deps, err := dependencies.Minimal(next) if err != nil { logrus.Errorf("Analysis error for %v: %v", next.Location, err) continue } logrus.Debugf("Analyzed %v and found %v", next.Location, deps) for _, ref := range deps { if ref.HasPrefix(prefix) && len(ref) > len(prefix) { if s, ok := ref[len(prefix)].Value.(ast.String); ok { resultMap[string(s)] = struct{}{} } } } } var result []ref for x := range resultMap { result = append(result, ref{Resource: x}) } sort.Slice(result, func(i, j int) bool { return result[i].Resource < result[j].Resource }) return result, nil, nil } ================================================ FILE: pkg/dynamicdata/dynamicdata_test.go ================================================ package dynamicdata import ( "context" "fmt" "testing" //lint:ignore SA1019 using OPA v0.x to ensure 
backwards compatible with pre-1.0 bundles sdktest "github.com/open-policy-agent/opa/sdk/test" "github.com/open-policy-agent/opa/v1/logging" ) func TestAnalyzer(t *testing.T) { ctx := context.Background() s := sdktest.MustNewServer(sdktest.RawBundles(true), sdktest.MockBundle("/bundles/bundle.tar.gz", map[string]string{ ".manifest": `{"roots": ["main"]}`, "main/main.rego": `package main import rego.v1 main if { data.kubernetes.resources.pods[ns][_].metadata.labels.badlabel == "badbadbad"; r2 } r2 if { data.kubernetes.resources.namespaces["default"].metadata.labels.foo == "bar" }`, })) defer s.Stop() config := fmt.Appendf(nil, `{ services: { test: { url: "%v/bundles" } }, bundles: { test: { service: test, resource: bundle.tar.gz } } }`, s.URL()) a, err := newAnalyzer(ctx, config, "kubernetes/resources", "main/main", logging.New()) if err != nil { t.Fatal(err) } result := <-a.C if len(result.Refs) != 2 || result.Refs[0].Resource != "namespaces" || result.Refs[1].Resource != "pods" { t.Fatalf("expected to identify pods reference but got: %v", result) } if err := a.Stop(ctx); err != nil { t.Fatal(err) } } func TestAnalyzerNoDeps(t *testing.T) { ctx := context.Background() s := sdktest.MustNewServer(sdktest.RawBundles(true), sdktest.MockBundle("/bundles/bundle.tar.gz", map[string]string{ ".manifest": `{"roots": ["main"]}`, "main/main.rego": `package main import rego.v1 main if { true }`, })) defer s.Stop() config := fmt.Appendf(nil, `{ services: { test: { url: "%v/bundles" } }, bundles: { test: { service: test, resource: bundle.tar.gz } } }`, s.URL()) a, err := newAnalyzer(ctx, config, "kubernetes/resources", "main/main", logging.New()) if err != nil { t.Fatal(err) } result := <-a.C if len(result.Refs) != 0 { t.Fatalf("expected not to identify any resources but got: %v", result) } if err := a.Stop(ctx); err != nil { t.Fatal(err) } } ================================================ FILE: pkg/opa/opa.go ================================================ // Copyright 2017 The 
OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. package opa import ( "bytes" "encoding/json" "fmt" "io" "net/http" "strings" ) // Error contains the standard error fields returned by OPA. type Error struct { Code string `json:"code"` Message string `json:"message"` Errors json.RawMessage `json:"errors,omitempty"` } func (err *Error) Error() string { return fmt.Sprintf("code %v: %v", err.Code, err.Message) } // Undefined represents an undefined response from OPA. type Undefined struct{} func (Undefined) Error() string { return "undefined" } // IsUndefinedErr returns true if the err represents an undefined result from // OPA. func IsUndefinedErr(err error) bool { _, ok := err.(Undefined) return ok } // Client defines the OPA client interface. type Client interface { Policies Data } // Policies defines the policy management interface in OPA. type Policies interface { InsertPolicy(id string, bs []byte) error DeletePolicy(id string) error } // Data defines the interface for pushing and querying data in OPA. type Data interface { Prefix(path string) Data PatchData(path string, op string, value *interface{}) error PutData(path string, value interface{}) error PostData(path string, value interface{}) (json.RawMessage, error) } // New returns a new Client object. 
func New(url string, auth string) Client { return &httpClient{strings.TrimRight(url, "/"), "", auth} } type httpClient struct { url string prefix string authentication string } func (c *httpClient) Prefix(path string) Data { cpy := *c cpy.prefix = joinPaths("/", c.prefix, path) return &cpy } func (c *httpClient) PatchData(path string, op string, value *interface{}) error { buf, err := c.makePatch(path, op, value) if err != nil { return err } resp, err := c.do("PATCH", slashPath("data"), buf) if err != nil { return err } return c.handleErrors(resp) } func (c *httpClient) PutData(path string, value interface{}) error { var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(value); err != nil { return err } absPath := slashPath("data", c.prefix, path) resp, err := c.do("PUT", absPath, &buf) if err != nil { return err } return c.handleErrors(resp) } func (c *httpClient) PostData(path string, value interface{}) (json.RawMessage, error) { var buf bytes.Buffer var input struct { Input interface{} `json:"input"` } input.Input = value if err := json.NewEncoder(&buf).Encode(input); err != nil { return nil, err } absPath := slashPath("data", c.prefix, path) resp, err := c.do("POST", absPath, &buf) if err != nil { return nil, err } var result struct { Result json.RawMessage `json:"result"` Error map[string]interface{} `json:"error"` } if resp.StatusCode != 200 { return nil, c.handleErrors(resp) } if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { return nil, err } if result.Result == nil { return nil, Undefined{} } return result.Result, nil } func (c *httpClient) InsertPolicy(id string, bs []byte) error { buf := bytes.NewBuffer(bs) path := slashPath("policies", id) resp, err := c.do("PUT", path, buf) if err != nil { return err } return c.handleErrors(resp) } func (c *httpClient) DeletePolicy(id string) error { path := slashPath("policies", id) resp, err := c.do("DELETE", path, nil) if err != nil { return err } return c.handleErrors(resp) } func (c 
*httpClient) makePatch(path, op string, value *interface{}) (io.Reader, error) { patch := []struct { Path string `json:"path"` Op string `json:"op"` Value *interface{} `json:"value,omitempty"` }{ { Path: slashPath(c.prefix, path), Op: op, Value: value, }, } var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(patch); err != nil { return nil, err } return &buf, nil } func (c *httpClient) handleErrors(resp *http.Response) error { defer resp.Body.Close() if resp.StatusCode >= 200 && resp.StatusCode < 300 { return nil } var err Error if err := json.NewDecoder(resp.Body).Decode(&err); err != nil { return err } return &err } func (c *httpClient) do(verb, path string, body io.Reader) (*http.Response, error) { url := c.url + path req, err := http.NewRequest(verb, url, body) if err != nil { return nil, err } if c.authentication != "" { req.Header.Set("Authorization", "Bearer "+c.authentication) } return http.DefaultClient.Do(req) } func slashPath(paths ...string) string { return makePath("/", paths...) } func makePath(join string, paths ...string) string { return join + joinPaths(join, paths...) } func joinPaths(join string, paths ...string) string { parts := []string{} for _, path := range paths { path = strings.Trim(path, join) if path != "" { parts = append(parts, path) } } return strings.Join(parts, join) } ================================================ FILE: pkg/opa/opa_test.go ================================================ // Copyright 2018 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. 
package opa import ( "encoding/json" "io" "reflect" "testing" ) func TestHTTPClientMakePatch(t *testing.T) { tests := []struct { prefix string path string op string value string want string }{ { prefix: "", path: "foo", op: "add", value: "true", want: `[{ "path": "/foo", "op": "add", "value": true }]`, }, { prefix: "", path: "default/foo", op: "remove", value: "", want: `[{ "path": "/default/foo", "op": "remove" }]`, }, { prefix: "type", path: "default/foo", op: "remove", value: "", want: `[{ "path": "/type/default/foo", "op": "remove" }]`, }, { prefix: "/type1/subtypeA/", path: "default/foo", op: "remove", value: "", want: `[{ "path": "/type1/subtypeA/default/foo", "op": "remove" }]`, }, } for _, tc := range tests { client := &httpClient{"URL", tc.prefix, ""} var value *interface{} if tc.value != "" { var x interface{} if err := json.Unmarshal([]byte(tc.value), &x); err != nil { panic(err) } value = &x } patch := mustMakePatch(client, tc.path, tc.op, value) var expected interface{} if err := json.Unmarshal([]byte(tc.want), &expected); err != nil { panic(err) } if !reflect.DeepEqual(patch, expected) { t.Errorf("Expected %v but got: %v", expected, patch) } } } func mustMakePatch(client *httpClient, path, op string, value *interface{}) interface{} { buf, err := client.makePatch(path, op, value) if err != nil { panic(err) } return mustUnmarshalJSON(buf) } func mustUnmarshalJSON(r io.Reader) interface{} { var x interface{} err := json.NewDecoder(r).Decode(&x) if err != nil { panic(err) } return x } ================================================ FILE: pkg/types/types.go ================================================ // Copyright 2017 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. // Package types contains type information used by controllers. package types import "strings" // ResourceType describes a resource type in Kubernetes. 
type ResourceType struct { // Namespaced indicates if this kind is namespaced. Namespaced bool Resource string Group string Version string } func (t ResourceType) String() string { parts := []string{} if t.Group != "" { parts = append(parts, t.Group) } if t.Version != "" { parts = append(parts, t.Version) } if t.Resource != "" { parts = append(parts, t.Resource) } return strings.Join(parts, "/") } ================================================ FILE: pkg/version/version.go ================================================ // Copyright 2017 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. package version // Version information set by build process. var ( Version = "" Git = "" ) ================================================ FILE: test/e2e/custom_config/1_bundle_loaded.hurl ================================================ GET https://localhost:8443/v1/data Authorization: Bearer {{token}} HTTP 200 [Asserts] jsonpath "$.result.test_helm_kubernetes_quickstart.*" count == 3 ================================================ FILE: test/e2e/custom_config/chainsaw-test.yaml ================================================ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: custom-config spec: namespace: default steps: - name: bundle is loaded try: - script: content: | hurl --variable token=$OPA_TOKEN --insecure --retry 15 --retry-interval 2s 1_bundle_loaded.hurl ================================================ FILE: test/e2e/custom_config/values.yaml ================================================ opa: services: controller: url: 'https://www.openpolicyagent.org' bundles: quickstart: service: controller resource: /external-resources/bundles/helm-kubernetes-quickstart default_decision: /helm_kubernetes_quickstart/main ================================================ FILE: test/e2e/custom_mgmt_token/1_policy_loaded.hurl ================================================ 
GET https://localhost:8443/v1/policies Authorization: Bearer {{token}} HTTP 200 [Asserts] jsonpath "$.result[?(@.id == 'default/policy-include/include.rego')]" count >= 1 GET https://localhost:8443/v1/data/example/include/allow Authorization: Bearer {{token}} HTTP 200 [Asserts] jsonpath "$.result" == true ================================================ FILE: test/e2e/custom_mgmt_token/2_data_loaded.hurl ================================================ GET https://localhost:8443/v1/data/default Authorization: Bearer {{token}} HTTP 200 [Asserts] jsonpath "$.result.*" count == 1 jsonpath "$.result.data-include" exists GET https://localhost:8443/v1/data/default/data-include Authorization: Bearer {{token}} HTTP 200 [Asserts] jsonpath "$.result['include.json'].inKey" == "inValue" ================================================ FILE: test/e2e/custom_mgmt_token/chainsaw-test.yaml ================================================ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: custom-mgmt-token spec: namespace: default steps: - name: apply fixtures try: - apply: file: ../fixture.yaml cleanup: - delete: file: ../fixture.yaml - name: policy is loaded and evaluated correctly try: - script: content: | hurl --variable token=$OPA_TOKEN --insecure --retry 3 1_policy_loaded.hurl - name: data is loaded correctly try: - script: content: | hurl --variable token=$OPA_TOKEN --insecure 2_data_loaded.hurl - name: configmap status annotations are ok try: - assert: resource: apiVersion: v1 kind: ConfigMap metadata: labels: openpolicyagent.org/policy: rego annotations: openpolicyagent.org/kube-mgmt-status: '{"status":"ok"}' - assert: resource: apiVersion: v1 kind: ConfigMap metadata: labels: openpolicyagent.org/data: opa annotations: openpolicyagent.org/kube-mgmt-status: '{"status":"ok"}' ================================================ FILE: test/e2e/custom_mgmt_token/values.yaml ================================================ e2eMgmtTokenSecret: true opa: replicas: 2 
authz: enabled: true mgmtToken: secretName: mgmt-token-secret ================================================ FILE: test/e2e/default/1_initial_state.hurl ================================================ GET https://localhost:8443/v1/data Authorization: Bearer {{token}} HTTP 200 [Asserts] jsonpath "$.result.*" count == 0 ================================================ FILE: test/e2e/default/2_policy_loaded.hurl ================================================ GET https://localhost:8443/v1/policies Authorization: Bearer {{token}} HTTP 200 [Asserts] jsonpath "$.result[?(@.id == 'default/policy-include/include.rego')]" count >= 1 GET https://localhost:8443/v1/data/example/include/allow Authorization: Bearer {{token}} HTTP 200 [Asserts] jsonpath "$.result" == true ================================================ FILE: test/e2e/default/3_data_loaded.hurl ================================================ GET https://localhost:8443/v1/data/default Authorization: Bearer {{token}} HTTP 200 [Asserts] jsonpath "$.result.*" count == 1 jsonpath "$.result.data-include" exists GET https://localhost:8443/v1/data/default/data-include Authorization: Bearer {{token}} HTTP 200 [Asserts] jsonpath "$.result['include.json'].inKey" == "inValue" ================================================ FILE: test/e2e/default/chainsaw-test.yaml ================================================ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: default spec: namespace: default steps: - name: initial state is empty try: - script: content: | hurl --variable token=$OPA_TOKEN --insecure --retry 3 1_initial_state.hurl - name: apply fixtures try: - apply: file: ../fixture.yaml cleanup: - delete: file: ../fixture.yaml - name: policy is loaded and evaluated correctly try: - script: content: | hurl --variable token=$OPA_TOKEN --insecure --retry 3 2_policy_loaded.hurl - name: data is loaded correctly try: - script: content: | hurl --variable token=$OPA_TOKEN --insecure 3_data_loaded.hurl - 
name: configmap status annotations are ok try: - assert: resource: apiVersion: v1 kind: ConfigMap metadata: labels: openpolicyagent.org/policy: rego annotations: openpolicyagent.org/kube-mgmt-status: '{"status":"ok"}' - assert: resource: apiVersion: v1 kind: ConfigMap metadata: labels: openpolicyagent.org/data: opa annotations: openpolicyagent.org/kube-mgmt-status: '{"status":"ok"}' ================================================ FILE: test/e2e/default/values.yaml ================================================ ================================================ FILE: test/e2e/fixture-labels.yaml ================================================ --- kind: ConfigMap metadata: name: policy-include labels: kube-mgmt/e2e: "true" qweqwe/policy: "111" apiVersion: v1 data: include.rego: | package example.include allow := true --- kind: ConfigMap metadata: name: policy-exclude labels: kube-mgmt/e2e: "true" openpolicyagent.org/policy: rego apiVersion: v1 data: exclude.rego: | package example.exclude allow := true --- kind: ConfigMap metadata: name: data-include labels: kube-mgmt/e2e: "true" asdasd/data: "222" apiVersion: v1 data: include.json: | {"inKey": "inValue"} --- kind: ConfigMap metadata: name: data-exclude labels: kube-mgmt/e2e: "true" openpolicyagent.org/data: opa apiVersion: v1 data: exclude.json: | {"exKey": "exValue"} --- kind: ConfigMap metadata: name: data-labeless labels: kube-mgmt/e2e: "true" apiVersion: v1 data: labeless.json: | {"lbKey": "lbValue"} --- kind: ConfigMap metadata: name: policy-labeless labels: kube-mgmt/e2e: "true" apiVersion: v1 data: labeless.rego: | package example.labeless allow := true ================================================ FILE: test/e2e/fixture-multi.yaml ================================================ --- kind: ConfigMap metadata: name: multi-file-policy labels: kube-mgmt/e2e: "true" openpolicyagent.org/policy: rego apiVersion: v1 data: a.rego: | package my_pkg import rego.v1 import data.my_pkg.functions.my_func default 
my_rule := false my_rule if { my_func(input.hello) } b.rego: | package my_pkg.functions my_func(str) := startswith("world", str) --- kind: ConfigMap metadata: name: multi-file-fail-policy labels: kube-mgmt/e2e: "true" openpolicyagent.org/policy: rego apiVersion: v1 data: f.rego: | package my_pkg_fail import rego.v1 import data.my_pkg_fail.functions.my_func default my_rule := false my_rule if { my_func(input.hello) } ================================================ FILE: test/e2e/fixture-replication.yaml ================================================ --- apiVersion: v1 kind: Namespace metadata: name: ignore-me --- apiVersion: v1 kind: Namespace metadata: name: dont-ignore-me --- apiVersion: v1 kind: Service metadata: name: dont-ignore-me namespace: dont-ignore-me labels: kube-mgmt/e2e: "true" spec: ports: - name: http port: 8080 --- apiVersion: v1 kind: Service metadata: name: ignore-me namespace: ignore-me labels: kube-mgmt/e2e: "true" spec: ports: - name: http port: 8080 ================================================ FILE: test/e2e/fixture.yaml ================================================ --- kind: ConfigMap metadata: name: policy-include labels: kube-mgmt/e2e: "true" openpolicyagent.org/policy: rego apiVersion: v1 data: include.rego: | package example.include allow := true --- kind: ConfigMap metadata: name: policy-exclude labels: kube-mgmt/e2e: "true" openpolicyagent.org/policy: qwerty apiVersion: v1 data: exclude.rego: | package example.exclude allow := true --- kind: ConfigMap metadata: name: data-include labels: kube-mgmt/e2e: "true" openpolicyagent.org/data: opa apiVersion: v1 data: include.json: | {"inKey": "inValue"} --- kind: ConfigMap metadata: name: data-exclude labels: kube-mgmt/e2e: "true" openpolicyagent.org/data: qwerty apiVersion: v1 data: exclude.json: | {"exKey": "exValue"} --- kind: ConfigMap metadata: name: data-labeless labels: kube-mgmt/e2e: "true" apiVersion: v1 data: labeless.json: | {"lbKey": "lbValue"} --- kind: ConfigMap 
metadata: name: policy-labeless labels: kube-mgmt/e2e: "true" apiVersion: v1 data: labeless.rego: | package example.labeless allow := true ================================================ FILE: test/e2e/labels/1_initial_state.hurl ================================================ GET http://localhost:8080/v1/data HTTP 200 [Asserts] jsonpath "$.result.*" count == 0 ================================================ FILE: test/e2e/labels/2_policy_loaded.hurl ================================================ GET http://localhost:8080/v1/policies HTTP 200 [Asserts] jsonpath "$.result[?(@.id == 'default/policy-include/include.rego')]" count >= 1 GET http://localhost:8080/v1/data/example/include/allow HTTP 200 [Asserts] jsonpath "$.result" == true ================================================ FILE: test/e2e/labels/3_data_loaded.hurl ================================================ GET http://localhost:8080/v1/data/default HTTP 200 [Asserts] jsonpath "$.result.*" count == 1 jsonpath "$.result.data-include" exists GET http://localhost:8080/v1/data/default/data-include HTTP 200 [Asserts] jsonpath "$.result['include.json'].inKey" == "inValue" ================================================ FILE: test/e2e/labels/chainsaw-test.yaml ================================================ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: labels spec: namespace: default steps: - name: initial state is empty try: - script: content: | hurl --retry 3 1_initial_state.hurl - name: apply fixtures try: - apply: file: ../fixture-labels.yaml cleanup: - delete: file: ../fixture-labels.yaml - name: policy is loaded and evaluated correctly try: - script: content: | hurl --retry 3 2_policy_loaded.hurl - name: data is loaded correctly try: - script: content: | hurl 3_data_loaded.hurl - name: configmap status annotations are ok try: - assert: resource: apiVersion: v1 kind: ConfigMap metadata: labels: qweqwe/policy: "111" annotations: openpolicyagent.org/kube-mgmt-status: 
'{"status":"ok"}' - assert: resource: apiVersion: v1 kind: ConfigMap metadata: labels: asdasd/data: "222" annotations: openpolicyagent.org/kube-mgmt-status: '{"status":"ok"}' ================================================ FILE: test/e2e/labels/values.yaml ================================================ useHttps: false opa: null authz: enabled: false mgmt: startupProbe: httpGet: scheme: HTTP extraArgs: - "--policy-label=qweqwe/policy" - "--policy-value=111" - "--data-label=asdasd/data" - "--data-value=222" - "--log-level=debug" ================================================ FILE: test/e2e/multi/1_initial_state.hurl ================================================ GET http://localhost:8080/v1/data/my_pkg/my_rule HTTP 200 [Asserts] jsonpath "$.result" not exists ================================================ FILE: test/e2e/multi/2_policies_loaded.hurl ================================================ GET http://localhost:8080/v1/policies HTTP 200 [Asserts] jsonpath "$.result" count == 2 jsonpath "$.result[?(@.id == 'default/multi-file-policy/a.rego')]" count >= 1 jsonpath "$.result[?(@.id == 'default/multi-file-policy/b.rego')]" count >= 1 POST http://localhost:8080/v1/data/my_pkg/my_rule Content-Type: application/json {"input": {"hello": "world"}} HTTP 200 [Asserts] jsonpath "$.result" == true POST http://localhost:8080/v1/data/my_pkg/my_rule Content-Type: application/json {"input": {"hello": "incorrect"}} HTTP 200 [Asserts] jsonpath "$.result" == false ================================================ FILE: test/e2e/multi/3_policy_unloaded.hurl ================================================ GET http://localhost:8080/v1/data/my_pkg/my_rule HTTP 200 [Asserts] jsonpath "$.result" not exists ================================================ FILE: test/e2e/multi/4_policies_reloaded.hurl ================================================ GET http://localhost:8080/v1/policies HTTP 200 [Asserts] jsonpath "$.result" count == 2 jsonpath "$.result[?(@.id == 
'default/multi-file-policy/a.rego')]" count >= 1 jsonpath "$.result[?(@.id == 'default/multi-file-policy/b.rego')]" count >= 1 POST http://localhost:8080/v1/data/my_pkg/my_rule Content-Type: application/json {"input": {"hello": "world"}} HTTP 200 [Asserts] jsonpath "$.result" == true POST http://localhost:8080/v1/data/my_pkg/my_rule Content-Type: application/json {"input": {"hello": "incorrect"}} HTTP 200 [Asserts] jsonpath "$.result" == false ================================================ FILE: test/e2e/multi/chainsaw-test.yaml ================================================ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: multi spec: namespace: default steps: - name: initial state is empty try: - script: content: | hurl --retry 3 1_initial_state.hurl - name: apply fixtures try: - apply: file: ../fixture-multi.yaml cleanup: - delete: file: ../fixture-multi.yaml - name: policies are loaded and evaluated correctly try: - script: content: | hurl --retry 3 2_policies_loaded.hurl - name: configmap status annotations are ok try: - assert: resource: apiVersion: v1 kind: ConfigMap metadata: name: multi-file-policy annotations: openpolicyagent.org/kube-mgmt-status: '{"status":"ok"}' openpolicyagent.org/kube-mgmt-retries: "0" - script: content: | kubectl get cm multi-file-fail-policy -o json \ | yq -e '.metadata.annotations["openpolicyagent.org/kube-mgmt-status"] | from_json | .status == "error"' kubectl get cm multi-file-fail-policy -o json \ | yq -e '.metadata.annotations["openpolicyagent.org/kube-mgmt-retries"] == "0"' - name: remove policy label try: - script: content: | kubectl patch cm multi-file-policy --type=merge \ -p '{"metadata":{"labels":{"openpolicyagent.org/policy":""}}}' - name: policy is unloaded try: - script: content: | hurl --retry 3 3_policy_unloaded.hurl - name: re-add policy label try: - script: content: | kubectl label --overwrite cm multi-file-policy openpolicyagent.org/policy=rego - name: policies are reloaded and evaluated 
correctly try: - script: content: | hurl --retry 3 4_policies_reloaded.hurl ================================================ FILE: test/e2e/multi/values.yaml ================================================ useHttps: false opa: null authz: enabled: false mgmt: startupProbe: httpGet: scheme: HTTP extraArgs: - "--log-level=debug" ================================================ FILE: test/e2e/replicate/1_replication.hurl ================================================ GET http://localhost:8080/v1/data/kubernetes/services/ignore-me HTTP 200 [Asserts] jsonpath "$.result" not exists GET http://localhost:8080/v1/data/kubernetes/services/dont-ignore-me HTTP 200 [Asserts] jsonpath "$.result.*" count == 1 jsonpath "$.result.dont-ignore-me" exists GET http://localhost:8080/v1/data/kubernetes/services/default HTTP 200 [Asserts] jsonpath "$.result.*" count == 2 ================================================ FILE: test/e2e/replicate/chainsaw-test.yaml ================================================ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: replicate spec: steps: - name: apply fixtures try: - apply: file: ../fixture-replication.yaml cleanup: - delete: file: ../fixture-replication.yaml - name: replication is correct try: - script: content: | hurl --retry 3 1_replication.hurl ================================================ FILE: test/e2e/replicate/values.yaml ================================================ useHttps: false opa: null authz: enabled: false mgmt: data: enabled: false policies: enabled: false startupProbe: httpGet: scheme: HTTP replicate: ignoreNs: - "ignore-me" namespace: - v1/services extraArgs: - "--log-level=debug" rbac: extraRules: - apiGroups: [""] resources: ["services"] verbs: ["*"] ================================================ FILE: test/e2e/replicate_auto/.gitignore ================================================ bundle.tar.gz ================================================ FILE: test/e2e/replicate_auto/1_replication.hurl 
================================================ GET http://localhost:8080/v1/data/kubernetes/services/ignore-me HTTP 200 [Asserts] jsonpath "$.result" not exists GET http://localhost:8080/v1/data/kubernetes/services/dont-ignore-me HTTP 200 [Asserts] jsonpath "$.result.*" count == 1 jsonpath "$.result.dont-ignore-me" exists GET http://localhost:8080/v1/data/kubernetes/services/default HTTP 200 [Asserts] jsonpath "$.result.*" count == 2 ================================================ FILE: test/e2e/replicate_auto/bundle/.manifest ================================================ { "roots": ["main"] } ================================================ FILE: test/e2e/replicate_auto/bundle/main.rego ================================================ package main import rego.v1 main if { some ns, name data.kubernetes.services[ns][name].metadata.labels == "foo" } ================================================ FILE: test/e2e/replicate_auto/chainsaw-test.yaml ================================================ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: replicate-auto spec: steps: - name: apply fixtures try: - apply: file: ../fixture-replication.yaml cleanup: - delete: file: ../fixture-replication.yaml - name: replication is correct try: - script: content: | hurl --retry 3 1_replication.hurl ================================================ FILE: test/e2e/replicate_auto/values.yaml ================================================ useHttps: false opa: { bundles: { test: { resource: file:///bundle/bundle.tar.gz } } } authz: enabled: false mgmt: data: enabled: false policies: enabled: false startupProbe: httpGet: scheme: HTTP replicate: ignoreNs: - "ignore-me" auto: true extraArgs: - "--log-level=debug" rbac: extraRules: - apiGroups: [""] resources: ["services"] verbs: ["*"] extraVolumes: - name: bundle-vol configMap: name: bundle extraVolumeMounts: - name: bundle-vol mountPath: /bundle ================================================ FILE: 
test/lint/images.yaml ================================================ suite: lint image and mgmt.image templates: - fake.yaml tests: - it: image is null set: image: null asserts: - failedTemplate: errorMessage: | values don't meet the specifications of the schema(s) in the following chart(s): opa-kube-mgmt: - at '': missing property 'image' - it: image.repository not string set: image: repository: 5 asserts: - failedTemplate: errorMessage: | values don't meet the specifications of the schema(s) in the following chart(s): opa-kube-mgmt: - at '/image/repository': got number, want string - it: image.tag not string set: image: tag: 5 asserts: - failedTemplate: errorMessage: | values don't meet the specifications of the schema(s) in the following chart(s): opa-kube-mgmt: - at '/image/tag': got number, want string - it: mgmt.image is null set: mgmt: image: null asserts: - failedTemplate: errorMessage: | values don't meet the specifications of the schema(s) in the following chart(s): opa-kube-mgmt: - at '/mgmt': missing property 'image' - it: mgmt.image.repository not string set: mgmt: image: repository: 5 asserts: - failedTemplate: errorMessage: | values don't meet the specifications of the schema(s) in the following chart(s): opa-kube-mgmt: - at '/mgmt/image/repository': got number, want string - it: mgmt.image.tag not string set: mgmt: image: tag: 5 asserts: - failedTemplate: errorMessage: | values don't meet the specifications of the schema(s) in the following chart(s): opa-kube-mgmt: - at '/mgmt/image/tag': got number, want string ================================================ FILE: test/lint/sa.yaml ================================================ suite: lint serviceaccount templates: - fake.yaml tests: - it: annotations not string set: serviceAccount: annotations: foo: 1 asserts: - failedTemplate: errorPattern: "got number, want string" ================================================ FILE: test/lint/service.yaml ================================================ 
suite: lint service templates: - service.yaml tests: - it: fails when service annotation is boolean set: service: annotations: bar: true asserts: - failedTemplate: errorPattern: "got boolean, want string" - it: fails when service annotation is array set: service: annotations: baz: ["invalid"] asserts: - failedTemplate: errorPattern: "got array, want string" - it: fails when service annotation is object set: service: annotations: foo: bar: baz asserts: - failedTemplate: errorPattern: "got object, want string" - it: trafficDistribution invalid value set: service: trafficDistribution: "InvalidValue" asserts: - failedTemplate: errorMessage: | values don't meet the specifications of the schema(s) in the following chart(s): opa-kube-mgmt: - at '/service/trafficDistribution': value must be one of 'PreferClose', 'PreferSameNode', 'PreferSameZone', ================================================ FILE: test/lint/tsc.yaml ================================================ suite: lint topologySpreadConstraints templates: - deployment.yaml tests: - it: fails when maxSkew is missing set: topologySpreadConstraints: - topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "DoNotSchedule" asserts: - failedTemplate: {} - it: fails when maxSkew is null set: topologySpreadConstraints: - maxSkew: null topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "DoNotSchedule" asserts: - failedTemplate: {} - it: fails when topologyKey is missing set: topologySpreadConstraints: - maxSkew: 1 whenUnsatisfiable: "DoNotSchedule" asserts: - failedTemplate: {} - it: fails when whenUnsatisfiable is missing set: topologySpreadConstraints: - maxSkew: 1 topologyKey: "kubernetes.io/hostname" asserts: - failedTemplate: {} - it: fails when maxSkew is not an integer set: topologySpreadConstraints: - maxSkew: "one" topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "DoNotSchedule" asserts: - failedTemplate: {} - it: fails when topologyKey is empty string set: topologySpreadConstraints: - maxSkew: 1 
topologyKey: "" whenUnsatisfiable: "DoNotSchedule" asserts: - failedTemplate: {} - it: fails when whenUnsatisfiable has invalid value set: topologySpreadConstraints: - maxSkew: 1 topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "InvalidOption" asserts: - failedTemplate: {} - it: renders with empty topologySpreadConstraints array set: topologySpreadConstraints: [] asserts: - isKind: of: Deployment - isEmpty: path: spec.template.spec.topologySpreadConstraints - it: renders without topologySpreadConstraints when not set asserts: - isKind: of: Deployment - isEmpty: path: spec.template.spec.topologySpreadConstraints ================================================ FILE: test/unit/health.yaml ================================================ suite: test health probes templates: - deployment.yaml tests: - it: should have only liveness and readiness for OPA asserts: - notExists: path: spec.template.spec.containers[0].startupProbe - exists: path: spec.template.spec.containers[0].readinessProbe - exists: path: spec.template.spec.containers[0].livenessProbe - equal: path: spec.template.spec.containers[0].readinessProbe.httpGet.scheme value: HTTPS - equal: path: spec.template.spec.containers[0].livenessProbe.httpGet.scheme value: HTTPS - it: should override scheme for liveness and readiness for OPA set: useHttps: false asserts: - equal: path: spec.template.spec.containers[0].readinessProbe.httpGet.scheme value: HTTP - equal: path: spec.template.spec.containers[0].livenessProbe.httpGet.scheme value: HTTP - it: should have only startup for kube-mgmt asserts: - exists: path: spec.template.spec.containers[1].startupProbe - notExists: path: spec.template.spec.containers[1].readinessProbe - notExists: path: spec.template.spec.containers[1].livenessProbe - equal: path: spec.template.spec.containers[1].startupProbe.httpGet.scheme value: HTTPS - it: should override startup for kube-mgmt set: mgmt: startupProbe: failureThreshold: 11 timeoutSeconds: 22 asserts: - exists: path: 
spec.template.spec.containers[1].startupProbe - notExists: path: spec.template.spec.containers[1].readinessProbe - notExists: path: spec.template.spec.containers[1].livenessProbe - equal: path: spec.template.spec.containers[1].startupProbe.failureThreshold value: 11 - equal: path: spec.template.spec.containers[1].startupProbe.timeoutSeconds value: 22 ================================================ FILE: test/unit/kube-mgmt_args.yaml ================================================ suite: test kube-mgmt container args templates: - deployment.yaml tests: - it: should have default args asserts: - contains: path: spec.template.spec.containers[1].args content: "--enable-policies=true" - contains: path: spec.template.spec.containers[1].args content: "--enable-data=true" - contains: path: spec.template.spec.containers[1].args content: "--namespaces=NAMESPACE" - contains: path: spec.template.spec.containers[1].args content: "--replicate-ignore-namespaces=" - it: should override args set: mgmt: namespaces: ["111", "222"] replicate: ignoreNs: ["qwe", "asd"] asserts: - contains: path: spec.template.spec.containers[1].args content: "--replicate-ignore-namespaces=qwe,asd" - contains: path: spec.template.spec.containers[1].args content: "--namespaces=111,222" - it: should override all namespaces 1 set: mgmt: namespaces: ["*"] asserts: - contains: path: spec.template.spec.containers[1].args content: "--namespaces=*" - it: should override all namespaces 2 set: mgmt: namespaces: "*" asserts: - contains: path: spec.template.spec.containers[1].args content: "--namespaces=*" - it: should add extraVolumes if authz is disabled & no bootstrapPolicies are provided set: useHttps: false authz: enabled: false extraVolumes: - name: example-app-auth-config secret: secretName: example-app-auth-config asserts: - contains: path: spec.template.spec.volumes content: name: example-app-auth-config secret: secretName: example-app-auth-config ================================================ FILE:
test/unit/rbac_cm.yaml ================================================ suite: test configmap rbac templates: - rbac-mgmt.yaml tests: - it: should create current namespace role by default asserts: - hasDocuments: count: 2 - containsDocument: kind: Role apiVersion: rbac.authorization.k8s.io/v1 namespace: NAMESPACE any: true - containsDocument: kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 namespace: NAMESPACE any: true - it: should create namespace roles when namespaces configured set: mgmt: namespaces: ["qwe", "asd"] asserts: - hasDocuments: count: 4 - containsDocument: kind: Role apiVersion: rbac.authorization.k8s.io/v1 namespace: "qwe" any: true - containsDocument: kind: Role apiVersion: rbac.authorization.k8s.io/v1 namespace: "asd" any: true - containsDocument: kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 namespace: "qwe" any: true - containsDocument: kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 namespace: "asd" any: true - it: should create cluster role if namespace is asterisk set: mgmt: namespaces: "*" asserts: - hasDocuments: count: 2 - containsDocument: kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 any: true - containsDocument: kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 any: true - it: should create cluster role if namespace is single item array with asterisk set: mgmt: namespaces: ["*"] asserts: - hasDocuments: count: 2 - containsDocument: kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 any: true - containsDocument: kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 any: true - it: should not create roles if rbac disabled set: rbac: create: false asserts: - hasDocuments: count: 0 - it: should not create roles if mgmt disabled set: mgmt: enabled: false asserts: - hasDocuments: count: 0 ================================================ FILE: test/unit/rbac_replicate.yaml ================================================ suite: test replicate rbac templates: - 
rbac-mgmt-replicate.yaml tests: - it: should not create cluster role by default asserts: - hasDocuments: count: 0 - it: should create cluster role if has namespace set: mgmt: replicate: namespace: ["qwe"] asserts: - hasDocuments: count: 2 - containsDocument: kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 any: true - containsDocument: kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 any: true - it: should create cluster role if has cluster set: mgmt: replicate: cluster: ["qwe"] asserts: - hasDocuments: count: 2 - containsDocument: kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 any: true - containsDocument: kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 any: true - it: should not create cluster role if rbac disabled set: rbac: create: false mgmt: replicate: namespace: ["qwe"] asserts: - hasDocuments: count: 0 - it: should not create cluster role if mgmt disabled set: mgmt: enabled: false replicate: namespace: ["qwe"] asserts: - hasDocuments: count: 0 ================================================ FILE: test/unit/sa.yaml ================================================ suite: test serviceaccount annotations templates: - serviceaccount.yaml tests: - it: should omit serviceaccount annotations by default asserts: - notExists: path: metadata.annotations - it: should render serviceaccount annotations when provided set: serviceAccount: annotations: foo: bar asserts: - exists: path: metadata.annotations - equal: path: metadata.annotations.foo value: bar ================================================ FILE: test/unit/service.yaml ================================================ suite: test service definition templates: - service.yaml tests: - it: should omit service annotations when null set: service.annotations: null asserts: - notExists: path: metadata.annotations.foo - it: should set service annotations set: service.annotations: foo: bar asserts: - exists: path: metadata.annotations.foo - equal: path: 
metadata.annotations.foo value: bar - it: should omit trafficDistribution by default asserts: - notExists: path: spec.trafficDistribution - it: should omit trafficDistribution when null set: service.trafficDistribution: null asserts: - notExists: path: spec.trafficDistribution ================================================ FILE: test/unit/tsc.yaml ================================================ suite: test topologySpreadConstraints templates: - deployment.yaml tests: - it: renders with DoNotSchedule policy set: topologySpreadConstraints: - maxSkew: 1 topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "DoNotSchedule" asserts: - isKind: of: Deployment - contains: path: spec.template.spec.topologySpreadConstraints content: maxSkew: 1 topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "DoNotSchedule" - it: renders multiple topologySpreadConstraints set: topologySpreadConstraints: - maxSkew: 1 topologyKey: "traffic.kubernetes.io/region" whenUnsatisfiable: "ScheduleAnyway" - maxSkew: 2 topologyKey: "topology.kubernetes.io/zone" whenUnsatisfiable: "DoNotSchedule" asserts: - lengthEqual: path: spec.template.spec.topologySpreadConstraints count: 2 - contains: path: spec.template.spec.topologySpreadConstraints content: topologyKey: "traffic.kubernetes.io/region" any: true - contains: path: spec.template.spec.topologySpreadConstraints content: topologyKey: "topology.kubernetes.io/zone" any: true - it: renders with labelSelector set: topologySpreadConstraints: - maxSkew: 1 topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "DoNotSchedule" labelSelector: matchLabels: app: opa-kube-mgmt asserts: - contains: path: spec.template.spec.topologySpreadConstraints content: maxSkew: 1 topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "DoNotSchedule" labelSelector: matchLabels: app: opa-kube-mgmt - it: renders with minDomains set: topologySpreadConstraints: - maxSkew: 1 topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "DoNotSchedule" minDomains: 3 
asserts: - contains: path: spec.template.spec.topologySpreadConstraints content: minDomains: 3 any: true - it: passes through constraint with labelSelector set: topologySpreadConstraints: - maxSkew: 1 topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "DoNotSchedule" labelSelector: matchLabels: app: opa-kube-mgmt matchExpressions: - key: environment operator: In values: - production - staging asserts: - equal: path: spec.template.spec.topologySpreadConstraints[0].labelSelector value: matchLabels: app: opa-kube-mgmt matchExpressions: - key: environment operator: In values: - production - staging - it: passes through constraint with nodeAffinityPolicy set: topologySpreadConstraints: - maxSkew: 1 topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "DoNotSchedule" nodeAffinityPolicy: "Honor" asserts: - equal: path: spec.template.spec.topologySpreadConstraints[0].nodeAffinityPolicy value: "Honor" - it: passes through constraint with nodeTaintsPolicy set: topologySpreadConstraints: - maxSkew: 1 topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "DoNotSchedule" nodeTaintsPolicy: "Ignore" asserts: - equal: path: spec.template.spec.topologySpreadConstraints[0].nodeTaintsPolicy value: "Ignore" - it: passes through constraint with matchLabelKeys set: topologySpreadConstraints: - maxSkew: 1 topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "DoNotSchedule" matchLabelKeys: - config asserts: - equal: path: spec.template.spec.topologySpreadConstraints[0].matchLabelKeys value: - config - it: passes through fully configured constraint set: topologySpreadConstraints: - maxSkew: 2 topologyKey: "topology.kubernetes.io/zone" whenUnsatisfiable: "ScheduleAnyway" minDomains: 3 nodeAffinityPolicy: "Honor" nodeTaintsPolicy: "Honor" matchLabelKeys: - pod-template-hash labelSelector: matchLabels: app: opa-kube-mgmt asserts: - equal: path: spec.template.spec.topologySpreadConstraints[0] value: maxSkew: 2 topologyKey: "topology.kubernetes.io/zone" whenUnsatisfiable: 
"ScheduleAnyway" minDomains: 3 nodeAffinityPolicy: "Honor" nodeTaintsPolicy: "Honor" matchLabelKeys: - pod-template-hash labelSelector: matchLabels: app: opa-kube-mgmt